Tools is broken
This commit is contained in:
120
scripts/discover_llm_endpoint.py
Normal file
120
scripts/discover_llm_endpoint.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""Discover which LLM API endpoint a server exposes.

Probes the configured base URL in three passes and prints everything it
learns:

1. GET a few root/health paths to see whether the server answers at all.
2. POST a minimal chat payload to well-known LLM routes (OpenAI-compatible,
   Ollama, text-generation-webui, plus a few custom guesses), stopping at
   the first route that returns 2xx.
3. GET common OpenAPI/Swagger documentation paths.

Purely a diagnostic network tool: no return value, all output to stdout.
"""

import os

import requests
from dotenv import load_dotenv

# Load .env so LLM_API_URL (and any proxy settings) can be configured there.
load_dotenv()

# Consistent with scripts/test_llm_endpoint.py: the base URL is overridable
# via the LLM_API_URL environment variable instead of being hard-coded.
base_url = os.getenv("LLM_API_URL", "http://api.chat.pathcore.org")

# NOTE(review): verify=False disables TLS certificate verification.  It is a
# no-op for plain http, but unsafe if base_url ever becomes https — confirm
# this is intended before pointing the script at a production host.
_REQUEST_KWARGS = {"timeout": 5, "verify": False}

# Candidate POST routes, grouped by the API family they belong to.
_LLM_ENDPOINTS = [
    # OpenAI compatible
    "/v1/chat/completions",
    "/chat/completions",
    "/v1/completions",
    # Ollama style
    "/api/generate",
    "/api/chat",
    "/generate",
    # Text Generation WebUI
    "/api/v1/generate",
    "/api/v1/chat/completions",
    # Custom
    "/completion",
    "/inference",
    "/predict",
    "/chat",
]

# Payload carries both "prompt" and "messages" so that prompt-style and
# chat-style servers can each find the field they expect.
_PROBE_PAYLOAD = {
    "prompt": "Hello",
    "model": "default",
    "messages": [{"role": "user", "content": "test"}],
    "max_tokens": 10,
}

# Places servers commonly publish OpenAPI/Swagger documentation.
_DOC_PATHS = [
    "/docs",
    "/swagger",
    "/api/docs",
    "/openapi.json",
    "/swagger.json",
    "/api/swagger.json",
]


def _banner(title: str) -> None:
    """Print a section banner matching the script's output style."""
    print("=" * 60)
    print(title)
    print("=" * 60)


def _probe_root_paths() -> None:
    """Step 1: GET a handful of root/health paths and dump what comes back."""
    for path in ["/", "/health", "/api", "/docs", "/v1"]:
        url = f"{base_url}{path}"
        print(f"\nTrying: {url}")
        try:
            resp = requests.get(url, **_REQUEST_KWARGS)
        except requests.RequestException as e:
            # Connection refused / timeout / DNS failure — report and move on.
            print(f"  Error: {e}")
            continue
        print(f"  Status: {resp.status_code}")
        if resp.status_code == 200:
            print(f"  Content-Type: {resp.headers.get('content-type')}")
            print(f"  Response (first 500 chars):\n{resp.text[:500]}")


def _probe_llm_endpoints() -> None:
    """Step 2: POST the minimal payload to each candidate LLM route.

    Stops at the first 2xx response and prints the LLM_API_URL line to put
    in .env.  A 422 or 401 still proves the route exists, so those are
    reported as near-misses.
    """
    for endpoint in _LLM_ENDPOINTS:
        url = f"{base_url}{endpoint}"
        print(f"\nPOST {url}")
        try:
            resp = requests.post(url, json=_PROBE_PAYLOAD, **_REQUEST_KWARGS)
        except requests.RequestException as e:
            print(f"  Error: {e}")
            continue
        print(f"  Status: {resp.status_code}")
        if resp.status_code in (200, 201):
            print("  ✓✓✓ SUCCESS! ✓✓✓")
            print(f"  Content-Type: {resp.headers.get('content-type')}")
            print(f"  Response:\n{resp.text[:500]}")
            print("\n  Use this in your .env:")
            print(f"  LLM_API_URL={url}")
            break
        elif resp.status_code == 422:  # Validation error means endpoint exists!
            print("  ⚠ Endpoint exists but payload wrong")
            print(f"  Response: {resp.text[:300]}")
        elif resp.status_code == 401:
            print("  ⚠ Endpoint exists but requires authentication")


def _probe_doc_paths() -> None:
    """Step 3: look for OpenAPI/Swagger documentation (best-effort)."""
    for path in _DOC_PATHS:
        url = f"{base_url}{path}"
        print(f"\nTrying: {url}")
        try:
            resp = requests.get(url, **_REQUEST_KWARGS)
        except requests.RequestException:
            # Best-effort pass: a missing docs page is expected, stay quiet.
            continue
        if resp.status_code == 200:
            print("  ✓ Found documentation!")
            print(f"  Content-Type: {resp.headers.get('content-type')}")
            if "json" in resp.headers.get("content-type", ""):
                print(f"  First 500 chars:\n{resp.text[:500]}")


def main() -> None:
    """Run the three discovery passes in order."""
    print(f"Discovering endpoints for: {base_url}\n")

    _banner("Step 1: Testing root endpoint")
    _probe_root_paths()

    print()
    _banner("Step 2: Testing common LLM endpoints")
    _probe_llm_endpoints()

    print()
    _banner("Step 3: Looking for API documentation")
    _probe_doc_paths()

    print()
    _banner("Discovery complete!")


if __name__ == "__main__":
    main()
|
||||
45
scripts/test_llm_endpoint.py
Normal file
45
scripts/test_llm_endpoint.py
Normal file
@@ -0,0 +1,45 @@
|
||||
# test_llm_endpoint.py
"""Quick smoke test for the configured LLM API endpoint.

Tries a handful of likely chat/completion routes under LLM_API_URL and
stops at the first one that answers a minimal chat POST with HTTP 200.
Diagnostic network tool only; all output goes to stdout.
"""

import os

import requests
from dotenv import load_dotenv

# Pull LLM_API_URL (and proxy settings, if any) from .env.
load_dotenv()

base_url = os.getenv("LLM_API_URL", "http://api.chat.pathcore.org")

# Test different possible endpoints, most common OpenAI-style routes first.
_CANDIDATE_ENDPOINTS = [
    f"{base_url}/v1/chat/completions",
    f"{base_url}/chat/completions",
    f"{base_url}/completions",
    f"{base_url}/v1/completions",
    f"{base_url}/api/v1/chat/completions",
]

# Minimal chat payload accepted by OpenAI-compatible servers.
_PAYLOAD = {
    "model": "default",
    "messages": [{"role": "user", "content": "test"}],
}


def main() -> None:
    """Probe each candidate endpoint; stop at the first that returns 200."""
    print("Testing LLM API endpoints...\n")

    for endpoint in _CANDIDATE_ENDPOINTS:
        print(f"Testing: {endpoint}")
        try:
            # Try a simple GET request first: even a 404/405 proves the host
            # is reachable at this path.
            resp = requests.get(endpoint, timeout=5, verify=False)
            print(f"  GET response: {resp.status_code}")

            # Then POST the minimal chat payload.
            # NOTE(review): verify=False disables TLS verification — a no-op
            # for plain http, unsafe if the URL is ever switched to https.
            resp = requests.post(
                endpoint,
                json=_PAYLOAD,
                timeout=5,
                verify=False,
            )
            print(f"  POST response: {resp.status_code}")
            if resp.status_code == 200:
                print("  ✓ SUCCESS! This endpoint works!")
                # resp.json() can raise on a non-JSON 200 body; the broad
                # except below turns that into a reported error rather than
                # aborting the sweep.
                print(f"  Response: {resp.json()}")
                break
        except Exception as e:
            # Deliberately broad: one bad endpoint (connection error, timeout,
            # non-JSON body) must never stop the remaining probes.
            print(f"  Error: {e}")
        print()


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user