"""
API client for LLM providers (Anthropic, Together.ai, etc.)
"""
import os
import logging
from typing import Dict, Any
# Try to import requests, but gracefully handle if not available
try:
    import requests
    REQUESTS_AVAILABLE = True
except ImportError:
    REQUESTS_AVAILABLE = False
    print("Warning: requests module not available. API calls will use mock responses.")

# Configure logging
logger = logging.getLogger(__name__)
# Model configurations
anthropic_models = [
    "claude-3-5-sonnet-20241022",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
    "claude-opus-4-20250514",
    "claude-sonnet-4-20250514"
]

# Previous Together.ai model list, kept for reference:
# together_models = [
#     "meta-llama/Llama-2-70b-chat-hf",
#     "mistralai/Mixtral-8x7B-Instruct-v0.1",
#     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
#     "teknium/OpenHermes-2.5-Mistral-7B",
#     "microsoft/DialoGPT-medium"
# ]
together_models = [
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
]

# Combined list of all available models
all_models = anthropic_models + together_models
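
# Illustrative usage (a sketch, not part of the original API surface; assumes
# the module is importable as `api_client`):
#
#     from api_client import all_models, call_llm
#     model = all_models[0]  # e.g. "claude-3-5-sonnet-20241022"
#     reply = call_llm(model, "Summarize governor limits in Apex.")
#
# Routing is by list membership, so unknown model names fall through to the
# "Unsupported model" branch in call_llm below.
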
def call_llm(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call the specified LLM model with the given prompt.

    Args:
        model: Model name to use
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate

    Returns:
        Model response as string
    """
    try:
        if model in anthropic_models:
            return call_anthropic(model, prompt, temperature, max_tokens)
        elif model in together_models:
            return call_together_ai(model, prompt, temperature, max_tokens)
        else:
            return f"Unsupported model: {model}"
    except Exception as e:
        logger.error(f"Error calling model {model}: {str(e)}")
        return f"Error calling model {model}: {str(e)}"

def call_anthropic(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call the Anthropic Claude API.

    Args:
        model: Claude model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate

    Returns:
        Model response
    """
    if not REQUESTS_AVAILABLE:
        return mock_llm_response(model, prompt)

    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        return mock_llm_response(model, prompt)

    try:
        headers = {
            "Content-Type": "application/json",
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01"
        }
        payload = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        }
        response = requests.post(
            "https://api.anthropic.com/v1/messages",
            headers=headers,
            json=payload,
            timeout=60
        )
        if response.status_code == 200:
            result = response.json()
            return result["content"][0]["text"]
        else:
            return f"Anthropic API error: {response.status_code} - {response.text}"
    except Exception as e:
        logger.warning(f"Anthropic API call failed: {str(e)}, using mock response")
        return mock_llm_response(model, prompt)

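
# For reference, a successful Messages API response carries a list of content
# blocks; call_anthropic reads the text of the first one:
#
#     {"content": [{"type": "text", "text": "..."}], "model": "...", ...}
#
# If a response ever led with a non-text block, the missing "text" key would
# raise a KeyError, which the except clause above converts to a mock response.
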
def call_together_ai(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call the Together.ai API.

    Args:
        model: Together.ai model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate

    Returns:
        Model response
    """
    if not REQUESTS_AVAILABLE:
        return mock_llm_response(model, prompt)

    api_key = os.getenv("TOGETHER_API_KEY")
    if not api_key:
        return mock_llm_response(model, prompt)

    try:
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        payload = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        }
        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60
        )
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        else:
            return f"Together.ai API error: {response.status_code} - {response.text}"
    except Exception as e:
        logger.warning(f"Together.ai API call failed: {str(e)}, using mock response")
        return mock_llm_response(model, prompt)

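
# Together.ai exposes an OpenAI-compatible chat completions schema, so the
# reply text lives at choices[0].message.content:
#
#     {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...}
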
def test_model_connectivity() -> Dict[str, str]:
    """
    Test connectivity to the different model providers.

    Returns:
        Dictionary with model provider status
    """
    results = {}

    # Test Anthropic
    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    if anthropic_key:
        test_response = call_anthropic(anthropic_models[0], "Hello", 0.1, 10)
        # Failures surface as strings starting with "Anthropic API error" or
        # "Error"; a plain startswith("Error") check would miss the former.
        if test_response.startswith(("Anthropic API error", "Error")):
            results["anthropic"] = f"Failed: {test_response}"
        else:
            results["anthropic"] = "Connected"
    else:
        results["anthropic"] = "API key not set"

    # Test Together.ai
    together_key = os.getenv("TOGETHER_API_KEY")
    if together_key:
        test_response = call_together_ai(together_models[0], "Hello", 0.1, 10)
        if test_response.startswith(("Together.ai API error", "Error")):
            results["together_ai"] = f"Failed: {test_response}"
        else:
            results["together_ai"] = "Connected"
    else:
        results["together_ai"] = "API key not set"

    return results

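
# Illustrative check (example output below is hypothetical):
#
#     >>> from api_client import test_model_connectivity
#     >>> test_model_connectivity()
#     {'anthropic': 'API key not set', 'together_ai': 'Connected'}
#
# Caveat: a provider call that silently fell back to a mock response (e.g. a
# network failure after the key check) still reports "Connected", because the
# mock text does not start with an error prefix.
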
def get_model_info(model: str) -> Dict[str, Any]:
    """
    Get information about a specific model.

    Args:
        model: Model name

    Returns:
        Dictionary with model information
    """
    if model in anthropic_models:
        return {
            "provider": "Anthropic",
            "model": model,
            "type": "Chat",
            "max_tokens": 4096,
            "supports_functions": True
        }
    elif model in together_models:
        return {
            "provider": "Together.ai",
            "model": model,
            "type": "Chat",
            "max_tokens": 4096,
            "supports_functions": False
        }
    else:
        return {
            "provider": "Unknown",
            "model": model,
            "error": "Model not found"
        }

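
# Example (illustrative):
#
#     >>> get_model_info("claude-3-haiku-20240307")["provider"]
#     'Anthropic'
#     >>> get_model_info("not-a-model")["error"]
#     'Model not found'
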
def validate_api_keys() -> Dict[str, bool]:
    """
    Validate that required API keys are set.

    Returns:
        Dictionary with API key validation status
    """
    return {
        "anthropic": bool(os.getenv("ANTHROPIC_API_KEY")),
        "together_ai": bool(os.getenv("TOGETHER_API_KEY"))
    }

# Mock functions for testing when API keys are not available
def mock_llm_response(model: str, prompt: str) -> str:
    """
    Generate a mock response for testing purposes.

    Args:
        model: Model name
        prompt: Input prompt

    Returns:
        Mock response
    """
    return f"""## CORRECTED CODE
```apex
// This is a mock response for model: {model}
trigger MockTrigger on Account (before insert, before update) {{
    // Mock corrected trigger logic
    for (Account acc : Trigger.new) {{
        if (Trigger.isInsert) {{
            // Insert logic
        }}
        if (Trigger.isUpdate) {{
            // Update logic
        }}
    }}
}}
```

## KEY CHANGES
- Added proper trigger context checks
- Implemented bulkification patterns
- Added error handling

## CRITICAL ISSUES FIXED
1. Missing trigger context: Added Trigger.isInsert/isUpdate checks
2. Governor limits: Implemented proper bulkification
3. Error handling: Added try-catch blocks

## REMAINING WARNINGS
- Test coverage needed for all scenarios
- Consider adding custom metadata for configuration
"""

# Use mock responses if API keys are not available
def call_llm_with_fallback(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call LLM with fallback to mock response if API keys are not available.

    Args:
        model: Model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate

    Returns:
        Model response or mock response
    """
    api_keys = validate_api_keys()

    if model in anthropic_models and not api_keys["anthropic"]:
        logger.warning("Anthropic API key not available, using mock response")
        return mock_llm_response(model, prompt)
    elif model in together_models and not api_keys["together_ai"]:
        logger.warning("Together.ai API key not available, using mock response")
        return mock_llm_response(model, prompt)

    return call_llm(model, prompt, temperature, max_tokens)
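
# Minimal smoke test when run directly. This is a sketch: it assumes zero, one,
# or both API keys may be set in the environment, and relies on the mock
# fallback otherwise. The prompt and truncation length are arbitrary choices.
if __name__ == "__main__":
    print("API keys:", validate_api_keys())
    print("Connectivity:", test_model_connectivity())
    # Exercise the fallback path end to end with the first known model.
    demo_model = all_models[0]
    print("Model info:", get_model_info(demo_model))
    print(call_llm_with_fallback(demo_model, "Reply with the single word: pong.", 0.1, 20)[:200])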