"""
API client for LLM providers (Anthropic, Together.ai, etc.)
"""

import os
import logging
from typing import Dict, Any

# Configure logging
logger = logging.getLogger(__name__)

# Try to import requests, but degrade gracefully if it is not installed
try:
    import requests
    REQUESTS_AVAILABLE = True
except ImportError:
    REQUESTS_AVAILABLE = False
    logger.warning("requests module not available; API calls will use mock responses.")

# Model configurations
anthropic_models = [
    "claude-3-5-sonnet-20241022",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
    "claude-opus-4-20250514",
    "claude-sonnet-4-20250514"
]

# together_models = [
#     "meta-llama/Llama-2-70b-chat-hf",
#     "mistralai/Mixtral-8x7B-Instruct-v0.1",
#     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
#     "teknium/OpenHermes-2.5-Mistral-7B",
#     "microsoft/DialoGPT-medium"
# ]
together_models = [
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
]

# Combined list of all available models
all_models = anthropic_models + together_models

def call_llm(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call the specified LLM model with the given prompt.
    
    Args:
        model: Model name to use
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        
    Returns:
        Model response as string
    """
    try:
        if model in anthropic_models:
            return call_anthropic(model, prompt, temperature, max_tokens)
        elif model in together_models:
            return call_together_ai(model, prompt, temperature, max_tokens)
        else:
            return f"Unsupported model: {model}"
    except Exception as e:
        logger.error(f"Error calling model {model}: {str(e)}")
        return f"Error calling model {model}: {str(e)}"

def call_anthropic(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call Anthropic Claude API.
    
    Args:
        model: Claude model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        
    Returns:
        Model response
    """
    if not REQUESTS_AVAILABLE:
        return mock_llm_response(model, prompt)
        
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        return mock_llm_response(model, prompt)
    
    try:
        headers = {
            "Content-Type": "application/json",
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01"
        }
        
        payload = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        }
        
        response = requests.post(
            "https://api.anthropic.com/v1/messages",
            headers=headers,
            json=payload,
            timeout=60
        )
        
        if response.status_code == 200:
            result = response.json()
            return result["content"][0]["text"]
        else:
            return f"Anthropic API error: {response.status_code} - {response.text}"
            
    except Exception as e:
        logger.warning(f"Anthropic API call failed: {str(e)}, using mock response")
        return mock_llm_response(model, prompt)

def call_together_ai(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call Together.ai API.
    
    Args:
        model: Together.ai model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        
    Returns:
        Model response
    """
    if not REQUESTS_AVAILABLE:
        return mock_llm_response(model, prompt)
        
    api_key = os.getenv("TOGETHER_API_KEY")
    if not api_key:
        return mock_llm_response(model, prompt)
    
    try:
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        
        payload = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        }
        
        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60
        )
        
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        else:
            return f"Together.ai API error: {response.status_code} - {response.text}"
            
    except Exception as e:
        logger.warning(f"Together.ai API call failed: {str(e)}, using mock response")
        return mock_llm_response(model, prompt)

def test_model_connectivity() -> Dict[str, str]:
    """
    Test connectivity to different model providers.
    
    Returns:
        Dictionary with model provider status
    """
    results = {}
    
    # Test Anthropic
    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    if anthropic_key:
        test_response = call_anthropic(anthropic_models[0], "Hello", 0.1, 10)
        # Provider failures surface as "Anthropic API error: ..." or
        # "Error calling model ...", so check both prefixes
        is_error = test_response.startswith(("Error", "Anthropic API error"))
        results["anthropic"] = "Connected" if not is_error else f"Failed: {test_response}"
    else:
        results["anthropic"] = "API key not set"
    
    # Test Together.ai
    together_key = os.getenv("TOGETHER_API_KEY")
    if together_key:
        test_response = call_together_ai(together_models[0], "Hello", 0.1, 10)
        is_error = test_response.startswith(("Error", "Together.ai API error"))
        results["together_ai"] = "Connected" if not is_error else f"Failed: {test_response}"
    else:
        results["together_ai"] = "API key not set"
    
    return results

def get_model_info(model: str) -> Dict[str, Any]:
    """
    Get information about a specific model.
    
    Args:
        model: Model name
        
    Returns:
        Dictionary with model information
    """
    if model in anthropic_models:
        return {
            "provider": "Anthropic",
            "model": model,
            "type": "Chat",
            "max_tokens": 4096,
            "supports_functions": True
        }
    elif model in together_models:
        return {
            "provider": "Together.ai",
            "model": model,
            "type": "Chat",
            "max_tokens": 4096,
            "supports_functions": False
        }
    else:
        return {
            "provider": "Unknown",
            "model": model,
            "error": "Model not found"
        }

def validate_api_keys() -> Dict[str, bool]:
    """
    Validate that required API keys are set.
    
    Returns:
        Dictionary with API key validation status
    """
    return {
        "anthropic": bool(os.getenv("ANTHROPIC_API_KEY")),
        "together_ai": bool(os.getenv("TOGETHER_API_KEY"))
    }

# Mock response used for testing when API keys are not available
def mock_llm_response(model: str, prompt: str) -> str:
    """
    Generate a mock response for testing purposes.
    
    Args:
        model: Model name
        prompt: Input prompt
        
    Returns:
        Mock response
    """
    return f"""## CORRECTED CODE
```apex
// This is a mock response for model: {model}
trigger MockTrigger on Account (before insert, before update) {{
    // Mock corrected trigger logic
    for (Account acc : Trigger.new) {{
        if (Trigger.isInsert) {{
            // Insert logic
        }}
        if (Trigger.isUpdate) {{
            // Update logic
        }}
    }}
}}
```

## KEY CHANGES
- Added proper trigger context checks
- Implemented bulkification patterns
- Added error handling

## CRITICAL ISSUES FIXED
1. Missing trigger context: Added Trigger.isInsert/isUpdate checks
2. Governor limits: Implemented proper bulkification
3. Error handling: Added try-catch blocks

## REMAINING WARNINGS
- Test coverage needed for all scenarios
- Consider adding custom metadata for configuration
"""

# Use mock responses if API keys are not available
def call_llm_with_fallback(model: str, prompt: str, temperature: float = 0.3, max_tokens: int = 4000) -> str:
    """
    Call LLM with fallback to mock response if API keys are not available.
    
    Args:
        model: Model name
        prompt: Input prompt
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        
    Returns:
        Model response or mock response
    """
    api_keys = validate_api_keys()
    
    if model in anthropic_models and not api_keys["anthropic"]:
        logger.warning("Anthropic API key not available, using mock response")
        return mock_llm_response(model, prompt)
    elif model in together_models and not api_keys["together_ai"]:
        logger.warning("Together.ai API key not available, using mock response")
        return mock_llm_response(model, prompt)
    
    return call_llm(model, prompt, temperature, max_tokens)
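
# Usage sketch: run this module directly for a quick smoke test. This is a
# minimal example rather than part of the public API; the model choice is
# illustrative, and behavior depends on which API keys are set in the
# environment.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Show which provider keys are configured (True/False per provider)
    print("API keys:", validate_api_keys())

    # Probe each provider with a tiny request; providers without a key
    # report "API key not set" instead of making a network call
    print("Connectivity:", test_model_connectivity())

    # Safe without credentials: call_llm_with_fallback returns a mock
    # response when the relevant API key is missing
    sample = call_llm_with_fallback(anthropic_models[0], "Say hello in one word.", max_tokens=20)
    print("Sample response:", sample[:200])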