delightfulrachel committed on
Commit 62b53a3 · verified · 1 Parent(s): 85472bf

Update app.py

Files changed (1)
  1. app.py +63 -642
app.py CHANGED
@@ -1,534 +1,20 @@
  import gradio as gr
- import os
- import time
- import requests
  import json
  import plotly.express as px
  import pandas as pd
- import re
- import hashlib
- from functools import lru_cache
- from typing import Dict, Tuple, Optional, List
- import logging
+ from typing import Tuple, Dict, Optional, List

- # Configure logging
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
+ # Import from our modules
+ from utils import (
+     validate_apex_syntax, perform_skeptical_evaluation, extract_code_blocks,
+     format_structured_explanation, format_object_conversion_explanation,
+     extract_validation_metrics, normalize_metrics, generate_test_cases,
+     VALIDATION_SCHEMA, B2B_COMMERCE_PATTERNS, logger
+ )

+ from api_client import (
+     all_models, together_models, anthropic_models, call_llm
+ )
- # Model options for dropdown with both Together AI and Anthropic models
- together_models = [
-     "Qwen/Qwen2.5-Coder-32B-Instruct",
-     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-     "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-     "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
- ]
-
- anthropic_models = [
-     "claude-3-7-sonnet-20250219",
-     "claude-3-haiku-20240307",
-     "claude-opus-4-20250514",
-     "claude-sonnet-4-20250514"
- ]
-
- all_models = together_models + anthropic_models
-
- VALIDATION_SCHEMA = {
-     "quality_rating": "int (1–10)",
-     "accuracy": "float (0.0–1.0)",
-     "completeness": "float (0.0–1.0)",
-     "best_practices_alignment": "float (0.0–1.0)",
-     "syntax_validity": "float (0.0–1.0)",
-     "security_score": "float (0.0–1.0)",
-     "performance_score": "float (0.0–1.0)",
-     "explanations": {
-         "quality_rating": "string",
-         "accuracy": "string",
-         "completeness": "string",
-         "best_practices_alignment": "string",
-         "syntax_validity": "string",
-         "security_score": "string",
-         "performance_score": "string"
-     },
-     "errors": ["list of syntax errors"],
-     "warnings": ["list of potential issues"],
-     "suggestions": ["list of improvement suggestions"]
- }
-
- # Apex syntax patterns for validation
- APEX_PATTERNS = {
-     "class_declaration": r"(?:public|private|global|protected)\s+(?:virtual|abstract|with sharing|without sharing|inherited sharing)?\s*class\s+\w+",
-     "trigger_declaration": r"trigger\s+\w+\s+on\s+\w+\s*\([^)]+\)",
-     "method_declaration": r"(?:public|private|global|protected)\s+(?:static)?\s*(?:void|\w+)\s+\w+\s*\([^)]*\)",
-     "soql_query": r"(?:\[|Database\.query\s*\()\s*SELECT\s+.*?\s+FROM\s+\w+.*?(?:\]|\))",
-     "dml_operation": r"(?:insert|update|delete|undelete|upsert|merge)\s+\w+",
-     "bulkification_issue": r"for\s*\([^)]+\)\s*{[^}]*(?:insert|update|delete|undelete)\s+",
-     "hardcoded_id": r"(?:\'[a-zA-Z0-9]{15}\'|\'[a-zA-Z0-9]{18}\')",
-     "missing_null_check": r"(\w+)\.(\w+)(?!\s*(?:!=|==)\s*null)",
-     "governor_limit_risk": r"(?:for\s*\([^)]+\)\s*{[^}]*\[SELECT|Database\.query)",
- }
-
- # Common Apex errors and their fixes
- APEX_ERRORS = {
-     "missing_semicolon": {
-         "pattern": r"[^{};]\s*\n\s*(?:public|private|global|protected|if|for|while|try)",
-         "message": "Missing semicolon at end of statement",
-         "severity": "error"
-     },
-     "unclosed_bracket": {
-         "pattern": r"(?:\{(?:[^{}]|(?:\{[^{}]*\}))*$)|(?:^[^{}]*\})",
-         "message": "Unclosed or extra bracket detected",
-         "severity": "error"
-     },
-     "invalid_soql": {
-         "pattern": r"\[\s*SELECT\s+FROM\s+\w+",
-         "message": "Invalid SOQL: Missing field selection",
-         "severity": "error"
-     },
-     "missing_try_catch_dml": {
-         "pattern": r"(?<!try\s{[^}]*)(insert|update|delete|upsert)\s+(?!.*catch)",
-         "message": "DML operation without try-catch block",
-         "severity": "warning"
-     }
- }
-
- # B2B Commerce specific patterns
- B2B_COMMERCE_PATTERNS = {
-     "cloudcraze_reference": r"(?:ccrz__|E_\w+|CC_\w+)",
-     "b2b_lex_object": r"(?:OrderSummary|CartItem|WebCart|ProductCatalog|BuyerGroup|CommerceEntitlementPolicy)",
-     "deprecated_method": r"(?:ccrz\.cc_CallContext|ccrz\.ccAPI|cc_bean_\w+)",
-     "migration_required": r"(?:E_Product__|E_Cart__|E_Order__|CC_Promotions__|CC_Tax__)"
- }
-
- def get_api_key(provider: str) -> str:
-     """Securely retrieve API key for the specified provider."""
-     try:
-         if provider == "together":
-             api_key = os.getenv("TOGETHER_API_KEY")
-             if not api_key:
-                 raise ValueError("API key not configured. Please contact administrator.")
-             return api_key
-         elif provider == "anthropic":
-             api_key = os.getenv("ANTHROPIC_API_KEY")
-             if not api_key:
-                 raise ValueError("API key not configured. Please contact administrator.")
-             return api_key
-         else:
-             raise ValueError(f"Unknown provider: {provider}")
-     except Exception as e:
-         logger.error(f"Error retrieving API key: {e}")
-         raise
-
- def get_provider(model: str) -> str:
-     """Determine the provider for a given model."""
-     if model in together_models:
-         return "together"
-     elif model in anthropic_models:
-         return "anthropic"
-     else:
-         raise ValueError(f"Unknown model: {model}")
-
- def handle_api_error(status_code: int, response_text: str) -> str:
-     """Handle API errors with appropriate user-friendly messages."""
-     if status_code == 401:
-         return "Authentication failed. Please check API configuration."
-     elif status_code == 429:
-         return "Rate limit exceeded. Please try again later."
-     elif status_code == 403:
-         return "Access forbidden. Please check your permissions."
-     elif status_code >= 500:
-         return "Service temporarily unavailable. Please try again."
-     else:
-         return f"Request failed with status {status_code}"
-
- def call_api_with_retry(api_func, *args, max_retries: int = 3, timeout: int = 30, **kwargs):
-     """Call API with retry logic and timeout."""
-     for attempt in range(max_retries):
-         try:
-             kwargs['timeout'] = timeout
-             return api_func(*args, **kwargs)
-         except requests.Timeout:
-             if attempt == max_retries - 1:
-                 return "Request timed out. Please try again with a shorter input."
-         except requests.ConnectionError:
-             if attempt == max_retries - 1:
-                 return "Connection error. Please check your internet connection."
-         except Exception as e:
-             if attempt == max_retries - 1:
-                 return f"Error: {str(e)}"
-         time.sleep(2 ** attempt)  # Exponential backoff
-
- def call_together_api(model: str, prompt: str, temperature: float = 0.7, max_tokens: int = 1500) -> str:
-     """Call Together AI API with enhanced error handling."""
-     api_key = get_api_key("together")
-     system_message = (
-         "You are a Salesforce B2B Commerce expert. Be CONCISE and PRECISE. "
-         "Focus on CODE QUALITY over explanations. Use structured formats when requested. "
-         "Always check for syntax errors, security issues, and performance problems."
-     )
-
-     def make_request():
-         headers = {
-             "Authorization": f"Bearer {api_key}",
-             "Content-Type": "application/json"
-         }
-         payload = {
-             "model": model,
-             "messages": [
-                 {"role": "system", "content": system_message},
-                 {"role": "user", "content": prompt}
-             ],
-             "temperature": temperature,
-             "max_tokens": max_tokens,
-             "top_p": 0.9
-         }
-         resp = requests.post(
-             "https://api.together.xyz/v1/chat/completions",
-             headers=headers,
-             json=payload,
-             timeout=30
-         )
-         if resp.status_code != 200:
-             return handle_api_error(resp.status_code, resp.text)
-         data = resp.json()
-         return data["choices"][0]["message"]["content"]
-
-     return call_api_with_retry(make_request)
-
- def call_anthropic_api(model: str, prompt: str, temperature: float = 0.7, max_tokens: int = 1500) -> str:
-     """Call Anthropic API with enhanced error handling."""
-     api_key = get_api_key("anthropic")
-     system_message = (
-         "You are a Salesforce B2B Commerce expert. Be CONCISE and PRECISE. "
-         "Focus on CODE QUALITY over explanations. Use structured formats when requested. "
-         "Always check for syntax errors, security issues, and performance problems."
-     )
-
-     def make_request():
-         headers = {
-             "x-api-key": api_key,
-             "anthropic-version": "2023-06-01",
-             "content-type": "application/json"
-         }
-         payload = {
-             "model": model,
-             "system": system_message,
-             "messages": [
-                 {"role": "user", "content": prompt}
-             ],
-             "temperature": temperature,
-             "max_tokens": max_tokens
-         }
-         resp = requests.post(
-             "https://api.anthropic.com/v1/messages",
-             headers=headers,
-             json=payload,
-             timeout=30
-         )
-         if resp.status_code != 200:
-             return handle_api_error(resp.status_code, resp.text)
-         data = resp.json()
-         return data["content"][0]["text"]
-
-     return call_api_with_retry(make_request)
-
- @lru_cache(maxsize=100)
- def cached_llm_call(model_hash: str, prompt_hash: str, model: str, prompt: str, temperature: float = 0.7, max_tokens: int = 1500) -> str:
-     """Cached LLM call to avoid repeated API calls for same inputs."""
-     provider = get_provider(model)
-     if provider == "together":
-         return call_together_api(model, prompt, temperature, max_tokens)
-     elif provider == "anthropic":
-         return call_anthropic_api(model, prompt, temperature, max_tokens)
-     else:
-         return f"Error: Unknown provider for model {model}"
-
- def call_llm(model: str, prompt: str, temperature: float = 0.7, max_tokens: int = 1500) -> str:
-     """Call LLM with caching support."""
-     model_hash = hashlib.md5(model.encode()).hexdigest()
-     prompt_hash = hashlib.md5(prompt.encode()).hexdigest()
-     return cached_llm_call(model_hash, prompt_hash, model, prompt, temperature, max_tokens)
-
- def validate_apex_syntax(code: str) -> Tuple[bool, List[Dict[str, str]]]:
-     """Validate Apex syntax and return errors/warnings."""
-     issues = []
-
-     # Check for basic syntax errors
-     for error_type, error_info in APEX_ERRORS.items():
-         matches = re.finditer(error_info["pattern"], code, re.MULTILINE | re.DOTALL)
-         for match in matches:
-             issues.append({
-                 "type": error_info["severity"],
-                 "message": error_info["message"],
-                 "line": code[:match.start()].count('\n') + 1,
-                 "position": match.start()
-             })
-
-     # Check for Apex-specific patterns
-     if not re.search(APEX_PATTERNS["class_declaration"], code) and \
-        not re.search(APEX_PATTERNS["trigger_declaration"], code):
-         issues.append({
-             "type": "error",
-             "message": "No valid Apex class or trigger declaration found",
-             "line": 1,
-             "position": 0
-         })
-
-     # Check for bulkification issues
-     bulk_issues = re.finditer(APEX_PATTERNS["bulkification_issue"], code, re.DOTALL)
-     for match in bulk_issues:
-         issues.append({
-             "type": "error",
-             "message": "DML operation inside loop - violates bulkification best practices",
-             "line": code[:match.start()].count('\n') + 1,
-             "position": match.start()
-         })
-
-     # Check for hardcoded IDs
-     hardcoded_ids = re.finditer(APEX_PATTERNS["hardcoded_id"], code)
-     for match in hardcoded_ids:
-         issues.append({
-             "type": "warning",
-             "message": "Hardcoded Salesforce ID detected - use Custom Settings or Custom Metadata",
-             "line": code[:match.start()].count('\n') + 1,
-             "position": match.start()
-         })
-
-     # Check for governor limit risks
-     gov_limit_risks = re.finditer(APEX_PATTERNS["governor_limit_risk"], code, re.DOTALL)
-     for match in gov_limit_risks:
-         issues.append({
-             "type": "warning",
-             "message": "SOQL query inside loop - potential governor limit issue",
-             "line": code[:match.start()].count('\n') + 1,
-             "position": match.start()
-         })
-
-     has_errors = any(issue["type"] == "error" for issue in issues)
-     return not has_errors, issues
-
- def extract_code_blocks(text: str) -> str:
-     """Enhanced code extraction with multiple strategies."""
-     # Strategy 1: Standard code blocks with language markers
-     pattern = r"```(?:apex|java|Apex|Java|APEX|JAVA)?\s*(.*?)```"
-     matches = re.findall(pattern, text, re.DOTALL | re.IGNORECASE)
-
-     code_blocks = []
-     for block in matches:
-         cleaned_block = block.strip()
-         if cleaned_block:
-             code_blocks.append(cleaned_block)
-
-     # Strategy 2: Improved fallback detection for Apex-specific patterns
-     if not code_blocks:
-         apex_patterns = [
-             # Class declarations (including inner classes)
-             r"((?:public|private|global|protected)\s+(?:virtual|abstract|with sharing|without sharing|inherited sharing)?\s*class\s+\w+(?:\s+extends\s+\w+)?(?:\s+implements\s+[\w\s,]+)?\s*\{(?:[^{}]|\{[^{}]*\})*\})",
-             # Trigger declarations
-             r"(trigger\s+\w+\s+on\s+\w+\s*\([^)]+\)\s*\{(?:[^{}]|\{[^{}]*\})*\})",
-             # Interface declarations
-             r"((?:public|private|global)\s+interface\s+\w+(?:\s+extends\s+[\w\s,]+)?\s*\{(?:[^{}]|\{[^{}]*\})*\})",
-             # Enum declarations
-             r"((?:public|private|global)\s+enum\s+\w+\s*\{[^}]+\})",
-             # Annotated methods or classes
-             r"(@\w+(?:\([^)]*\))?\s*(?:public|private|global|protected).*?(?:\{(?:[^{}]|\{[^{}]*\})*\}|;))"
-         ]
-
-         for pattern in apex_patterns:
-             found = re.findall(pattern, text, re.DOTALL | re.MULTILINE)
-             code_blocks.extend(found)
-
-     # Strategy 3: Look for code between specific markers
-     if not code_blocks:
-         # Look for code after phrases like "corrected code:", "here's the code:", etc.
-         marker_patterns = [
-             r"(?:corrected|fixed|updated|converted|modified)\s+code\s*:\s*\n((?:(?:public|private|global|trigger).*?)(?=\n\n|\Z))",
-             r"(?:here'?s?|below is)\s+(?:the|your)\s+(?:corrected|fixed|updated)\s+\w+\s*:\s*\n((?:(?:public|private|global|trigger).*?)(?=\n\n|\Z))"
-         ]
-
-         for pattern in marker_patterns:
-             found = re.findall(pattern, text, re.DOTALL | re.IGNORECASE)
-             code_blocks.extend(found)
-
-     return '\n\n'.join(filter(None, code_blocks))
-
- def format_structured_explanation(response: str, code_output: str) -> str:
-     """Format the explanation in a structured, brief manner."""
-     # Extract key sections using regex
-     sections = {
-         "key_changes": "",
-         "critical_issues": "",
-         "warnings": ""
-     }
-
-     # Extract KEY CHANGES section
-     key_match = re.search(r"##\s*KEY CHANGES.*?\n((?:[-•]\s*.*?\n)+)", response, re.IGNORECASE | re.DOTALL)
-     if key_match:
-         sections["key_changes"] = key_match.group(1).strip()
-
-     # Extract CRITICAL ISSUES section
-     critical_match = re.search(r"##\s*CRITICAL ISSUES.*?\n((?:\d+\..*?\n)+)", response, re.IGNORECASE | re.DOTALL)
-     if critical_match:
-         sections["critical_issues"] = critical_match.group(1).strip()
-
-     # Extract WARNINGS section
-     warning_match = re.search(r"##\s*REMAINING WARNINGS.*?\n((?:[-•]\s*.*?\n)*)", response, re.IGNORECASE | re.DOTALL)
-     if warning_match:
-         sections["warnings"] = warning_match.group(1).strip()
-
-     # Build formatted explanation
-     formatted = "### Summary of Changes\n\n"
-
-     if sections["key_changes"]:
-         formatted += "**Key Changes:**\n" + sections["key_changes"] + "\n\n"
-
-     if sections["critical_issues"]:
-         formatted += "**Critical Issues Fixed:**\n" + sections["critical_issues"] + "\n\n"
-
-     if sections["warnings"]:
-         formatted += "**⚠️ Remaining Warnings:**\n" + sections["warnings"]
-
-     # If structured extraction failed, provide a brief summary
-     if not any(sections.values()):
-         # Fall back to a simple extraction
-         formatted = "### Code Correction Summary\n\n"
-         formatted += "The code has been corrected and optimized. "
-         formatted += "Check the code output for inline comments explaining specific changes.\n\n"
-         formatted += "For detailed analysis, see the Full Model Response."
-
-     return formatted.strip()
-
- def perform_skeptical_evaluation(code: str, context: str = "trigger") -> Dict[str, any]:
401
- """Perform skeptical evaluation of code looking for common issues."""
402
- evaluation = {
403
- "syntax_issues": [],
404
- "security_concerns": [],
405
- "performance_issues": [],
406
- "best_practice_violations": [],
407
- "b2b_commerce_issues": []
408
- }
409
-
410
- # Syntax validation
411
- is_valid, syntax_issues = validate_apex_syntax(code)
412
- evaluation["syntax_issues"] = syntax_issues
413
-
414
- # Security checks
415
- if re.search(r"without\s+sharing", code, re.IGNORECASE):
416
- evaluation["security_concerns"].append({
417
- "type": "warning",
418
- "message": "Class declared 'without sharing' - ensure this is intentional"
419
- })
420
-
421
- if not re.search(r"\.stripInaccessible\(", code) and re.search(r"(insert|update)\s+", code):
422
- evaluation["security_concerns"].append({
423
- "type": "warning",
424
- "message": "DML operations without stripInaccessible - potential FLS violation"
425
- })
426
-
427
- # Performance checks
428
- nested_loops = re.findall(r"for\s*\([^)]+\)\s*\{[^}]*for\s*\([^)]+\)", code, re.DOTALL)
429
- if nested_loops:
430
- evaluation["performance_issues"].append({
431
- "type": "warning",
432
- "message": f"Nested loops detected ({len(nested_loops)} occurrences) - review for O(n²) complexity"
433
- })
434
-
435
- # Check for missing test assertions (if it's a test class)
436
- if re.search(r"@isTest|testMethod", code, re.IGNORECASE):
437
- if not re.search(r"System\.assert|Assert\.", code):
438
- evaluation["best_practice_violations"].append({
439
- "type": "error",
440
- "message": "Test class without assertions - tests must verify behavior"
441
- })
442
-
443
- # B2B Commerce specific checks
444
- cloudcraze_refs = re.findall(B2B_COMMERCE_PATTERNS["cloudcraze_reference"], code)
445
- if cloudcraze_refs:
446
- evaluation["b2b_commerce_issues"].append({
447
- "type": "error",
448
- "message": f"CloudCraze references found ({len(set(cloudcraze_refs))} unique) - must be migrated to B2B LEX"
449
- })
450
-
451
- deprecated_methods = re.findall(B2B_COMMERCE_PATTERNS["deprecated_method"], code)
452
- if deprecated_methods:
453
- evaluation["b2b_commerce_issues"].append({
454
- "type": "error",
455
- "message": f"Deprecated CloudCraze methods found: {', '.join(set(deprecated_methods))}"
456
- })
457
-
458
- return evaluation
459
-
460
- def generate_test_cases(code_type: str, code: str) -> str:
-     """Generate test cases for the given code."""
-     if code_type == "trigger":
-         return f"""
- // Test class for the trigger
- @isTest
- private class Test_MigratedTrigger {{
-     @TestSetup
-     static void setup() {{
-         // Create test data
-         // TODO: Add specific test data setup
-     }}
-
-     @isTest
-     static void testBulkInsert() {{
-         // Test bulk insert scenario
-         List<SObject> testRecords = new List<SObject>();
-         for(Integer i = 0; i < 200; i++) {{
-             // TODO: Create test records
-         }}
-
-         Test.startTest();
-         insert testRecords;
-         Test.stopTest();
-
-         // TODO: Add assertions
-         System.assert(true, 'Bulk insert test needs implementation');
-     }}
-
-     @isTest
-     static void testBulkUpdate() {{
-         // Test bulk update scenario
-         // TODO: Implement bulk update test
-     }}
-
-     @isTest
-     static void testErrorHandling() {{
-         // Test error scenarios
-         // TODO: Test validation rules, required fields, etc.
-     }}
-
-     @isTest
-     static void testGovernorLimits() {{
-         // Test near governor limits
-         // TODO: Test with large data volumes
-     }}
- }}
- """
-     else:  # object conversion
-         return f"""
- // Test data creation for migrated object
- @isTest
- public class Test_MigratedObjectData {{
-     public static SObject createTestRecord() {{
-         // TODO: Create and return test instance
-         return null;
-     }}
-
-     public static List<SObject> createBulkTestRecords(Integer count) {{
-         List<SObject> records = new List<SObject>();
-         for(Integer i = 0; i < count; i++) {{
-             // TODO: Create test records
-         }}
-         return records;
-     }}
-
-     public static void validateMigrationMapping() {{
-         // Validate that all fields are properly mapped
-         // TODO: Add field mapping validation
-     }}
- }}
- """

  def correct_apex_trigger(model: str, trigger_code: str, progress=None) -> Tuple[str, str, str]:
      """Correct Apex Trigger with skeptical evaluation."""
@@ -640,6 +126,7 @@ def convert_cc_object(model: str, cc_object_code: str, progress=None) -> Tuple[s
          progress(0.3, desc="Analyzing CloudCraze structure...")

      # Check for CloudCraze patterns
+     import re
      has_cc_pattern = bool(re.search(B2B_COMMERCE_PATTERNS["cloudcraze_reference"], cc_object_code))
      if not has_cc_pattern:
          logger.warning("No obvious CloudCraze patterns found in input")
@@ -711,116 +198,6 @@ BE CONCISE. FOCUS ON ACTIONABLE INFORMATION.

      return response, code_output, explanation

- def extract_validation_metrics(validation_text: str) -> Optional[Dict[str, float]]:
-     """Enhanced JSON extraction for validation metrics."""
-     try:
-         # Strategy 1: Look for JSON after specific markers
-         json_patterns = [
-             r'(?:json|JSON|assessment|Assessment)[\s:]*({[^{}]*(?:{[^{}]*}[^{}]*)*})',
-             r'```json\s*({[^`]+})\s*```',
-             r'({[^{}]*"quality_rating"[^{}]*(?:{[^{}]*}[^{}]*)*})'
-         ]
-
-         for pattern in json_patterns:
-             matches = re.findall(pattern, validation_text, re.DOTALL)
-             for match in matches:
-                 try:
-                     data = json.loads(match)
-                     if "quality_rating" in data:
-                         return normalize_metrics(data)
-                 except json.JSONDecodeError:
-                     continue
-
-         # Strategy 2: Extract individual metrics if JSON parsing fails
-         metrics = {}
-         metric_patterns = {
-             "quality_rating": r"quality_rating[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "accuracy": r"accuracy[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "completeness": r"completeness[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "best_practices_alignment": r"best_practices_alignment[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "syntax_validity": r"syntax_validity[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "security_score": r"security_score[\"']?\s*:\s*(\d+(?:\.\d+)?)",
-             "performance_score": r"performance_score[\"']?\s*:\s*(\d+(?:\.\d+)?)"
-         }
-
-         for metric, pattern in metric_patterns.items():
-             match = re.search(pattern, validation_text, re.IGNORECASE)
-             if match:
-                 metrics[metric] = float(match.group(1))
-
-         if metrics:
-             return normalize_metrics(metrics)
-
-         return None
-
-     except Exception as e:
-         logger.error(f"Error extracting metrics: {e}")
-         return None
-
- def normalize_metrics(data: Dict) -> Dict[str, float]:
-     """Ensure metrics are in the correct format and range."""
-     normalized = {
-         "quality_rating": min(10, max(0, float(data.get("quality_rating", 0)))),
-         "accuracy": min(1.0, max(0.0, float(data.get("accuracy", 0.0)))),
-         "completeness": min(1.0, max(0.0, float(data.get("completeness", 0.0)))),
-         "best_practices_alignment": min(1.0, max(0.0, float(data.get("best_practices_alignment", 0.0)))),
-         "syntax_validity": min(1.0, max(0.0, float(data.get("syntax_validity", 0.0)))),
-         "security_score": min(1.0, max(0.0, float(data.get("security_score", 0.0)))),
-         "performance_score": min(1.0, max(0.0, float(data.get("performance_score", 0.0))))
-     }
-     return normalized
-
- def create_enhanced_radar_chart(metrics: Optional[Dict[str, float]]) -> Optional[object]:
-     """Create an enhanced radar chart with more metrics."""
-     if not metrics:
-         return None
-
-     # Create data for the radar chart
-     categories = [
-         "Quality",
-         "Accuracy",
-         "Completeness",
-         "Best Practices",
-         "Syntax Valid",
-         "Security",
-         "Performance"
-     ]
-
-     values = [
-         metrics.get("quality_rating", 0) / 10,  # Normalize to 0-1 scale
-         metrics.get("accuracy", 0),
-         metrics.get("completeness", 0),
-         metrics.get("best_practices_alignment", 0),
-         metrics.get("syntax_validity", 0),
-         metrics.get("security_score", 0),
-         metrics.get("performance_score", 0)
-     ]
-
-     # Create a DataFrame for plotting
-     df = pd.DataFrame({
-         'Category': categories,
-         'Value': values
-     })
-
-     # Create the radar chart
-     fig = px.line_polar(
-         df, r='Value', theta='Category', line_close=True,
-         range_r=[0, 1], title="Comprehensive Validation Assessment"
-     )
-     fig.update_traces(fill='toself', fillcolor='rgba(0, 100, 255, 0.2)')
-     fig.update_layout(
-         polar=dict(
-             radialaxis=dict(
-                 visible=True,
-                 range=[0, 1]
-             )
-         ),
-         showlegend=False,
-         height=400
-     )
-
-     return fig
-
  def validate_apex_trigger(validation_model: str, original_code: str, corrected_code: str) -> str:
      """Enhanced validation with skeptical evaluation and structured output."""
      if not validation_model or not original_code.strip() or not corrected_code.strip():
@@ -937,6 +314,57 @@ BE HARSH. Maximum 3 items per category. Focus on REAL issues.

      return call_llm(validation_model, prompt, temperature=0.1)

+ def create_enhanced_radar_chart(metrics: Optional[Dict[str, float]]) -> Optional[object]:
+     """Create an enhanced radar chart with more metrics."""
+     if not metrics:
+         return None
+
+     # Create data for the radar chart
+     categories = [
+         "Quality",
+         "Accuracy",
+         "Completeness",
+         "Best Practices",
+         "Syntax Valid",
+         "Security",
+         "Performance"
+     ]
+
+     values = [
+         metrics.get("quality_rating", 0) / 10,  # Normalize to 0-1 scale
+         metrics.get("accuracy", 0),
+         metrics.get("completeness", 0),
+         metrics.get("best_practices_alignment", 0),
+         metrics.get("syntax_validity", 0),
+         metrics.get("security_score", 0),
+         metrics.get("performance_score", 0)
+     ]
+
+     # Create a DataFrame for plotting
+     df = pd.DataFrame({
+         'Category': categories,
+         'Value': values
+     })
+
+     # Create the radar chart
+     fig = px.line_polar(
+         df, r='Value', theta='Category', line_close=True,
+         range_r=[0, 1], title="Comprehensive Validation Assessment"
+     )
+     fig.update_traces(fill='toself', fillcolor='rgba(0, 100, 255, 0.2)')
+     fig.update_layout(
+         polar=dict(
+             radialaxis=dict(
+                 visible=True,
+                 range=[0, 1]
+             )
+         ),
+         showlegend=False,
+         height=400
+     )
+
+     return fig
+
  def get_theme_styles(theme_choice: str) -> Tuple[str, str, str, str]:
      """Get theme styles for different UI elements."""
      themes = {
@@ -1225,13 +653,6 @@ def main():
      - **Primary Model**: Performs initial conversion with skeptical analysis
      - **Validation Model**: Double-checks work with harsh but fair evaluation

-     **⚡ Key Improvements:**
-     - Syntax validation before and after correction
-     - Security vulnerability detection (FLS, CRUD, injection)
-     - Performance analysis (O(n²) algorithms, governor limits)
-     - B2B Commerce specific migration validation
-     - Automatic test case suggestions
-
      **⚠️ Important**: Always review and test AI-generated code in a sandbox before production deployment.
      """
      )
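For orientation, a minimal usage sketch of the helpers that the slimmed-down app.py now imports, assuming the signatures visible in the removed code above (`call_llm(model, prompt, temperature, max_tokens)`, `validate_apex_syntax(code)`, `extract_code_blocks(text)`). `quick_correction` is a hypothetical illustration, not a function in this commit, and `api_client.py` / `utils.py` themselves are not shown in this diff.

```python
# Illustrative sketch only; assumes api_client.py and utils.py expose the
# helpers exactly as imported at the top of the new app.py.
from api_client import all_models, call_llm
from utils import extract_code_blocks, validate_apex_syntax

def quick_correction(trigger_code: str) -> str:
    """Hypothetical one-shot correction pass using the relocated helpers."""
    model = all_models[0]  # first entry of the Together/Anthropic model list
    is_valid, issues = validate_apex_syntax(trigger_code)  # (bool, list of issue dicts)
    hints = "" if is_valid else "Known issues: " + ", ".join(i["message"] for i in issues)
    prompt = f"Correct this Apex trigger. {hints}\n\n{trigger_code}"
    response = call_llm(model, prompt, temperature=0.1, max_tokens=1500)
    # Prefer fenced code from the model response; fall back to the raw text.
    return extract_code_blocks(response) or response
```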
 