from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import tempfile  # ✅ Import tempfile to create temp files

# ✅ Load a small model that runs acceptably on CPU
model_name = "Salesforce/codegen-350M-mono"  # Compact CodeGen variant for code review
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")  # Force CPU mode

def review_code(code_snippet):
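    """Ask the model to fix the snippet; return (reviewed_code, temp_file_path), with None as the path on errors."""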
    print("βœ… Received Code:", code_snippet)  # Debugging log
    
    # βœ… Improved prompt with strict format rules
    prompt = f"""
### Instruction:
You are a Python code reviewer. Your job is to analyze and fix errors in the provided Python code.
Make necessary corrections such as adding missing return statements, fixing syntax errors, and correcting logical mistakes. 
ONLY return the corrected function definition. DO NOT generate any new function calls or explanations.

### Input Code:
{code_snippet}

### Corrected Function:
"""

    # Process input
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")  # Move to CPU
    outputs = model.generate(
        **inputs,
        max_new_tokens=50,  # ✅ Limits output length to reduce hallucinations
        do_sample=False,
        num_beams=4,  # ✅ Beam search for better correction quality
        repetition_penalty=3.0  # ✅ Strong penalty to prevent repeated tokens
    )

    # Check if the model generated output
    if outputs is None or len(outputs) == 0:
        print("❌ Model did not generate output!")  # Debugging log
        return "Error: Model did not generate output.", None

    # ✅ Decode only the newly generated tokens, not the echoed prompt
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    reviewed_code = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
    print("✅ Generated Code:", reviewed_code)  # Debugging log

    # ✅ Ensure the output starts with "def" to confirm a function was returned
    if not reviewed_code.startswith("def"):
        return "Error: AI did not return a valid function.", None

    # ✅ Write reviewed code to a temporary file for download
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as temp_file:
        temp_file.write(reviewed_code)
        temp_file_path = temp_file.name

    return reviewed_code, temp_file_path  # ✅ Return reviewed code & file path


# ✅ Handle user input and return reviewed code
def check_code(input_code):
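    """Run review_code on the user input and return (original, reviewed, file_path) for the three UI outputs."""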
    reviewed_code, file_path = review_code(input_code)
    return input_code, reviewed_code, file_path  # ✅ Return original code, reviewed code, and file path

# ✅ Gradio UI with side-by-side comparison & download option
interface = gr.Interface(
    fn=check_code,
    inputs=gr.Textbox(label="Enter Python Code"),
    outputs=[
        gr.Textbox(label="Original Code", interactive=False),  # Left side
        gr.Textbox(label="Reviewed Code", interactive=False),  # Right side
        gr.File(label="Download Reviewed Code")  # ✅ Download button
    ],
    title="🚀 AI Code Reviewer",
    description="📌 Enter Python code and get a reviewed version. Download the reviewed code as a file.",
    allow_flagging="never"
)

# ✅ Launch app (fixes font issues and removes `share=True`)
interface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)