from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import tempfile  # ✅ Used to create temp files for the download link

# ✅ Load a small model that runs acceptably on CPU
model_name = "Salesforce/codegen-350M-mono"  # Small code model chosen for CPU inference speed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")  # Force CPU mode


def review_code(code_snippet):
    print("✅ Received Code:", code_snippet)  # Debugging log

    # ✅ Instruction prompt that constrains the model to reviewing only
    prompt = f"""
### Instruction:
You are a Python code reviewer. Your job is to analyze and fix errors in the provided Python code.
Make necessary corrections such as adding missing return statements, fixing syntax errors, and correcting logical mistakes.
Do NOT generate new functions or extra text; only return the fixed version of the provided code.

### Input Code:
{code_snippet}

### Reviewed Code:
"""

    # Process input on CPU
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,  # ✅ Limits only the generated continuation; max_length would also count the prompt tokens
        do_sample=False,
        num_beams=4,  # ✅ Beam search for better correction quality
        repetition_penalty=2.5  # ✅ Discourages repeated/unnecessary output
    )

    # Check that the model actually generated output
    if outputs is None or len(outputs) == 0:
        print("❌ Model did not generate output!")  # Debugging log
        return "Error: Model did not generate output.", None  # Second value keeps the two-item return shape

    # Decode only the newly generated tokens so the prompt is not echoed back to the user
    prompt_length = inputs["input_ids"].shape[1]
    reviewed_code = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True).strip()
    print("✅ Generated Code:", reviewed_code)  # Debugging log

    # ✅ Write reviewed code to a temporary file for download
    temp_file_path = tempfile.NamedTemporaryFile(delete=False, suffix=".txt").name
    with open(temp_file_path, "w") as temp_file:
        temp_file.write(reviewed_code)

    return reviewed_code, temp_file_path  # ✅ Return reviewed code & file path

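# A minimal sketch (not executed) of calling review_code() directly, outside the
# Gradio UI. The sample snippet is hypothetical; any short buggy function works:
#
#   sample = "def add(a, b):\n    a + b"   # missing return statement
#   fixed, path = review_code(sample)
#   print(fixed)  # model's suggested correction
#   print(path)   # temp file containing the same text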

# ✅ Handle user input and return reviewed code
def check_code(input_code):
    reviewed_code, file_path = review_code(input_code)
    return input_code, reviewed_code, file_path  # ✅ Original code, reviewed code, and file path for download

# ✅ Gradio UI with side-by-side comparison and a download option
interface = gr.Interface(
    fn=check_code,
    inputs=gr.Textbox(label="Enter Python Code"),
    outputs=[
        gr.Textbox(label="Original Code", interactive=False),  # Left side
        gr.Textbox(label="Reviewed Code", interactive=False),  # Right side
        gr.File(label="Download Reviewed Code")  # ✅ Download button
    ],
    title="🚀 AI Code Reviewer",
    description="📌 Enter Python code and get a reviewed version. Download the reviewed code as a file.",
    allow_flagging="never"
)
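
# Usage note (assuming this file is saved as app.py): run `python app.py` and open
# http://localhost:7860 in a browser. Binding server_name to "0.0.0.0" also makes the
# UI reachable from other machines on the local network.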

# ✅ Launch the app locally on port 7860 without a public share link
interface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)