Update app.py
app.py
CHANGED
@@ -2,22 +2,33 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import torch
 
-# ✅ Load the fastest model
-model_name = "EleutherAI/pythia-70m" #
+# ✅ Load the fastest model on CPU
+model_name = "EleutherAI/pythia-70m" # Fastest model for code review
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")
+model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu") # Force CPU mode
 
-# ✅ Function to
+# ✅ Function to review Python code with debug logs
 def review_code(code_snippet):
-
-
+    print("✅ Received Code:", code_snippet) # Debugging log
+
+    # Process input
+    inputs = tokenizer(code_snippet, return_tensors="pt").to("cpu") # Move to CPU
+    outputs = model.generate(**inputs, max_length=80, do_sample=False, num_beams=3)
+
+    # Check if the model generated output
+    if outputs is None:
+        print("❌ Model did not generate output!") # Debugging log
+        return "Error: Model did not generate output."
+
     reviewed_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    print("✅ Generated Code:", reviewed_code) # Debugging log
+
     return reviewed_code
 
-# ✅
+# ✅ Handle user input and return reviewed code
 def check_code(input_code):
     reviewed_code = review_code(input_code)
-    return input_code, reviewed_code, reviewed_code # Return for UI
+    return input_code, reviewed_code, reviewed_code # Return all for UI & download
 
 # ✅ Gradio UI with Side-by-Side Comparison & Download Option
 interface = gr.Interface(
@@ -29,9 +40,9 @@ interface = gr.Interface(
         gr.File(label="Download Reviewed Code") # Download button
     ],
     title="🚀 AI Code Reviewer",
-    description="Enter Python code and get a reviewed version. Download the reviewed code as a file.",
+    description="🚀 Enter Python code and get a reviewed version. Download the reviewed code as a file.",
     allow_flagging="never"
 )
 
-# ✅ Launch
-interface.launch(
+# ✅ Launch app (Fixes font issues and removes `share=True`)
+interface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
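For a quick sanity check outside Gradio, the generation step added to review_code can be run on its own. The snippet below is a minimal sketch that reuses the model name, CPU placement, and generate settings from this commit; the sample code_snippet prompt is illustrative and not part of app.py.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Same model and CPU placement as app.py
model_name = "EleutherAI/pythia-70m"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")

# Illustrative input; any short Python snippet works
code_snippet = "def add(a, b):\n    return a + b"
inputs = tokenizer(code_snippet, return_tensors="pt").to("cpu")

# Same decoding settings as review_code: deterministic beam search, 80-token cap
outputs = model.generate(**inputs, max_length=80, do_sample=False, num_beams=3)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))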