|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import gradio as gr |
|
import torch |
|
import tempfile |
|
|
|
|
|
# Small (~70M-parameter) causal LM — chosen so the demo runs on CPU without a GPU.
model_name = "EleutherAI/pythia-70m"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Explicitly pin the model to CPU; this deployment assumes no accelerator.
model = AutoModelForCausalLM.from_pretrained(model_name).to("cpu")
|
|
|
|
|
def review_code(code_snippet):
    """Generate a "review" of *code_snippet* with the language model.

    Parameters
    ----------
    code_snippet : str
        The Python code entered by the user.

    Returns
    -------
    tuple[str, str | None]
        ``(reviewed_code, temp_file_path)``. On generation failure the first
        element is an error message and the path is ``None`` — always a
        2-tuple so the caller's unpacking never breaks.
    """
    print("Received Code:", code_snippet)

    inputs = tokenizer(code_snippet, return_tensors="pt").to("cpu")
    # Deterministic beam search (no sampling) so the same input always
    # produces the same review.
    outputs = model.generate(
        **inputs,
        max_length=50,
        do_sample=False,
        num_beams=3,
        repetition_penalty=2.0,
    )

    if outputs is None:
        print("Model did not generate output!")
        # Return a 2-tuple here as well: the original returned a bare string,
        # which crashed check_code's two-value unpacking on this path.
        return "Error: Model did not generate output.", None

    reviewed_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print("Generated Code:", reviewed_code)

    # Write the review into a named temp file. delete=False keeps the file
    # on disk after close so Gradio can serve it for download. Writing
    # through the context manager avoids the original's leaked first handle
    # (NamedTemporaryFile(...).name created a file object that was never
    # closed, then reopened the path separately).
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".txt", delete=False
    ) as temp_file:
        temp_file.write(reviewed_code)
        temp_file_path = temp_file.name

    return reviewed_code, temp_file_path
|
|
|
|
|
def check_code(input_code):
    """Gradio handler: echo the submitted code, then append the model's
    review text and the downloadable file path from review_code."""
    return (input_code, *review_code(input_code))
|
|
|
|
|
# Build the Gradio UI: one code input, three outputs (echoed input,
# generated review, downloadable file). The garbled mojibake characters
# ("π ...") in the original title/description were encoding residue of
# emoji and have been removed.
interface = gr.Interface(
    fn=check_code,
    inputs=gr.Textbox(label="Enter Python Code"),
    outputs=[
        gr.Textbox(label="Original Code", interactive=False),
        gr.Textbox(label="Reviewed Code", interactive=False),
        gr.File(label="Download Reviewed Code")
    ],
    title="AI Code Reviewer",
    description="Enter Python code and get a reviewed version. Download the reviewed code as a file.",
    # NOTE(review): allow_flagging is deprecated in Gradio 4.x in favor of
    # flagging_mode="never" — confirm against the installed gradio version.
    allow_flagging="never"
)

# 0.0.0.0 binds all interfaces so the app is reachable from outside a
# container; show_error surfaces handler exceptions in the browser.
interface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
|
|