|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import gradio as gr |
|
import torch |
|
|
|
|
|
# Model used for the "review": a small causal LM so the demo loads quickly.
model_name = "EleutherAI/pythia-70m"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Pick the device at runtime: the original hard-coded "cuda", which crashes on
# CPU-only machines. float16 is only reliable on GPU, so fall back to float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=dtype).to(device)
|
|
|
|
|
def review_code(code_snippet):
    """Run the language model over *code_snippet* and return the decoded text.

    Args:
        code_snippet: Source code (string) used as the generation prompt.

    Returns:
        The model's deterministic beam-search continuation decoded to a plain
        string. Note the decoded output includes the prompt itself, since the
        full output sequence is decoded.
    """
    # Use the model's own device instead of hard-coding "cuda" so this also
    # works when the model was loaded on CPU.
    inputs = tokenizer(code_snippet, return_tensors="pt").to(model.device)

    # max_new_tokens bounds only the *generated* length. The original used
    # max_length=80, which also counts the prompt tokens — prompts near or
    # over 80 tokens produced little or no output.
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        outputs = model.generate(
            **inputs,
            max_new_tokens=80,
            do_sample=False,
            num_beams=3,
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
def check_code(input_code):
    """Gradio handler: review *input_code* and produce all three outputs.

    Args:
        input_code: The code string entered in the input textbox.

    Returns:
        A 3-tuple of (original code, reviewed code, path to a file containing
        the reviewed code). The third element feeds a gr.File component, which
        requires a filesystem path — the original returned the raw string,
        which breaks the download link.
    """
    import tempfile

    reviewed_code = review_code(input_code)

    # gr.File needs a path on disk, not the text itself. delete=False so the
    # file survives long enough for Gradio to serve it for download.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False, encoding="utf-8"
    ) as f:
        f.write(reviewed_code)
        file_path = f.name

    return input_code, reviewed_code, file_path
|
|
|
|
|
# Build the web UI. The three outputs mirror check_code's 3-tuple: the echoed
# input, the reviewed text, and a downloadable file with the reviewed code.
interface = gr.Interface(
    fn=check_code,
    inputs=gr.Textbox(label="Enter Python Code"),
    outputs=[
        gr.Textbox(label="Original Code", interactive=False),
        gr.Textbox(label="Reviewed Code", interactive=False),
        gr.File(label="Download Reviewed Code"),
    ],
    # NOTE(review): "π" looks like a mojibake'd emoji — confirm intended glyph.
    title="π AI Code Reviewer",
    description="Enter Python code and get a reviewed version. Download the reviewed code as a file.",
    allow_flagging="never",
)

# share=True creates a temporary public tunnel URL. The original also passed
# server_timeout=40, which is not a launch() parameter and raises TypeError
# on current Gradio versions, so it has been removed.
interface.launch(share=True)
|
|