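# Gradio app for a Hugging Face Space: collect Space repo IDs, store them in
# repo_ids.csv, then download the first repo, combine its .py/.md files, and
# run an LLM analysis on the combined text.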
import gradio as gr
import regex as re
import csv
import pandas as pd
from analyzer import combine_repo_files_for_llm, analyze_combined_file
from hf_utils import download_space_repo

def process_repo_input(text):
    if not text:
        return pd.DataFrame(columns=["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
    # Split by newlines and commas, strip whitespace
    repo_ids = [repo.strip() for repo in re.split(r'[\n,]+', text) if repo.strip()]
    # Write to CSV
    csv_filename = "repo_ids.csv"
    with open(csv_filename, mode="w", newline='', encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
        for repo_id in repo_ids:
            writer.writerow([repo_id, "", "", "", ""])
    # Read the CSV into a DataFrame to display
    df = pd.read_csv(csv_filename)
    return df

# Store the last entered repo ids in a global variable for button access
last_repo_ids = []
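
# Same as process_repo_input, but also remembers the parsed repo IDs so the
# analysis button below can use them.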
def process_repo_input_and_store(text):
    global last_repo_ids
    if not text:
        last_repo_ids = []
        return pd.DataFrame(columns=["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
    repo_ids = [repo.strip() for repo in re.split(r'[\n,]+', text) if repo.strip()]
    last_repo_ids = repo_ids
    csv_filename = "repo_ids.csv"
    with open(csv_filename, mode="w", newline='', encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
        for repo_id in repo_ids:
            writer.writerow([repo_id, "", "", "", ""])
    df = pd.read_csv(csv_filename)
    return df
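
# Download the first submitted repo, combine its files into one text file,
# show that text, and return the LLM analysis alongside it.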
def show_combined_repo_and_llm():
    if not last_repo_ids:
        return "No repo ID available. Please submit repo IDs first.", ""
    first_repo_id = last_repo_ids[0]
    # Download the repo contents locally
    try:
        download_space_repo(first_repo_id, local_dir="repo_files")
    except Exception as e:
        return f"Error downloading repo: {e}", ""
    # Combine the downloaded files into a single text file for the LLM
    txt_path = combine_repo_files_for_llm()
    try:
        with open(txt_path, "r", encoding="utf-8") as f:
            combined_content = f.read()
    except Exception as e:
        return f"Error reading {txt_path}: {e}", ""
    # Analyze the combined file with the LLM
    llm_output = analyze_combined_file(txt_path)
    return combined_content, llm_output

repo_id_input = gr.Textbox(label="Enter repo IDs (comma or newline separated)", lines=5, placeholder="repo1, repo2\nrepo3")
df_output = gr.Dataframe(headers=["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
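
# Lay out the UI: repo ID input and results table, then the combine/analyze section.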
with gr.Blocks() as demo:
    gr.Markdown("## Repo ID Input")
    repo_id_box = repo_id_input.render()
    df_box = df_output.render()
    submit_btn = gr.Button("Submit Repo IDs")
    submit_btn.click(process_repo_input_and_store, inputs=repo_id_box, outputs=df_box)

    gr.Markdown("---")
    gr.Markdown("## Combine and Display Repo Files")
    combine_btn = gr.Button("Download, Combine & Show .py/.md Files from First Repo and Analyze")
    combined_txt = gr.Textbox(label="Combined Repo Files", lines=20)
    llm_output_txt = gr.Textbox(label="LLM Analysis Output", lines=10)
    combine_btn.click(show_combined_repo_and_llm, inputs=None, outputs=[combined_txt, llm_output_txt])

demo.launch()