import gradio as gr
from huggingface_hub import InferenceClient
import os
import requests
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
from typing import Iterator

# Hugging Face Inference Client used for free-form chat responses.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Load a pre-trained model for sentence similarity (used to find related issues).
similarity_model = SentenceTransformer('all-mpnet-base-v2')

# Issues fetched by the `/github` command. This must be module-level state that
# survives between `respond` calls: the user first runs `/github` to list the
# issues, then sends the issue number in a *separate* message. (Previously this
# was reset to [] at the top of every `respond` call, so the numeric follow-up
# command could never see the fetched issues.)
issues: list = []


### Function to analyze issues and provide solutions
def analyze_issues(issue_text: str, model_name: str, severity: str = None,
                   programming_language: str = None) -> str:
    """
    Analyze an issue with a local text-generation pipeline and return a
    structured resolution.

    Args:
        issue_text (str): The issue text.
        model_name (str): The Hugging Face model name to load.
        severity (str, optional): The severity of the issue. Defaults to None.
        programming_language (str, optional): The programming language.
            Defaults to None.

    Returns:
        str: The generated resolution text, or an error message if the
        pipeline fails to load or generate.
    """
    prompt = f"""Issue: {issue_text}
Severity: {severity}
Programming Language: {programming_language}
Please provide a comprehensive resolution in the following format:
## Problem Summary:
(Concise summary of the issue)
## Root Cause Analysis:
(Possible reasons for the issue)
## Solution Options:
1. **Option 1:** (Description)
   - Pros: (Advantages)
   - Cons: (Disadvantages)
2. **Option 2:** (Description)
   - Pros: (Advantages)
   - Cons: (Disadvantages)
## Recommended Solution:
(The best solution with justification)
## Implementation Steps:
1. (Step 1)
2. (Step 2)
3. (Step 3)
## Verification Steps:
1. (Step 1)
2. (Step 2)
"""
    try:
        # NOTE(review): loading the pipeline on every call is expensive;
        # consider caching per model_name if this becomes a hot path.
        nlp = pipeline("text-generation", model=model_name, max_length=1000)  # Increase max_length
        result = nlp(prompt)
        return result[0]['generated_text']
    except Exception as e:
        return f"Error analyzing issue with model {model_name}: {e}"


### Function to find related issues
def find_related_issues(issue_text: str, issues: list) -> list:
    """
    Find the issues most semantically similar to the given text.

    Args:
        issue_text (str): The issue text to compare against.
        issues (list): The list of issue dicts (each with a 'title' key).

    Returns:
        list: Up to three (issue, similarity) tuples, sorted by descending
        cosine similarity.
    """
    issue_embedding = similarity_model.encode(issue_text)
    related_issues = []
    for issue in issues:
        title_embedding = similarity_model.encode(issue['title'])
        similarity = util.cos_sim(issue_embedding, title_embedding)[0][0]
        related_issues.append((issue, similarity))
    related_issues = sorted(related_issues, key=lambda x: x[1], reverse=True)
    return related_issues[:3]  # Return top 3 most similar issues


### Function to fetch GitHub issues
def fetch_github_issues(github_api_token: str, github_username: str,
                        github_repository: str) -> list:
    """
    Fetch the open issues of a GitHub repository via the REST API.

    Args:
        github_api_token (str): The GitHub API token.
        github_username (str): The GitHub username (repository owner).
        github_repository (str): The GitHub repository name.

    Returns:
        list: The list of issue dicts as returned by the GitHub API.

    Raises:
        Exception: If the API responds with a non-200 status code.
    """
    url = f"https://api.github.com/repos/{github_username}/{github_repository}/issues"
    headers = {
        "Authorization": f"Bearer {github_api_token}",
        "Accept": "application/vnd.github.v3+json"
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.json()
    else:
        raise Exception(f"Error fetching issues: {response.status_code}")


### Function to handle chat responses
def respond(
    command: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    github_api_token: str,
    github_username: str,
    github_repository: str,
    selected_model: str,
    severity: str,
    programming_language: str
) -> Iterator[str]:
    """
    Handle chat commands and stream responses back to the UI.

    This is a generator: Gradio consumes the yielded strings as a streaming
    reply. Recognized commands are `/github`, `/help`, `/generate_code`,
    `/explain_concept`, `/write_documentation`, `/translate_code`, and a bare
    issue number (after `/github` has populated the issue list). Anything
    else is forwarded to the chat model.

    Args:
        command (str): The command or free-form message.
        history (list[tuple[str, str]]): The chat history as (user, bot) pairs.
        system_message (str): The system message.
        max_tokens (int): The maximum number of tokens to generate.
        temperature (float): The sampling temperature.
        top_p (float): The top-p (nucleus sampling) value.
        github_api_token (str): The GitHub API token.
        github_username (str): The GitHub username.
        github_repository (str): The GitHub repository.
        selected_model (str): The model used for issue analysis.
        severity (str): The issue severity.
        programming_language (str): The programming language.

    Yields:
        str: Chunks of the (possibly streaming) response text.
    """
    global GITHUB_API_TOKEN
    GITHUB_API_TOKEN = github_api_token
    # Do NOT reset `issues` here: it must persist between the `/github`
    # command and the numeric follow-up that selects an issue to analyze.
    global issues

    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    if command == "/github":
        if not github_api_token:
            yield "Please enter your GitHub API token first. [Click here to get your token](https://github.com/settings/tokens)"
        else:
            try:
                issues = fetch_github_issues(github_api_token, github_username, github_repository)
                issue_list = "\n".join([f"{i+1}. {issue['title']}" for i, issue in enumerate(issues)])
                yield f"Available GitHub Issues:\n{issue_list}\n\nEnter the issue number to analyze:"
            except Exception as e:
                yield f"Error fetching GitHub issues: {e}"
    elif command == "/help":
        yield "Available commands:\n" \
              "- `/github`: Analyze a GitHub issue\n" \
              "- `/help`: Show this help message\n" \
              "- `/generate_code [code description]`: Generate code based on the description\n" \
              "- `/explain_concept [concept]`: Explain a concept\n" \
              "- `/write_documentation [topic]`: Write documentation for a given topic\n" \
              "- `/translate_code [code] to [target language]`: Translate code to another language"
    elif command.isdigit() and issues:
        try:
            issue_number = int(command) - 1
            issue = issues[issue_number]
            # `body` can be None for empty issues; guard the concatenation.
            issue_text = issue['title'] + "\n\n" + (issue['body'] or "")
            resolution = analyze_issues(issue_text, selected_model, severity, programming_language)
            # Find and display related issues
            related_issues = find_related_issues(issue_text, issues)
            related_issue_text = "\n".join(
                [f"- {issue['title']} (Similarity: {similarity:.2f})"
                 for issue, similarity in related_issues]
            )
            yield f"Resolution for Issue '{issue['title']}':\n{resolution}\n\nRelated Issues:\n{related_issue_text}"
        except Exception as e:
            yield f"Error analyzing issue: {e}"
    elif command.startswith("/generate_code"):
        # Extract the code description from the command
        code_description = command.replace("/generate_code", "").strip()
        if not code_description:
            yield "Please provide a description of the code you want to generate."
        else:
            prompt = f"Generate code for the following: {code_description}\nProgramming Language: {programming_language}"
            try:
                generated_code = analyze_issues(prompt, selected_model)  # Reuse analyze_issues for code generation
                yield f"```{programming_language}\n{generated_code}\n```"
            except Exception as e:
                yield f"Error generating code: {e}"
    elif command.startswith("/explain_concept"):
        concept = command.replace("/explain_concept", "").strip()
        if not concept:
            yield "Please provide a concept to explain."
        else:
            prompt = f"Explain the concept of {concept} in detail."
            try:
                explanation = analyze_issues(prompt, selected_model)  # Reuse analyze_issues for explanation
                yield explanation
            except Exception as e:
                yield f"Error explaining concept: {e}"
    elif command.startswith("/write_documentation"):
        topic = command.replace("/write_documentation", "").strip()
        if not topic:
            yield "Please provide a topic for documentation."
        else:
            prompt = f"Write comprehensive documentation for the following topic: {topic}"
            try:
                documentation = analyze_issues(prompt, selected_model)
                yield documentation
            except Exception as e:
                yield f"Error writing documentation: {e}"
    elif command.startswith("/translate_code"):
        parts = command.replace("/translate_code", "").strip().split(" to ")
        if len(parts) != 2:
            yield "Invalid command format. Use: /translate_code [code] to [target language]"
        else:
            code, target_language = parts
            prompt = f"Translate the following code to {target_language}:\n```\n{code}\n```"
            try:
                translated_code = analyze_issues(prompt, selected_model)
                yield f"```{target_language}\n{translated_code}\n```"
            except Exception as e:
                yield f"Error translating code: {e}"
    else:
        # Free-form chat: stream tokens from the hosted model.
        messages.append({"role": "user", "content": command})
        response = ""
        for message in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # `delta.content` may be None on some stream chunks (e.g. role-only
            # deltas); treat those as empty to avoid a TypeError.
            token = message.choices[0].delta.content or ""
            response += token
            yield response


with gr.Blocks() as demo:
    with gr.Row():
        github_api_token = gr.Textbox(label="GitHub API Token", type="password")
        github_username = gr.Textbox(label="GitHub Username")
        github_repository = gr.Textbox(label="GitHub Repository")

    # Define system_message here, after github_username and github_repository are defined
    system_message = gr.Textbox(
        value="You are GitBot, the Github project guardian angel. You resolve issues and propose implementation of feature requests",
        label="System message",
    )

    # Model Selection Dropdown
    model_dropdown = gr.Dropdown(
        choices=[
            "Xenova/gpt-4o",
            "acecalisto3/InstructiPhi",
            "DevShubham/Codellama-13B-Instruct-GGUF",
            "ricardo-larosa/SWE_Lite_dev-CodeLlama-34b",
            "DevsDoCode/Gemma-2b-Code-Instruct-Finetune-v0.1",
            "google/flan-t5-xxl",
            "facebook/bart-large-cnn",
            "microsoft/CodeBERT-base",
            "Salesforce/codegen-350M-mono",
            "bigcode/starcoder"
        ],
        label="Select Model for Issue Resolution",
        value="microsoft/CodeBERT-base"
    )

    # Severity Dropdown
    severity_dropdown = gr.Dropdown(
        choices=["Critical", "Major", "Minor", "Trivial"],
        label="Severity",
        value=None  # Default to no severity selected
    )

    # Programming Language Textbox
    programming_language_textbox = gr.Textbox(label="Programming Language")

    # Command Dropdown
    command_dropdown = gr.Dropdown(
        choices=[
            "/github",
            "/help",
            "/generate_code",
            "/explain_concept",
            "/write_documentation",
            "/translate_code"
        ],
        label="Select Command",
    )

    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[
            command_dropdown,  # Use command_dropdown instead of a regular message input
            system_message,
            gr.Slider(minimum=1, maximum=8192, value=2048, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.71, step=0.1, label="Temperature"),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,  # was 1.1, which exceeded the slider's whole range
                label="Top-p (nucleus sampling)",
            ),
            github_api_token,
            github_username,
            github_repository,
            model_dropdown,
            severity_dropdown,
            programming_language_textbox
        ],
    )

if __name__ == "__main__":
    demo.queue().launch(share=True, server_name="0.0.0.0", server_port=7860)