acecalisto3 committed on
Commit
3cc56f9
·
verified ·
1 Parent(s): 9b38389

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -137
app.py CHANGED
@@ -4,10 +4,6 @@ import os
4
  import requests
5
  from transformers import pipeline
6
  from sentence_transformers import SentenceTransformer, util
7
- import gradio as gr
8
- from huggingface_hub import InferenceClient
9
- from sentence_transformers import SentenceTransformer, util
10
- from transformers import pipeline
11
 
12
  # Hugging Face Inference Client
13
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
@@ -19,13 +15,11 @@ similarity_model = SentenceTransformer('all-mpnet-base-v2')
19
  def analyze_issues(issue_text: str, model_name: str, severity: str = None, programming_language: str = None) -> str:
20
  """
21
  Analyze issues and provide solutions.
22
-
23
  Args:
24
  issue_text (str): The issue text.
25
  model_name (str): The model name.
26
  severity (str, optional): The severity of the issue. Defaults to None.
27
  programming_language (str, optional): The programming language. Defaults to None.
28
-
29
  Returns:
30
  str: The analyzed issue and solution.
31
  """
@@ -65,11 +59,9 @@ Please provide a comprehensive resolution in the following format:
65
  def find_related_issues(issue_text: str, issues: list) -> list:
66
  """
67
  Find related issues.
68
-
69
  Args:
70
  issue_text (str): The issue text.
71
  issues (list): The list of issues.
72
-
73
  Returns:
74
  list: The list of related issues.
75
  """
@@ -86,104 +78,10 @@ def find_related_issues(issue_text: str, issues: list) -> list:
86
  def fetch_github_issues(github_api_token: str, github_username: str, github_repository: str) -> list:
87
  """
88
  Fetch GitHub issues.
89
-
90
  Args:
91
  github_api_token (str): The GitHub API token.
92
  github_username (str): The GitHub username.
93
  github_repository (str): The GitHub repository.
94
-
95
- Returns:
96
- list: The list of GitHub issues.
97
- """
98
- url = f"https://api.github.com/repos/{github_username}/{github_repository}/issues"
99
- headers = {
100
- "Authorization": f"Bearer {github_api_token}",
101
- "Accept": "application/vnd.github.v3+json"
102
- }
103
- response = requests.get(url, headers=headers)
104
- if __name__ == "__main__":
105
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta") # Correctly indented
106
-
107
- # Load a pre-trained model for sentence similarity
108
- similarity_model = SentenceTransformer('all-mpnet-base-v2')
109
-
110
- ### Function to analyze issues and provide solutions
111
- def analyze_issues(issue_text: str, model_name: str, severity: str = None, programming_language: str = None) -> str:
112
- """
113
- Analyze issues and provide solutions.
114
-
115
- Args:
116
- issue_text (str): The issue text.
117
- model_name (str): The model name.
118
- severity (str, optional): The severity of the issue. Defaults to None.
119
- programming_language (str, optional): The programming language. Defaults to None.
120
-
121
- Returns:
122
- str: The analyzed issue and solution.
123
- """
124
- prompt = f"""Issue: {issue_text}
125
- Severity: {severity}
126
- Programming Language: {programming_language}
127
- Please provide a comprehensive resolution in the following format:
128
- ## Problem Summary:
129
- (Concise summary of the issue)
130
- ## Root Cause Analysis:
131
- (Possible reasons for the issue)
132
- ## Solution Options:
133
- 1. **Option 1:** (Description)
134
- - Pros: (Advantages)
135
- - Cons: (Disadvantages)
136
- 2. **Option 2:** (Description)
137
- - Pros: (Advantages)
138
- - Cons: (Disadvantages)
139
- ## Recommended Solution:
140
- (The best solution with justification)
141
- ## Implementation Steps:
142
- 1. (Step 1)
143
- 2. (Step 2)
144
- 3. (Step 3)
145
- ## Verification Steps:
146
- 1. (Step 1)
147
- 2. (Step 2)
148
- """
149
- try:
150
- nlp = pipeline("text-generation", model=model_name, max_length=1000) # Increase max_length
151
- result = nlp(prompt)
152
- return result[0]['generated_text']
153
- except Exception as e:
154
- return f"Error analyzing issue with model {model_name}: {e}"
155
-
156
- ### Function to find related issues
157
- def find_related_issues(issue_text: str, issues: list) -> list:
158
- """
159
- Find related issues.
160
-
161
- Args:
162
- issue_text (str): The issue text.
163
- issues (list): The list of issues.
164
-
165
- Returns:
166
- list: The list of related issues.
167
- """
168
- issue_embedding = similarity_model.encode(issue_text)
169
- related_issues = []
170
- for issue in issues:
171
- title_embedding = similarity_model.encode(issue['title'])
172
- similarity = util.cos_sim(issue_embedding, title_embedding)[0][0]
173
- related_issues.append((issue, similarity))
174
- related_issues = sorted(related_issues, key=lambda x: x[1], reverse=True)
175
- return related_issues[:3] # Return top 3 most similar issues
176
-
177
- ### Function to fetch GitHub issues
178
- def fetch_github_issues(github_api_token: str, github_username: str, github_repository: str) -> list:
179
- """
180
- Fetch GitHub issues.
181
-
182
- Args:
183
- github_api_token (str): The GitHub API token.
184
- github_username (str): The GitHub username.
185
- github_repository (str): The GitHub repository.
186
-
187
  Returns:
188
  list: The list of GitHub issues.
189
  """
@@ -212,10 +110,9 @@ def respond(
212
  selected_model: str,
213
  severity: str,
214
  programming_language: str
215
- ) -> gr.Interface:
216
  """
217
  Handle chat responses.
218
-
219
  Args:
220
  command (str): The command.
221
  history (list[tuple[str, str]]): The chat history.
@@ -229,11 +126,6 @@ def respond(
229
  selected_model (str): The selected model.
230
  severity (str): The severity.
231
  programming_language (str): The programming language.
232
-
233
- Returns:
234
- gr.Interface: The chat response.
235
- programming_language (str): The programming language.
236
-
237
  Returns:
238
  str: The chat response.
239
  """
@@ -264,7 +156,11 @@ def respond(
264
  elif command == "/help":
265
  yield "Available commands:\n" \
266
  "- `/github`: Analyze a GitHub issue\n" \
267
- "- `/help`: Show this help message"
 
 
 
 
268
 
269
  elif command.isdigit() and issues:
270
  try:
@@ -282,6 +178,57 @@ def respond(
282
  yield f"Resolution for Issue '{issue['title']}':\n{resolution}\n\nRelated Issues:\n{related_issue_text}"
283
  except Exception as e:
284
  yield f"Error analyzing issue: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285
  else:
286
  messages.append({"role": "user", "content": command})
287
 
@@ -305,7 +252,7 @@ with gr.Blocks() as demo:
305
 
306
  # Define system_message here, after github_username and github_repository are defined
307
  system_message = gr.Textbox(
308
- value="You are GitBot, the Github project guardian angel. You resolve issues and propose implementation of feature requests",
309
  label="System message",
310
  )
311
 
@@ -314,36 +261,17 @@ with gr.Blocks() as demo:
314
  choices=[
315
  "Xenova/gpt-4o",
316
  "acecalisto3/InstructiPhi",
317
- "DevShubham/Codellama-70B-AWQ",
318
  "DevShubham/Codellama-13B-Instruct-GGUF",
319
- "DevShubham/Codellama-7B-Instruct-GGUF",
320
  "ricardo-larosa/SWE_Lite_dev-CodeLlama-34b",
321
  "DevsDoCode/Gemma-2b-Code-Instruct-Finetune-v0.1",
322
- "devcodes8/textClassifier",
323
- "mole-code/dev.langchain4j-starcoderbase-1b-fft",
324
- "mole-code/dev.langchain4j-codegen-2B-mono-fft",
325
- "mole-code/dev.langchain4j-starcoderbase-1b-prefix",
326
- "mole-code/org.springframework.ai-dev.langchain4j-com.theokanning.openai-codegen-2B-mono-fft-fft-fft",
327
- "mole-code/dev.langchain4j-codegen-2B-mono-prefix",
328
- "mole-code/dev.langchain4j-com.theokanning.openai-starcoderbase-1b-fft-fft",
329
- "mole-code/org.springframework.ai-dev.langchain4j-com.theokanning.openai-starcoderbase-1b-fft-fft-fft",
330
- "ahmedgongi/code_llama_instruct_devops_expert",
331
- "Dev2410/gemma-Code-Instruct-Finetune-test",
332
- "Dev2410/code_llama_main",
333
- "Dev2410/Code_llama",
334
- "codefuse-ai/CodeFuse-DevOps-Model-14B-Chat",
335
- "codefuse-ai/CodeFuse-DevOps-Model-7B-Base",
336
- "codefuse-ai/CodeFuse-DevOps-Model-7B-Chat",
337
- "codefuse-ai/CodeFuse-DevOps-Model-14B-Base",
338
- "keonju/devocean-code-llama2",
339
- "keonju/devocean-code-llama",
340
- "keonju/Devocean_code_llama_lora",
341
- "KuaFuAI-DevOpsGPT/codellama-7b-instruct-v1",
342
- "dev011brasil/code",
343
- "CodeJunior/devjunior"
344
  ],
345
  label="Select Model for Issue Resolution",
346
- value="DevShubham/Codellama-70B-AWQ"
347
  )
348
 
349
  # Severity Dropdown
@@ -358,7 +286,14 @@ with gr.Blocks() as demo:
358
 
359
  # Command Dropdown
360
  command_dropdown = gr.Dropdown(
361
- choices=["/github", "/help"], # Add your commands here
 
 
 
 
 
 
 
362
  label="Select Command",
363
  )
364
 
 
4
  import requests
5
  from transformers import pipeline
6
  from sentence_transformers import SentenceTransformer, util
 
 
 
 
7
 
8
  # Hugging Face Inference Client
9
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
15
  def analyze_issues(issue_text: str, model_name: str, severity: str = None, programming_language: str = None) -> str:
16
  """
17
  Analyze issues and provide solutions.
 
18
  Args:
19
  issue_text (str): The issue text.
20
  model_name (str): The model name.
21
  severity (str, optional): The severity of the issue. Defaults to None.
22
  programming_language (str, optional): The programming language. Defaults to None.
 
23
  Returns:
24
  str: The analyzed issue and solution.
25
  """
 
59
  def find_related_issues(issue_text: str, issues: list) -> list:
60
  """
61
  Find related issues.
 
62
  Args:
63
  issue_text (str): The issue text.
64
  issues (list): The list of issues.
 
65
  Returns:
66
  list: The list of related issues.
67
  """
 
78
  def fetch_github_issues(github_api_token: str, github_username: str, github_repository: str) -> list:
79
  """
80
  Fetch GitHub issues.
 
81
  Args:
82
  github_api_token (str): The GitHub API token.
83
  github_username (str): The GitHub username.
84
  github_repository (str): The GitHub repository.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  Returns:
86
  list: The list of GitHub issues.
87
  """
 
110
  selected_model: str,
111
  severity: str,
112
  programming_language: str
113
+ ) -> str:
114
  """
115
  Handle chat responses.
 
116
  Args:
117
  command (str): The command.
118
  history (list[tuple[str, str]]): The chat history.
 
126
  selected_model (str): The selected model.
127
  severity (str): The severity.
128
  programming_language (str): The programming language.
 
 
 
 
 
129
  Returns:
130
  str: The chat response.
131
  """
 
156
  elif command == "/help":
157
  yield "Available commands:\n" \
158
  "- `/github`: Analyze a GitHub issue\n" \
159
+ "- `/help`: Show this help message\n" \
160
+ "- `/generate_code [code description]`: Generate code based on the description\n" \
161
+ "- `/explain_concept [concept]`: Explain a concept\n" \
162
+ "- `/write_documentation [topic]`: Write documentation for a given topic\n" \
163
+ "- `/translate_code [code] to [target language]`: Translate code to another language"
164
 
165
  elif command.isdigit() and issues:
166
  try:
 
178
  yield f"Resolution for Issue '{issue['title']}':\n{resolution}\n\nRelated Issues:\n{related_issue_text}"
179
  except Exception as e:
180
  yield f"Error analyzing issue: {e}"
181
+
182
+ elif command.startswith("/generate_code"):
183
+ # Extract the code description from the command
184
+ code_description = command.replace("/generate_code", "").strip()
185
+ if not code_description:
186
+ yield "Please provide a description of the code you want to generate."
187
+ else:
188
+ prompt = f"Generate code for the following: {code_description}\nProgramming Language: {programming_language}"
189
+ try:
190
+ generated_code = analyze_issues(prompt, selected_model) # Reuse analyze_issues for code generation
191
+ yield f"```{programming_language}\n{generated_code}\n```"
192
+ except Exception as e:
193
+ yield f"Error generating code: {e}"
194
+
195
+ elif command.startswith("/explain_concept"):
196
+ concept = command.replace("/explain_concept", "").strip()
197
+ if not concept:
198
+ yield "Please provide a concept to explain."
199
+ else:
200
+ prompt = f"Explain the concept of {concept} in detail."
201
+ try:
202
+ explanation = analyze_issues(prompt, selected_model) # Reuse analyze_issues for explanation
203
+ yield explanation
204
+ except Exception as e:
205
+ yield f"Error explaining concept: {e}"
206
+
207
+ elif command.startswith("/write_documentation"):
208
+ topic = command.replace("/write_documentation", "").strip()
209
+ if not topic:
210
+ yield "Please provide a topic for documentation."
211
+ else:
212
+ prompt = f"Write comprehensive documentation for the following topic: {topic}"
213
+ try:
214
+ documentation = analyze_issues(prompt, selected_model)
215
+ yield documentation
216
+ except Exception as e:
217
+ yield f"Error writing documentation: {e}"
218
+
219
+ elif command.startswith("/translate_code"):
220
+ parts = command.replace("/translate_code", "").strip().split(" to ")
221
+ if len(parts) != 2:
222
+ yield "Invalid command format. Use: /translate_code [code] to [target language]"
223
+ else:
224
+ code, target_language = parts
225
+ prompt = f"Translate the following code to {target_language}:\n```\n{code}\n```"
226
+ try:
227
+ translated_code = analyze_issues(prompt, selected_model)
228
+ yield f"```{target_language}\n{translated_code}\n```"
229
+ except Exception as e:
230
+ yield f"Error translating code: {e}"
231
+
232
  else:
233
  messages.append({"role": "user", "content": command})
234
 
 
252
 
253
  # Define system_message here, after github_username and github_repository are defined
254
  system_message = gr.Textbox(
255
+ value="You are GitBot, the Github project guardian angel. You resolve issues and propose implementation of feature requests",
256
  label="System message",
257
  )
258
 
 
261
  choices=[
262
  "Xenova/gpt-4o",
263
  "acecalisto3/InstructiPhi",
 
264
  "DevShubham/Codellama-13B-Instruct-GGUF",
 
265
  "ricardo-larosa/SWE_Lite_dev-CodeLlama-34b",
266
  "DevsDoCode/Gemma-2b-Code-Instruct-Finetune-v0.1",
267
+ "google/flan-t5-xxl",
268
+ "facebook/bart-large-cnn",
269
+ "microsoft/CodeBERT-base",
270
+ "Salesforce/codegen-350M-mono",
271
+ "bigcode/starcoder"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
272
  ],
273
  label="Select Model for Issue Resolution",
274
+ value="microsoft/CodeBERT-base"
275
  )
276
 
277
  # Severity Dropdown
 
286
 
287
  # Command Dropdown
288
  command_dropdown = gr.Dropdown(
289
+ choices=[
290
+ "/github",
291
+ "/help",
292
+ "/generate_code",
293
+ "/explain_concept",
294
+ "/write_documentation",
295
+ "/translate_code"
296
+ ],
297
  label="Select Command",
298
  )
299