shrutikaP8497 committed
Commit 17484be · verified · 1 Parent(s): 2be3ca0

Update agent.py

Files changed (1)
  1. agent.py +9 -6
agent.py CHANGED
@@ -1,13 +1,14 @@
 import requests
 from typing import Dict
-from retriever import retrieve_relevant_context
+from retriever import retrieve_context
 from tools import clean_answer_with_prompt, build_prompt
+from transformers import pipeline
+
+# Load the model once to save time
+pipe = pipeline("text-generation", model="Qwen/Qwen1.5-1.8B-Chat")
 
-# You can later replace this with a more sophisticated tool-calling agent
 def simple_llm_call(prompt: str) -> str:
     """Send the prompt to the model and return the generated response"""
-    from transformers import pipeline
-    pipe = pipeline("text-generation", model="Qwen/Qwen1.5-1.8B-Chat")
     out = pipe(prompt, max_new_tokens=512)[0]['generated_text']
     return out
 
@@ -15,9 +16,11 @@ def run_agent_on_question(task: Dict) -> str:
     """Main agent loop: processes a single task and returns an answer"""
     task_id = task['task_id']
     question = task['question']
+    file_name = task.get('file_name', '')
 
-    # Step 1: Retrieve relevant context
-    context = retrieve_relevant_context(question)
+    # Step 1: Retrieve relevant context (as list of strings)
+    context_list = retrieve_context(task_id, question)
+    context = "\n".join(context_list)
 
     # Step 2: Build a prompt using system instruction + context + question
     prompt = build_prompt(question, context)
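
The retriever module itself is not part of this commit, so the behaviour of retrieve_context is not shown here; agent.py only assumes it takes (task_id, question) and returns a list of strings that can be joined with newlines. A minimal sketch of that assumed interface follows, using a placeholder in-memory store and keyword-overlap ranking rather than the repository's real retrieval logic:

# Hypothetical sketch of the interface agent.py now expects from retriever.py.
# The in-memory store and keyword-overlap scoring below are placeholders,
# not the repository's actual retrieval implementation.
from typing import Dict, List

_DOCUMENTS: Dict[str, List[str]] = {}  # task_id -> list of text chunks (assumed store)

def retrieve_context(task_id: str, question: str, top_k: int = 3) -> List[str]:
    """Return up to top_k chunks for this task, ranked by word overlap with the question."""
    chunks = _DOCUMENTS.get(task_id, [])
    question_words = set(question.lower().split())
    ranked = sorted(
        chunks,
        key=lambda chunk: len(question_words & set(chunk.lower().split())),
        reverse=True,
    )
    return ranked[:top_k]

Moving pipeline("text-generation", ...) to module scope also means the Qwen model is loaded once when agent.py is imported and reused across simple_llm_call invocations, instead of being re-created on every call as in the previous version.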