shrutikaP8497 committed on
Commit 1c43eb2 · verified · 1 Parent(s): 6a6387f

Update agent.py

Files changed (1):
  1. agent.py +20 -5
agent.py CHANGED
@@ -11,11 +11,7 @@ from transformers import pipeline
 # Load once globally for efficiency
 qa_pipeline = pipeline("text2text-generation", model="Qwen/Qwen1.5-1.8B-Chat")
 
-def run_agent_on_question(question_obj):
-    question = question_obj["question"]
-    prompt = f"Answer this question:\n{question}"
-    result = qa_pipeline(prompt, max_new_tokens=50, do_sample=False)[0]["generated_text"]
-    return result.strip().replace("Answer: ", "").split("\n")[0]
+
 
 def simple_llm_call(prompt: str) -> str:
     """
@@ -24,4 +20,23 @@ def simple_llm_call(prompt: str) -> str:
     out = pipe(prompt, max_new_tokens=512)[0]['generated_text']
     return out
 
+# agent.py
+def run_agent_on_question(task: dict) -> str:
+    """
+    task = {
+        'task_id': 'abc123',
+        'question': 'What is the capital of France?'
+    }
+    """
+    question = task['question']
+
+    # ✅ Your LLM prompt logic goes here (minimal working example)
+    import transformers
+    from transformers import pipeline
+
+    llm = pipeline("text-generation", model="tiiuae/falcon-7b-instruct", max_new_tokens=100)
+    result = llm(question)[0]['generated_text']
+
+    # Return a trimmed response (just the answer, no explanation, no prefix)
+    return result.strip()
 
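For reference, the run_agent_on_question added here takes a task dict with 'task_id' and 'question' keys (as its docstring shows) and returns a plain answer string. A minimal caller sketch follows; the driver loop, the second task entry, and the 'submitted_answer' field name are illustrative assumptions, not part of this commit.

# Hypothetical driver loop; only run_agent_on_question comes from this commit's agent.py.
from agent import run_agent_on_question

tasks = [
    {"task_id": "abc123", "question": "What is the capital of France?"},
    {"task_id": "def456", "question": "Who wrote Pride and Prejudice?"},  # illustrative second task
]

results = []
for task in tasks:
    answer = run_agent_on_question(task)  # returns the trimmed generated text
    results.append({"task_id": task["task_id"], "submitted_answer": answer})  # field name is an assumption

for entry in results:
    print(entry["task_id"], "->", entry["submitted_answer"])

Note that the new function constructs its text-generation pipeline inside the call, so each invocation reloads tiiuae/falcon-7b-instruct; reusing a module-level pipeline (as the file's "# Load once globally for efficiency" comment does for qa_pipeline) would avoid repeating that load across tasks.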