shrutikaP8497 committed
Commit abecf76 · verified · 1 Parent(s): 1c43eb2

Update agent.py

Files changed (1):
  1. agent.py +36 -42
agent.py CHANGED
@@ -1,42 +1,36 @@
- import requests
- from typing import Dict
- from retriever import retrieve_context  # ✅ Corrected import
- from tools import clean_answer_with_prompt, build_prompt
- from transformers import pipeline
-
- # Load the model once to save time
- pipe = pipeline("text-generation", model="tiiuae/falcon-7b-instruct")
- from transformers import pipeline
-
- # Load once globally for efficiency
- qa_pipeline = pipeline("text2text-generation", model="Qwen/Qwen1.5-1.8B-Chat")
-
-
-
- def simple_llm_call(prompt: str) -> str:
-     """
-     Send the prompt to the model and return the generated response.
-     """
-     out = pipe(prompt, max_new_tokens=512)[0]['generated_text']
-     return out
-
- # agent.py
- def run_agent_on_question(task: dict) -> str:
-     """
-     task = {
-         'task_id': 'abc123',
-         'question': 'What is the capital of France?'
-     }
-     """
-     question = task['question']
-
-     # Your LLM prompt logic goes here (minimal working example)
-     import transformers
-     from transformers import pipeline
-
-     llm = pipeline("text-generation", model="tiiuae/falcon-7b-instruct", max_new_tokens=100)
-     result = llm(question)[0]['generated_text']
-
-     # Return a trimmed response (just the answer, no explanation, no prefix)
-     return result.strip()
 
+ from tools import get_tools
+ from retriever import retrieve_context
+ from config import LLM_MODEL
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ class Agent:
+     def __init__(self):
+         self.model = AutoModelForCausalLM.from_pretrained(
+             LLM_MODEL,
+             device_map="auto",
+             trust_remote_code=True
+         )
+         self.tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL)
+         self.generator = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)
+         self.tools = get_tools()
+
+     def generate_answer(self, question: str, context: str = "") -> str:
+         prompt = f"""
+ You are an expert AI agent answering academic and logical questions concisely.
+ Use the context below to help answer the user's question.
+
+ Context:
+ {context}
+
+ Question:
+ {question}
+
+ Answer:
+ """
+         outputs = self.generator(prompt, max_new_tokens=100, do_sample=False)
+         return outputs[0]['generated_text'].split("Answer:")[-1].strip()
+
+     def run(self, task: dict) -> str:
+         question = task.get("question", "")
+         context = retrieve_context(task)
+         return self.generate_answer(question, context)
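
For reference, a minimal usage sketch of the refactored Agent (a hypothetical caller, not part of this commit), assuming config.LLM_MODEL resolves to a causal LM that AutoModelForCausalLM can load and that tools.get_tools and retriever.retrieve_context behave as the imports above suggest. The task shape follows the docstring of the removed run_agent_on_question:

# Hypothetical caller for the refactored Agent (not part of this commit).
from agent import Agent

agent = Agent()  # model, tokenizer, and tools load once, at construction

# Task shape taken from the docstring of the removed run_agent_on_question
task = {
    "task_id": "abc123",
    "question": "What is the capital of France?",
}

print(agent.run(task))  # retrieves context, then returns the trimmed answer

The main design change in this diff: the removed run_agent_on_question rebuilt a falcon-7b text-generation pipeline on every call, while Agent loads the model once in __init__ and reuses it; generate_answer also splits the output on "Answer:" so only the completion, not the echoed prompt, is returned.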