Update utils/generation.py
Browse files — utils/generation.py (+1 −1)
utils/generation.py
CHANGED
@@ -66,7 +66,7 @@ def generate_answer(query, retrieved_chunks, model=model, tokenizer=tokenizer):
 
     prompt = (f"You are a telecom regulations expert. Using the following context, answer the question:\n\n"
               f"Context:\n{context}\n\n"
-              f"Question: {query}\
+              f"Question: {query}\n\n")
 
     model_type = "seq2seq" if getattr(model.config, "is_encoder_decoder", False) else "causal"
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")