Update utils/generation.py
utils/generation.py  +1 -1
@@ -72,7 +72,7 @@ def generate_answer(query, retrieved_chunks, model=model, tokenizer=tokenizer):
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
     outputs = model.generate(
         **inputs,
-        max_new_tokens=512,
+        #max_new_tokens=512,
         num_return_sequences=1,
         no_repeat_ngram_size=2
     )
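For reference, a minimal sketch of how generate_answer might look with this hunk applied. The model checkpoint, prompt template, and decoding step are assumptions for illustration; only the signature and the lines shown in the hunk come from the diff.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: any causal LM checkpoint works here; the repository's actual model is not shown in the diff.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")

def generate_answer(query, retrieved_chunks, model=model, tokenizer=tokenizer):
    # Assumed prompt format: retrieved context first, then the question.
    context = "\n\n".join(retrieved_chunks)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    outputs = model.generate(
        **inputs,
        # max_new_tokens=512,    # commented out by this commit; see note below
        num_return_sequences=1,
        no_repeat_ngram_size=2
    )
    # Assumption: decode only the newly generated tokens, skipping the prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Note that with max_new_tokens commented out, model.generate falls back to the length limit in the model's generation_config; unless the checkpoint overrides it, transformers defaults to max_length=20, which counts the prompt tokens as well and will therefore truncate most answers.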