Thanush committed · dde83e1
Parent: a22013f
Increase max_new_tokens in generate method to 1000 for enhanced output capacity
Files changed: medbot/model.py (+1, -1)

medbot/model.py CHANGED:
@@ -30,7 +30,7 @@ class ModelManager:
             device_map="auto"
         )
 
-    def generate(self, prompt, max_new_tokens=
+    def generate(self, prompt, max_new_tokens=1000, temperature=0.5, top_p=0.9):
         self.load()
         inputs = self.tokenizer(prompt, return_tensors="pt")
         if torch.cuda.is_available():
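For context, the hunk above only shows the new signature; the body that actually consumes temperature and top_p is not part of the diff. Below is a minimal sketch of how a generate() method with these parameters is typically wired into Hugging Face Transformers. The model name, the load() details, the do_sample flag, and the decode step are assumptions for illustration, not the actual contents of medbot/model.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

class ModelManager:
    def __init__(self, model_name="example-org/example-med-llm"):  # hypothetical model id
        self.model_name = model_name
        self.tokenizer = None
        self.model = None

    def load(self):
        # Lazy-load once; device_map="auto" places layers on available devices.
        if self.model is None:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=torch.float16,
                device_map="auto"
            )

    def generate(self, prompt, max_new_tokens=1000, temperature=0.5, top_p=0.9):
        self.load()
        inputs = self.tokenizer(prompt, return_tensors="pt")
        if torch.cuda.is_available():
            # Move input tensors onto the same device as the model.
            inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
        output = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,  # raised to 1000 in this commit
            temperature=temperature,
            top_p=top_p,
            do_sample=True,  # sampling must be enabled for temperature/top_p to take effect
        )
        return self.tokenizer.decode(output[0], skip_special_tokens=True)

Raising max_new_tokens only raises the cap on tokens generated per call; longer completions cost proportionally more GPU time and KV-cache memory, and generation can still stop earlier at an end-of-sequence token.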