htian01 committed on
Commit
2630ff3
·
verified ·
1 Parent(s): d26d037

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -102,7 +102,7 @@ def model_inference(input_dict, history):
102
  padding=True,
103
  ).to("cuda")
104
  # Set up streaming generation.
105
- streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
106
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
107
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
108
  thread.start()
@@ -144,7 +144,7 @@ def model_inference(input_dict, history):
144
  return_tensors="pt",
145
  padding=True,
146
  ).to("cuda")
147
- streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
148
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
149
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
150
  thread.start()
 
102
  padding=True,
103
  ).to("cuda")
104
  # Set up streaming generation.
105
+ streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)
106
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
107
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
108
  thread.start()
 
144
  return_tensors="pt",
145
  padding=True,
146
  ).to("cuda")
147
+ streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)
148
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
149
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
150
  thread.start()