Metal3d committed on
Commit
fb5a7c9
·
unverified ·
1 Parent(s): d69fd10

Changing the loop methodology

Browse files
Files changed (1) hide show
  1. main.py +8 -5
main.py CHANGED
@@ -65,10 +65,15 @@ def _generate(history):
65
  add_generation_prompt=True,
66
  )
67
 
 
 
 
 
 
 
68
  model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
69
  streamer = AsyncTextIteratorStreamer(tokenizer, skip_special_tokens=True)
70
 
71
- loop = asyncio.new_event_loop()
72
  task = loop.run_in_executor(
73
  None,
74
  functools.partial(
@@ -78,7 +83,7 @@ def _generate(history):
78
  **model_inputs,
79
  ),
80
  )
81
- return loop, task, streamer
82
 
83
 
84
  async def chat(prompt, history):
@@ -92,7 +97,7 @@ async def chat(prompt, history):
92
  history = [] if history is None else history
93
  message_list = history + [message]
94
 
95
- loop, task, streamer = _generate(message_list)
96
 
97
  buffer = ""
98
  reasoning = ""
@@ -127,8 +132,6 @@ async def chat(prompt, history):
127
  print("Signal sent")
128
  raise
129
 
130
- loop.close()
131
-
132
 
133
  chat_bot = gr.Chatbot(
134
  latex_delimiters=[
 
65
  add_generation_prompt=True,
66
  )
67
 
68
+ try:
69
+ loop = asyncio.get_event_loop()
70
+ except:
71
+ loop = asyncio.new_event_loop()
72
+ asyncio.set_event_loop(loop)
73
+
74
  model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
75
  streamer = AsyncTextIteratorStreamer(tokenizer, skip_special_tokens=True)
76
 
 
77
  task = loop.run_in_executor(
78
  None,
79
  functools.partial(
 
83
  **model_inputs,
84
  ),
85
  )
86
+ return task, streamer
87
 
88
 
89
  async def chat(prompt, history):
 
97
  history = [] if history is None else history
98
  message_list = history + [message]
99
 
100
+ task, streamer = _generate(message_list)
101
 
102
  buffer = ""
103
  reasoning = ""
 
132
  print("Signal sent")
133
  raise
134
 
 
 
135
 
136
  chat_bot = gr.Chatbot(
137
  latex_delimiters=[