Ubaidbhat committed on
Commit
e24d0f6
·
verified ·
1 Parent(s): 5fb9d86

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -0
app.py CHANGED
@@ -28,6 +28,21 @@ mergedModel.eval()
28
 
29
 
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  def inferance(query: str, model, tokenizer, temp = 1.0, limit = 200) -> str:
32
  device = "cuda:0"
33
 
@@ -50,9 +65,12 @@ def inferance(query: str, model, tokenizer, temp = 1.0, limit = 200) -> str:
50
 
51
 
52
 
 
 
53
def predict(temp, limit, text):
    """Gradio handler: run the merged model on *text* and return the raw output.

    Args:
        temp: Sampling temperature from the UI slider (not forwarded —
            inferance is invoked with a hard-coded temp=1.0).
        limit: Generation length from the UI slider (not forwarded —
            inferance is invoked with a hard-coded limit=200).
        text: The user's question/prompt.

    Returns:
        The raw string produced by the model.
    """
    question = text
    generated = inferance(question, mergedModel, tokenizer, temp = 1.0, limit = 200)
    return generated
57
 
58
  pred = gr.Interface(
 
28
 
29
 
30
 
31
def extract_answer(message, marker='### Answer:'):
    """Extract the answer section from a raw model response.

    Searches *message* for *marker* (default ``'### Answer:'``) and returns
    the text that follows it, truncated at the last full stop so that a
    trailing incomplete sentence is dropped.

    Args:
        message: Raw text generated by the model.
        marker: Delimiter string that precedes the answer text. Parameterized
            so other prompt templates can reuse this helper; the default
            preserves the original behavior.

    Returns:
        The cleaned answer string, or a fallback message when *marker* is
        not present in *message*.
    """
    start_index = message.find(marker)
    if start_index == -1:
        # Marker absent: the model produced no recognizable answer section.
        return "I don't have the answer to this question....."
    # Take everything after the marker, without surrounding whitespace.
    answer_part = message[start_index + len(marker):].strip()
    # Cut after the last full stop to drop a trailing unfinished sentence;
    # if there is no full stop at all, keep the text as-is.
    last_full_stop_index = answer_part.rfind('.')
    if last_full_stop_index != -1:
        answer_part = answer_part[:last_full_stop_index + 1]
    return answer_part.strip()
46
  def inferance(query: str, model, tokenizer, temp = 1.0, limit = 200) -> str:
47
  device = "cuda:0"
48
 
 
65
 
66
 
67
 
68
+
69
+
70
def predict(temp, limit, text):
    """Gradio handler: run inference on *text* and return the cleaned answer.

    Args:
        temp: Sampling temperature from the UI slider.
        limit: Generation length from the UI slider.
        text: The user's question/prompt.

    Returns:
        The answer extracted from the raw model output via extract_answer.
    """
    prompt = text
    # NOTE(review): the UI-supplied temp/limit are ignored — inferance is
    # called with hard-coded temp=1.0, limit=200; confirm this is intended.
    out = inferance(prompt, mergedModel, tokenizer, temp = 1.0, limit = 200)
    # Bug fix: `display` was computed but the raw `out` was returned,
    # making the extraction dead code. Return the cleaned answer instead.
    display = extract_answer(out)
    return display
75
 
76
  pred = gr.Interface(