Satyam-Singh committed on
Commit
fa8f986
·
verified ·
1 Parent(s): 0213511

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -82
app.py CHANGED
@@ -1,83 +1,13 @@
1
- import PIL.Image
2
- import gradio as gr
3
- import base64
4
- import time
5
- import os
6
  import google.generativeai as genai
7
-
8
- # Set Google API key
9
- genai.configure(api_key = os.environ['GOOGLE_PALM_KEY'])
10
-
11
- # Create the Model
12
- txt_model = genai.GenerativeModel('gemini-pro')
13
- vis_model = genai.GenerativeModel('gemini-pro-vision')
14
-
15
# Read an image file from disk and return its contents as a base64 text string.
def image_to_base64(image_path):
    with open(image_path, 'rb') as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
20
-
21
# Append the user's turn to the chat history shown by gr.Chatbot.
#
# history: list of (user_text, bot_text) tuples.
# txt:     the user's message text.
# img:     path to an uploaded image, or None/"" when no image was given.
#
# Returns the history with the new user turn appended; when an image is
# present it is inlined into the message as a markdown data-URL so the
# chat UI renders it.
def query_message(history, txt, img):
    if not img:
        history += [(txt, None)]
        return history
    # BUG FIX: renamed the local from `base64` — the original shadowed the
    # imported `base64` module with this variable.
    encoded_image = image_to_base64(img)
    # NOTE(review): the data URL hard-codes image/jpeg regardless of the
    # actual upload format — confirm uploads are always JPEG.
    data_url = f"data:image/jpeg;base64,{encoded_image}"
    history += [(f"{txt} ![]({data_url})", None)]
    return history
30
-
31
# Generate the model's reply for the latest user turn and append it to the
# chat history. Text-only prompts go to the text model; prompts with an
# image attached go to the vision model.
def llm_response(history, text, img):
    if img:
        image = PIL.Image.open(img)
        response = vis_model.generate_content([text, image])
    else:
        response = txt_model.generate_content(text)
    history += [(None, response.text)]
    return history
43
-
44
# Interface code: chat UI with a text box and an image upload, wired so that
# query_message echoes the user's turn and llm_response appends the model's
# reply.
with gr.Blocks() as app:
    with gr.Row():
        # BUG FIX: query_message/llm_response take an `img` argument, but the
        # original wired only [chatbot, text_box] into three-parameter
        # callbacks, so every click raised a TypeError — and the UI offered no
        # way to upload an image despite its placeholder text promising one.
        image_box = gr.Image(type="filepath")
        chatbot = gr.Chatbot(
            scale=2,
            height=750
        )
    text_box = gr.Textbox(
        placeholder="Enter text and press enter, or upload an image",
        container=False,
    )

    btn = gr.Button("Submit")
    clicked = btn.click(query_message,
                        [chatbot, text_box, image_box],
                        chatbot
                        ).then(llm_response,
                               [chatbot, text_box, image_box],
                               chatbot
                               )
app.queue()
app.launch(debug=True)
66
-
67
-
68
-
69
- ''''import google.generativeai as genai
70
  import gradio as gr
71
  import os
72
 
73
- genai.configure(api_key=os.getenv("GOOGLE_PALM_KEY"))
74
-
75
  # Set up the model
76
  generation_config = {
77
  "temperature": 0.9,
78
  "top_p": 1,
79
  "top_k": 1,
80
- "max_output_tokens": 4096,
81
  }
82
 
83
  safety_settings = [
@@ -99,6 +29,8 @@ safety_settings = [
99
  },
100
  ]
101
 
 
 
102
  model = genai.GenerativeModel(model_name="gemini-pro",
103
  generation_config=generation_config,
104
  safety_settings=safety_settings)
@@ -178,21 +110,94 @@ convo = model.start_chat(history=[
178
  },
179
  ])
180
 
181
- #response = convo.send_message(
182
- # messages=messages)
183
- # return response.last.text
184
 
185
- def generate(messages):
186
- model = genai.GenerativeModel('gemini-pro')
187
- response = model.generate_content(
188
- messages,
189
- stream=True,
190
- generation_config=generation_config)
191
 
192
  gr.ChatInterface(
193
- fn=generate,
194
  chatbot=gr.Chatbot(show_label=False, avatar_images=(None, 'palm-logo.png'), show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
195
  title="PaLM-2",
196
  description="This is unofficial demo of ```PaLM-2``` based on ```Google API```. ```History/context``` memory does not work in this demo.",
197
  concurrency_limit=20,
198
- ).launch(show_api=False)'''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import google.generativeai as genai
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import gradio as gr
3
  import os
4
 
 
 
5
  # Set up the model
6
  generation_config = {
7
  "temperature": 0.9,
8
  "top_p": 1,
9
  "top_k": 1,
10
+ "max_output_tokens": 2048,
11
  }
12
 
13
  safety_settings = [
 
29
  },
30
  ]
31
 
32
+ genai.configure(api_key=os.getenv("GOOGLE_PALM_KEY"))
33
+
34
  model = genai.GenerativeModel(model_name="gemini-pro",
35
  generation_config=generation_config,
36
  safety_settings=safety_settings)
 
110
  },
111
  ])
112
 
 
 
 
113
 
114
+ def gemini_chat(prompt, history):
115
+ response = chat.send_message(message)
116
+ return response.text
117
+
 
 
118
 
119
# Launch the chat UI; gemini_chat is the response callback.
gr.ChatInterface(
    fn=gemini_chat,
    chatbot=gr.Chatbot(show_label=False, avatar_images=(None, 'palm-logo.png'), show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="PaLM-2",
    description="This is unofficial demo of ```PaLM-2``` based on ```Google API```. ```History/context``` memory does not work in this demo.",
    concurrency_limit=20,
).launch(show_api=False)
# BUG FIX: removed the leftover quick-start snippet
# `convo.send_message("YOUR_USER_INPUT")` / `print(convo.last.text)` —
# it sent a literal placeholder prompt to the API after the server shut down.
130
+
131
+
132
+
133
+
134
+
135
+
136
+
137
+
138
+
139
+ """import PIL.Image
140
+ import gradio as gr
141
+ import base64
142
+ import time
143
+ import os
144
+ import google.generativeai as genai
145
+
146
+ # Set Google API key
147
+ genai.configure(api_key = os.environ['GOOGLE_PALM_KEY'])
148
+
149
+ # Create the Model
150
+ txt_model = genai.GenerativeModel('gemini-pro')
151
+ vis_model = genai.GenerativeModel('gemini-pro-vision')
152
+
153
+ # Image to Base 64 Converter
154
+ def image_to_base64(image_path):
155
+ with open(image_path, 'rb') as img:
156
+ encoded_string = base64.b64encode(img.read())
157
+ return encoded_string.decode('utf-8')
158
+
159
+ # Function that takes User Inputs and displays it on ChatUI
160
+ def query_message(history,txt,img):
161
+ if not img:
162
+ history += [(txt,None)]
163
+ return history
164
+ base64 = image_to_base64(img)
165
+ data_url = f"data:image/jpeg;base64,{base64}"
166
+ history += [(f"{txt} ![]({data_url})", None)]
167
+ return history
168
+
169
+ # Function that takes User Inputs, generates Response and displays on Chat UI
170
+ def llm_response(history,text,img):
171
+ if not img:
172
+ response = txt_model.generate_content(text)
173
+ history += [(None,response.text)]
174
+ return history
175
+
176
+ else:
177
+ img = PIL.Image.open(img)
178
+ response = vis_model.generate_content([text,img])
179
+ history += [(None,response.text)]
180
+ return history
181
+
182
+ # Interface Code
183
+ with gr.Blocks() as app:
184
+ with gr.Row():
185
+ chatbot = gr.Chatbot(
186
+ scale = 2,
187
+ height=750
188
+ )
189
+ text_box = gr.Textbox(
190
+ placeholder="Enter text and press enter, or upload an image",
191
+ container=False,
192
+ )
193
+
194
+ btn = gr.Button("Submit")
195
+ clicked = btn.click(query_message,
196
+ [chatbot,text_box],
197
+ chatbot
198
+ ).then(llm_response,
199
+ [chatbot,text_box],
200
+ chatbot
201
+ )
202
+ app.queue()
203
+ app.launch(debug=True)"""