sagar007 committed (verified)
Commit: b557580
1 Parent(s): b80ae41

Update app.py

Files changed (1):
  1. app.py +223 -79
app.py CHANGED
@@ -2,7 +2,9 @@ import gradio as gr
 import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-
+from googlesearch import search  # Or use Serper API for better results
+import requests
+from bs4 import BeautifulSoup
 
 # Load the model and tokenizer
 model_name = "akjindal53244/Llama-3.1-Storm-8B"
@@ -13,14 +15,41 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
+def fetch_web_content(url):
+    try:
+        response = requests.get(url, timeout=10)
+        soup = BeautifulSoup(response.text, 'html.parser')
+        return ' '.join(p.get_text() for p in soup.find_all('p'))
+    except:
+        return "Could not fetch content from this URL"
+
+def web_search(query, num_results=3):
+    try:
+        results = []
+        for j in search(query, num_results=num_results, advanced=True):
+            content = fetch_web_content(j.url)
+            results.append({
+                "title": j.title,
+                "url": j.url,
+                "content": content[:1000]  # Limit content length
+            })
+        return results
+    except:
+        return []
+
 @spaces.GPU(duration=120)
-def generate_text(prompt, max_length, temperature):
+def generate_text(prompt, max_length, temperature, use_web):
+    if use_web:
+        search_results = web_search(prompt)
+        context = "\n".join([f"Source: {res['url']}\nContent: {res['content']}" for res in search_results])
+        prompt = f"Web Context:\n{context}\n\nUser Query: {prompt}"
+
     messages = [
-        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "system", "content": "You are a helpful assistant with web search capabilities."},
         {"role": "user", "content": prompt}
     ]
+
     formatted_prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
-
     inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
 
     outputs = model.generate(
@@ -34,133 +63,248 @@ def generate_text(prompt, max_length, temperature):
 
     return tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
 
-
-# Custom CSS
+# Enhanced CSS
 css = """
+:root {
+    --primary: #e94560;
+    --secondary: #1a1a2e;
+    --background: #16213e;
+    --text: #e0e0e0;
+}
+
 body {
-    background-color: #1a1a2e;
-    color: #e0e0e0;
-    font-family: 'Arial', sans-serif;
+    background-color: var(--background);
+    color: var(--text);
+    font-family: 'Inter', sans-serif;
 }
+
 .container {
-    max-width: 900px;
+    max-width: 1200px;
     margin: auto;
     padding: 20px;
 }
+
 .gradio-container {
-    background-color: #16213e;
+    background-color: var(--background);
     border-radius: 15px;
-    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3);
 }
+
 .header {
-    background-color: #0f3460;
-    padding: 20px;
+    background: linear-gradient(135deg, #0f3460 0%, #1a1a2e 100%);
+    padding: 2rem;
     border-radius: 15px 15px 0 0;
     text-align: center;
-    margin-bottom: 20px;
+    margin-bottom: 2rem;
+    position: relative;
+    overflow: hidden;
 }
+
+.header::before {
+    content: '';
+    position: absolute;
+    top: -50%;
+    left: -50%;
+    width: 200%;
+    height: 200%;
+    background: radial-gradient(circle, rgba(233,69,96,0.1) 0%, transparent 70%);
+    animation: pulse 8s infinite;
+}
+
+@keyframes pulse {
+    0% { transform: scale(0.8); opacity: 0.5; }
+    50% { transform: scale(1.2); opacity: 0.2; }
+    100% { transform: scale(0.8); opacity: 0.5; }
+}
+
 .header h1 {
-    color: #e94560;
-    font-size: 2.5em;
-    margin-bottom: 10px;
+    color: var(--primary);
+    font-size: 2.8rem;
+    margin-bottom: 1rem;
+    font-weight: 700;
+    position: relative;
 }
+
 .header p {
     color: #a0a0a0;
+    font-size: 1.1rem;
+    max-width: 800px;
+    margin: 0 auto;
 }
-.header img {
-    max-width: 300px;
-    border-radius: 10px;
-    margin: 15px auto;
-    display: block;
-}
+
 .input-group, .output-group {
-    background-color: #1a1a2e;
-    padding: 20px;
-    border-radius: 10px;
-    margin-bottom: 20px;
+    background-color: var(--secondary);
+    padding: 2rem;
+    border-radius: 12px;
+    margin-bottom: 2rem;
+    border: 1px solid #2d2d4d;
+    transition: transform 0.3s ease, box-shadow 0.3s ease;
 }
-.input-group label, .output-group label {
-    color: #e94560;
-    font-weight: bold;
+
+.input-group:hover, .output-group:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 6px 15px rgba(0, 0, 0, 0.3);
 }
+
 .generate-btn {
-    background-color: #e94560 !important;
+    background: linear-gradient(135deg, var(--primary) 0%, #c81e45 100%) !important;
     color: white !important;
     border: none !important;
-    border-radius: 5px !important;
-    padding: 10px 20px !important;
-    font-size: 16px !important;
+    border-radius: 8px !important;
+    padding: 12px 28px !important;
+    font-size: 1.1rem !important;
     cursor: pointer !important;
-    transition: background-color 0.3s ease !important;
+    transition: transform 0.2s ease, box-shadow 0.2s ease !important;
 }
+
 .generate-btn:hover {
-    background-color: #c81e45 !important;
+    transform: scale(1.05);
+    box-shadow: 0 4px 15px rgba(233, 69, 96, 0.4) !important;
 }
+
 .example-prompts {
     background-color: #1f2b47;
-    padding: 15px;
-    border-radius: 10px;
-    margin-bottom: 20px;
+    padding: 1.5rem;
+    border-radius: 12px;
+    margin-bottom: 2rem;
+    border: 1px solid #3d3d6d;
 }
+
 .example-prompts h3 {
-    color: #e94560;
-    margin-bottom: 10px;
+    color: var(--primary);
+    margin-bottom: 1rem;
+    font-size: 1.3rem;
 }
+
 .example-prompts ul {
-    list-style-type: none;
-    padding-left: 0;
+    list-style: none;
+    padding: 0;
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
+    gap: 1rem;
 }
+
 .example-prompts li {
-    margin-bottom: 5px;
+    background-color: var(--secondary);
+    padding: 1rem;
+    border-radius: 8px;
     cursor: pointer;
-    transition: color 0.3s ease;
+    transition: all 0.2s ease;
+    border: 1px solid #3d3d6d;
 }
+
 .example-prompts li:hover {
-    color: #e94560;
+    background-color: #2d2d4d;
+    transform: translateY(-2px);
+    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
+}
+
+.param-group {
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+    gap: 1.5rem;
+    margin-bottom: 1.5rem;
+}
+
+.web-toggle {
+    display: flex;
+    align-items: center;
+    gap: 1rem;
+    padding: 1rem;
+    background-color: var(--secondary);
+    border-radius: 8px;
+    margin-bottom: 1.5rem;
+}
+
+.output-controls {
+    display: flex;
+    gap: 1rem;
+    margin-top: 1.5rem;
+}
+
+#copy-btn {
+    background: #2d2d4d !important;
+    border: 1px solid #3d3d6d !important;
+}
+
+#copy-btn:hover {
+    background: #3d3d6d !important;
 }
 """
 
-# Example prompts
 example_prompts = [
-    "Write a Python function to find the n-th Fibonacci number.",
-    "Explain the concept of recursion in programming.",
-    "What are the key differences between Python and JavaScript?",
-    "Tell me a short story about a time-traveling robot.",
-    "Describe the process of photosynthesis in simple terms."
+    "Explain quantum computing in simple terms",
+    "Latest developments in AI research",
+    "How does blockchain technology work?",
+    "Compare React and Vue.js frameworks",
+    "Best practices for Python async programming",
+    "Impact of climate change on marine life",
+    "Recent advancements in cancer treatment",
+    "Guide to starting a tech startup in 2024"
 ]
 
-# Gradio interface
-# Gradio interface
-with gr.Blocks(css=css) as iface:
-    gr.HTML(
-        """
+with gr.Blocks(css=css, theme=gr.themes.Default()) as iface:
+    gr.HTML("""
         <div class="header">
-            <h1>Llama-3.1-Storm-8B Text Generation</h1>
-            <p>Generate text using the powerful Llama-3.1-Storm-8B model. Enter a prompt and let the AI create!</p>
-            <img src="https://cdn-uploads.huggingface.co/production/uploads/64c75c1237333ccfef30a602/tmOlbERGKP7JSODa6T06J.jpeg" alt="Llama">
+            <h1>Llama-3.1-Storm-8B AI Assistant</h1>
+            <p>Enhanced with real-time web search capabilities and multimodal interaction</p>
+            <img src="https://cdn-uploads.huggingface.co/production/uploads/64c75c1237333ccfef30a602/tmOlbERGKP7JSODa6T06J.jpeg"
+                 alt="Llama"
+                 style="border-radius: 12px; margin: 1.5rem 0;">
         </div>
-        """
-    )
+    """)
 
-    with gr.Group():
-        with gr.Group(elem_classes="example-prompts"):
-            gr.HTML("<h3>Example Prompts:</h3>")
-            example_buttons = [gr.Button(prompt) for prompt in example_prompts]
+    with gr.Tabs():
+        with gr.TabItem("Chat Assistant"):
+            with gr.Row():
+                with gr.Column(scale=3):
+                    with gr.Group(elem_classes="example-prompts"):
+                        gr.Markdown("## Example Queries")
+                        example_btns = [gr.Button(prompt, scale=0) for prompt in example_prompts]
 
-        with gr.Group(elem_classes="input-group"):
-            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=5)
-            max_length = gr.Slider(minimum=1, maximum=500, value=128, step=1, label="Max Length")
-            temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
-            generate_btn = gr.Button("Generate", elem_classes="generate-btn")
+                    with gr.Group(elem_classes="input-group"):
+                        prompt = gr.Textbox(label="Your Query", placeholder="Enter your question or prompt...",
+                                            lines=5, elem_id="main-input")
+
+                        with gr.Group(elem_classes="web-toggle"):
+                            web_search_toggle = gr.Checkbox(label="Enable Web Search", value=False)
+                            num_results = gr.Slider(1, 5, value=3, step=1, label="Search Results to Use")
+
+                        with gr.Group(elem_classes="param-group"):
+                            max_length = gr.Slider(32, 1024, value=256, step=32, label="Response Length")
+                            temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Creativity")
+
+                        generate_btn = gr.Button("Generate Response", elem_classes="generate-btn")
 
-        with gr.Group(elem_classes="output-group"):
-            output = gr.Textbox(label="Generated Text", lines=10)
+                with gr.Column(scale=2):
+                    with gr.Group(elem_classes="output-group"):
+                        output = gr.Textbox(label="Generated Response", lines=12, elem_id="main-output")
+                        with gr.Group(elem_classes="output-controls"):
+                            copy_btn = gr.Button("Copy to Clipboard", elem_id="copy-btn")
+                            clear_btn = gr.Button("Clear Output", elem_id="copy-btn")
 
-    generate_btn.click(generate_text, inputs=[prompt, max_length, temperature], outputs=output)
+        with gr.TabItem("Web Search Results"):
+            web_results = gr.JSON(label="Search Results Preview", visible=True)
+
+    # Event handling
+    generate_btn.click(
+        generate_text,
+        inputs=[prompt, max_length, temperature, web_search_toggle],
+        outputs=output
+    ).then(
+        lambda q: web_search(q) if q else [],
+        inputs=[prompt],
+        outputs=web_results
+    )
+
+    for btn in example_btns:
+        btn.click(lambda x: x, inputs=[btn], outputs=[prompt])
+
+    copy_btn.click(lambda x: x, inputs=[output], outputs=[]).then(
+        None,
+        _js="() => navigator.clipboard.writeText(document.getElementById('main-output').value)"
+    )
 
-    # Set up example prompt buttons
-    for button in example_buttons:
-        button.click(lambda x: x, inputs=[button], outputs=[prompt])
+    clear_btn.click(lambda: "", outputs=[output])
 
-    # Launch the app
 iface.launch()
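
Note on dependencies: the updated app.py imports googlesearch, requests, and bs4, none of which are installed by this commit (only app.py changes). Assuming the googlesearch import resolves to the googlesearch-python package on PyPI, with requests and beautifulsoup4 for fetching and parsing pages, the Space's requirements.txt would need those entries as well. A minimal standalone sketch of the retrieval path the new code relies on, using an illustrative query that is not part of the commit:

# Sketch only: exercises the assumed dependency chain (googlesearch-python,
# requests, beautifulsoup4) outside the Gradio app; the query is illustrative.
from googlesearch import search
import requests
from bs4 import BeautifulSoup

for hit in search("Llama-3.1-Storm-8B", num_results=2, advanced=True):
    page = requests.get(hit.url, timeout=10)                    # fetch the result page
    soup = BeautifulSoup(page.text, "html.parser")              # parse the HTML
    text = " ".join(p.get_text() for p in soup.find_all("p"))   # keep paragraph text, as app.py does
    print(hit.title, hit.url)
    print(text[:200])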