mvcrockett committed
Commit d2d0180 · verified · 1 Parent(s): a819876

Update app.py

Files changed (1): app.py (+5 −5)
app.py CHANGED
@@ -1,11 +1,11 @@
 import gradio as gr
 import requests
 
-# Function to send your prompt to NVIDIA LLaMA 4 Scout
+# Function to send your prompt to NVIDIA LLaMA 4 Maverick
 def talk_to_llama(prompt):
-    url = "https://api.nvcf.nvidia.com/v1/messages"  # ✅ Correct endpoint
+    url = "https://integrate.api.nvidia.com/v1/meta/llama-4-maverick-17b-128e-instruct"
     headers = {
-        "Authorization": "Bearer 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",
+        "Authorization": "Bearer YOUR_API_KEY_HERE",  # Replace with your actual API key
         "Content-Type": "application/json"
     }
     data = {
@@ -23,8 +23,8 @@ chat = gr.Interface(
     fn=talk_to_llama,
     inputs="text",
     outputs="text",
-    title="Chat with LLaMA 4 Scout",
-    description="Ask anything! This chatbot uses NVIDIA’s 3.5M token LLaMA 4 Scout model."
+    title="Chat with LLaMA 4 Maverick",
+    description="Ask anything! This chatbot uses NVIDIA’s LLaMA 4 Maverick 17B 128E Instruct model."
 )
 
 chat.launch()
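
Note: the hunks above cut off the request payload (the "data = {" block) and the response handling, so the full behavior of talk_to_llama is not visible in this diff. Below is a minimal sketch of what the complete app.py might look like after this commit, assuming NVIDIA's OpenAI-compatible chat completions endpoint (https://integrate.api.nvidia.com/v1/chat/completions) with the model named in the JSON body; the endpoint, model identifier, payload fields, environment-variable name, and response parsing are assumptions for illustration, not taken from the commit.

import os

import gradio as gr
import requests

# Assumed OpenAI-compatible endpoint; the commit's URL embeds the model in the path instead.
API_URL = "https://integrate.api.nvidia.com/v1/chat/completions"
# Hypothetical env var so the key is not hardcoded (the commit uses a placeholder string).
API_KEY = os.getenv("NVIDIA_API_KEY", "YOUR_API_KEY_HERE")

def talk_to_llama(prompt):
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    data = {
        # Model name assumed from the URL in the commit.
        "model": "meta/llama-4-maverick-17b-128e-instruct",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 512,
    }
    response = requests.post(API_URL, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    # Assumes an OpenAI-style response shape.
    return response.json()["choices"][0]["message"]["content"]

chat = gr.Interface(
    fn=talk_to_llama,
    inputs="text",
    outputs="text",
    title="Chat with LLaMA 4 Maverick",
    description="Ask anything! This chatbot uses NVIDIA’s LLaMA 4 Maverick 17B 128E Instruct model."
)

chat.launch()

Reading the key from an environment variable rather than a literal string keeps credentials out of the repository, which is the same concern this commit addresses by swapping the hardcoded key for a placeholder.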