Gajendra5490 committed on
Commit
902a9ef
·
verified ·
1 Parent(s): 69f8379

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -2,8 +2,10 @@ import gradio as gr
2
  import torch
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
- # Load model and tokenizer
6
  model_name = "MONAI/Llama3-VILA-M3-8B"
 
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
8
  model = AutoModelForCausalLM.from_pretrained(
9
  model_name,
@@ -18,7 +20,7 @@ def generate_response(prompt):
18
  output = model.generate(**inputs, max_length=200)
19
  return tokenizer.decode(output[0], skip_special_tokens=True)
20
 
21
- # Create Gradio Interface
22
  iface = gr.Interface(
23
  fn=generate_response,
24
  inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
@@ -28,4 +30,3 @@ iface = gr.Interface(
28
  )
29
 
30
  iface.launch()
31
-
 
2
  import torch
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
+ # Model name
6
  model_name = "MONAI/Llama3-VILA-M3-8B"
7
+
8
+ # Load tokenizer and model with trust_remote_code=True
9
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
10
  model = AutoModelForCausalLM.from_pretrained(
11
  model_name,
 
20
  output = model.generate(**inputs, max_length=200)
21
  return tokenizer.decode(output[0], skip_special_tokens=True)
22
 
23
+ # Gradio Interface
24
  iface = gr.Interface(
25
  fn=generate_response,
26
  inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
 
30
  )
31
 
32
  iface.launch()