ShubhamD95 commited on
Commit
838b693
·
verified ·
1 Parent(s): d88f550

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -9
app.py CHANGED
@@ -1,32 +1,25 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import os
3
- from huggingface_hub import login
4
  import gradio as gr
5
 
6
- # 1. Authenticate with Hugging Face token from secrets
7
  hf_token = os.environ.get("hf_space_token")
8
- login(token=hf_token)
9
 
10
- # 2. Load Gemma model and tokenizer (GATED model needs token)
11
  model_name = "google/gemma-3-1b-it"
12
  tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
13
  model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
14
 
15
- # 3. Define response generation function
16
  def generate_response(prompt):
17
  inputs = tokenizer(prompt, return_tensors="pt")
18
  outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, temperature=0.7)
19
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
20
 
21
- # 4. Create Gradio interface
22
  iface = gr.Interface(
23
  fn=generate_response,
24
  inputs=gr.Textbox(lines=2, placeholder="Ask something..."),
25
  outputs="text",
26
  title="Chat with Gemma",
27
- description="This chatbot is powered by Google's Gemma model running in Hugging Face Spaces."
28
  )
29
 
30
- # 5. Launch app
31
  iface.launch()
32
-
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import os
 
3
  import gradio as gr
4
 
5
# Read the Hugging Face access token from the Space's secrets.
# os.getenv returns None when the secret is not configured, in which case
# the gated-model download below will fall back to anonymous access.
hf_token = os.getenv("hf_space_token")
 
7
 
 
8
# Load the gated Gemma checkpoint; the token authenticates the download.
# NOTE(review): gated repos require a valid token — if hf_token is None,
# from_pretrained will fail for this model. Confirm the secret is set.
model_name = "google/gemma-3-1b-it"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
11
 
 
12
def generate_response(prompt):
    """Return the model's text completion for *prompt*.

    Tokenizes the prompt, samples up to 100 new tokens (temperature 0.7),
    and decodes the full output sequence back to a string. Note the decoded
    text includes the prompt itself, since the whole sequence is decoded.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(
        **encoded,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
16
 
 
17
# Minimal web UI: a single text box wired to generate_response.
prompt_box = gr.Textbox(lines=2, placeholder="Ask something...")
iface = gr.Interface(
    fn=generate_response,
    inputs=prompt_box,
    outputs="text",
    title="Chat with Gemma",
    description="Chatbot powered by Google's Gemma model",
)
24
 
 
25
# Start the Gradio server; blocks here until the app is shut down.
iface.launch()