Lokesh Jirati committed on
Commit 03e2f07
1 Parent(s): b659f9e

Add application file

Files changed (2)
  1. app.py +42 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load tokenizer and model from Hugging Face Hub
+ tokenizer = AutoTokenizer.from_pretrained("bilalRahib/TinyLLama-NSFW-Chatbot")
+ model = AutoModelForCausalLM.from_pretrained("bilalRahib/TinyLLama-NSFW-Chatbot")
+ model.eval()
+
+ # Text generation function
+ def generate_response(prompt, max_new_tokens=100, temperature=0.8, top_p=0.95):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     with torch.no_grad():
+         output = model.generate(
+             inputs["input_ids"],
+             attention_mask=inputs.get("attention_mask", None),
+             max_new_tokens=max_new_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+     return tokenizer.decode(output[0], skip_special_tokens=True)
+
+ # Gradio interface
+ iface = gr.Interface(
+     fn=generate_response,
+     inputs=[
+         gr.Textbox(label="Enter your prompt", placeholder="Type a message..."),
+         gr.Slider(20, 300, value=100, label="Max New Tokens"),
+         gr.Slider(0.1, 1.5, value=0.8, label="Temperature"),
+         gr.Slider(0.5, 1.0, value=0.95, label="Top-p"),
+     ],
+     outputs="text",
+     title="TinyLLama NSFW Chatbot",
+     description="A chatbot using TinyLLama NSFW fine-tuned model.",
+ )
+
+ # Launch app
+ if __name__ == "__main__":
+     iface.launch()
+
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ transformers
+ torch
+ gradio
+ sentencepiece
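
To sanity-check the new app without starting the web UI, one can import generate_response from app.py and call it directly. This is a minimal sketch, not part of the commit: it assumes the packages in requirements.txt are installed and the model download succeeds, and the prompt and parameter values below are illustrative only.

# Minimal smoke test for app.py (assumes requirements.txt dependencies are installed).
# Importing app runs its top-level code: the model is downloaded and the Gradio
# interface is built, but iface.launch() stays behind the __main__ guard.
from app import generate_response

# Call the generation function directly, using values in the same ranges the sliders expose.
reply = generate_response("Hello, how are you?", max_new_tokens=50, temperature=0.8, top_p=0.95)
print(reply)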