Futuresony committed
Commit c2c7eb4 · verified · 1 Parent(s): 6d9c19c

Update app.py

Files changed (1)
  1. app.py  +24 -64
app.py CHANGED
@@ -1,75 +1,35 @@
-import os
-import torch
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel, PeftConfig

-# Set the HF repo and LoRA model location
-base_model_id = "unsloth/gemma-2-9b"
-lora_model_id = "Futuresony/gemma2-9b-lora-alpaca"

-# Load base model on CPU
-base_model = AutoModelForCausalLM.from_pretrained(
-    base_model_id,
-    device_map="cpu",
-    torch_dtype=torch.float32,
-)

-# Load tokenizer from base model
-tokenizer = AutoTokenizer.from_pretrained(base_model_id)
-
-# Load LoRA adapter
-model = PeftModel.from_pretrained(base_model, lora_model_id)
-model.eval()
-
-# === Alpaca-style formatter ===
-def format_alpaca_prompt(user_input, system_prompt, history):
-    history_str = "\n".join([f"### Instruction:\n{h[0]}\n### Response:\n{h[1]}" for h in history])
-    prompt = f"""{system_prompt}
-{history_str}
-
-### Instruction:
-{user_input}

 ### Response:"""
-    return prompt
-
-# === Chat logic ===
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    prompt = format_alpaca_prompt(message, system_message, history)
-    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")

-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            do_sample=True,
-            pad_token_id=tokenizer.eos_token_id,
-        )

-    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    # Only return the part after "### Response:"
-    if "### Response:" in response_text:
-        final_output = response_text.split("### Response:")[-1].strip()
-    else:
-        final_output = response_text.strip()

-    history.append((message, final_output))
-    yield final_output

-# === Gradio Interface ===
-demo = gr.ChatInterface(
-    fn=respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.01, label="Top-p"),
-    ],
-    title="Offline Gemma-2B Alpaca Chatbot (LoRA)",
-)

-if __name__ == "__main__":
-    demo.launch()
 
 
 
 import gradio as gr
+from llama_cpp import Llama

+# Path to your GGUF model inside the space
+MODEL_PATH = "your-model.gguf"

+# Load model
+llm = Llama(model_path=MODEL_PATH, n_ctx=2048, n_threads=4, verbose=True)

+# Function to format the prompt
+def format_prompt(user_message):
+    return f"""### Instruction:
+{user_message}

 ### Response:"""

+# Chat handler
+def respond(user_message, chat_history):
+    prompt = format_prompt(user_message)
+    output = llm(prompt, max_tokens=300, stop=["###"])
+    response = output["choices"][0]["text"].strip()
+    chat_history.append((user_message, response))
+    return "", chat_history

+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("## 🤖 DStv AI Assistant (Offline - GGUF)")
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(placeholder="Ask your question...")
+    state = gr.State([])

+    msg.submit(respond, [msg, state], [msg, chatbot])

+demo.launch()
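
The new app.py loads a single GGUF file, while the removed code loaded the base model plus a LoRA adapter through transformers and peft. One way to produce such a file from the repos referenced in the removed code is to merge the adapter into the base model and then convert the merged checkpoint with llama.cpp's tooling. A minimal sketch, assuming the repo ids from the removed code and an arbitrary output directory; this workflow is not part of the commit:

# Sketch (assumption, not from this commit): merge the LoRA adapter into the
# base model so the result can be converted to GGUF with the scripts that ship
# with llama.cpp (convert_hf_to_gguf.py, then llama-quantize).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-2-9b", torch_dtype=torch.float16)
merged = PeftModel.from_pretrained(base, "Futuresony/gemma2-9b-lora-alpaca").merge_and_unload()

merged.save_pretrained("gemma2-9b-merged")  # merged HF checkpoint; folder name is arbitrary
AutoTokenizer.from_pretrained("unsloth/gemma-2-9b").save_pretrained("gemma2-9b-merged")
# Converting and quantizing that folder outside Python yields the .gguf file
# that MODEL_PATH is expected to point at.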
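
MODEL_PATH = "your-model.gguf" is a placeholder, so the GGUF file has to exist inside the Space before Llama(...) is constructed. A minimal sketch of fetching it at startup with huggingface_hub instead of bundling it; the repo id and filename below are hypothetical, not taken from the commit:

# Sketch (hypothetical repo id and filename): download the GGUF file from the
# Hub at startup and point llama.cpp at the cached local path.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

MODEL_PATH = hf_hub_download(
    repo_id="Futuresony/gemma2-9b-alpaca-gguf",  # hypothetical GGUF repo
    filename="gemma2-9b-alpaca.Q4_K_M.gguf",     # hypothetical filename
)
llm = Llama(model_path=MODEL_PATH, n_ctx=2048, n_threads=4, verbose=True)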