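"""Gradio chat app serving a LoRA-adapted Llama model.

Loads the unsloth/Llama-3.2-3B-Instruct base model, applies the
Futuresony/future_ai_12_10_2024 LoRA adapter with PEFT, and exposes
a simple chat UI via gr.ChatInterface.
"""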
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import gradio as gr
# --------------------
# Load Base Model and LoRA Adapter
# --------------------
def load_model_and_adapter():
    base_model_name = "unsloth/Llama-3.2-3B-Instruct"  # Base model the adapter was trained on
    adapter_repo = "Futuresony/future_ai_12_10_2024"  # Hugging Face LoRA adapter repo

    # Load tokenizer and base model
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        torch_dtype=torch.float16,  # float16 for memory efficiency when a GPU is available
        device_map="auto",  # Automatically map weights to GPU or CPU
    )

    # Load the LoRA adapter on top of the base weights
    model = PeftModel.from_pretrained(base_model, adapter_repo)
    model.eval()  # Evaluation mode: disables dropout for inference
    return tokenizer, model
# Load the model and tokenizer once
tokenizer, model = load_model_and_adapter()
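# Optional: PEFT can fold the adapter weights into the base model for slightly
# faster inference (losing the ability to detach the adapter afterwards):
#   model = model.merge_and_unload()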
# --------------------
# Generate Response Function
# --------------------
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the full conversation: system prompt, prior turns, new user message
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Prepare the input prompt for generation.
    # NOTE: for instruct-tuned models, tokenizer.apply_chat_template() usually
    # gives better results than this plain "role: content" layout.
    prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate the response. do_sample=True is required for temperature/top_p
    # to take effect, and max_new_tokens caps only the generated tokens
    # (max_length would count the prompt as well), matching the slider label.
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = response.split("assistant:")[-1].strip()  # Keep only the final assistant turn
    return response
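# Quick smoke test with hypothetical values, handy when debugging outside Gradio:
#   print(respond("Hello!", [], "You are a helpful assistant.", 64, 0.7, 0.95))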
# --------------------
# Gradio Interface
# --------------------
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
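# Note: gr.ChatInterface also accepts a generator that yields partial strings,
# which enables token streaming; respond() here returns the full reply at once.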
# --------------------
# Launch the Interface
# --------------------
if __name__ == "__main__":
    demo.launch()