import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Merged fine-tuned Llama model for Pakistani legal Q&A, hosted on the Hugging Face Hub
model_id = "heyIamUmair/llama3-3b-merged-legal"

# Load tokenizer and model; "auto" lets transformers pick the device and dtype
# (GPU with reduced precision if available, otherwise CPU)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype="auto"
)

# Reuse the loaded model and tokenizer in a text-generation pipeline
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat(message, history):  # gr.ChatInterface passes (message, history); history is unused here
    # return_full_text=False keeps the prompt from being echoed back in the reply
    output = pipe(message, max_new_tokens=200, do_sample=True, temperature=0.7,
                  return_full_text=False)
    return output[0]["generated_text"]
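
# Alternative (untested sketch): if the merged model was instruction-tuned with the Llama 3
# chat format, building the prompt with the tokenizer's chat template and including the
# running history should give better multi-turn answers. Assumes tokenizer.chat_template
# is defined and that ChatInterface passes history as a list of (user, assistant) pairs.
#
# def chat(message, history):
#     messages = []
#     for user_msg, bot_msg in history:
#         messages.append({"role": "user", "content": user_msg})
#         messages.append({"role": "assistant", "content": bot_msg})
#     messages.append({"role": "user", "content": message})
#     prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
#     output = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.7,
#                   return_full_text=False)
#     return output[0]["generated_text"]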

gr.ChatInterface(fn=chat, title="🧑‍⚖️ Pakistan Law Chatbot (Llama 3.2)").launch()