import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "sarvamai/sarvam-m"
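# Sarvam-M: Sarvam AI's open-weights hybrid reasoning model; with thinking
# mode enabled it emits its chain of thought inside a <think>...</think> block.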

# Load tokenizer and model
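# device_map="auto" places the weights on the available GPU(s) automatically
# (this requires the `accelerate` package); torch_dtype="auto" loads the
# dtype recorded in the checkpoint config.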
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)

def generate_response(prompt):
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        enable_thinking=True,
    )
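    # Tokenize the rendered prompt and move the input tensors to the model's device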
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Generate with a generous token budget (the <think> block counts toward
    # max_new_tokens). do_sample=True is required for temperature to take
    # effect; under greedy decoding, generate() ignores it.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=8192,
        do_sample=True,
        temperature=0.2,
    )

    # Keep only the newly generated tokens (drop the echoed prompt tokens)
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    output_text = tokenizer.decode(output_ids)

    # Separate the hidden reasoning from the final answer. removesuffix()
    # (Python 3.9+) is used instead of rstrip("</s>"): rstrip strips any of
    # the characters <, /, s, > from the end and can eat real content,
    # not just the end-of-sequence marker.
    if "</think>" in output_text:
        reasoning_content = output_text.split("</think>")[0].rstrip("\n")
        content = output_text.split("</think>")[-1].lstrip("\n").removesuffix("</s>")
    else:
        reasoning_content = ""
        content = output_text.removesuffix("</s>")

    return reasoning_content, content
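
# Example: generate_response("Why is the sky blue?") returns a
# (reasoning, answer) pair of strings, one per output textbox below.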

# Gradio UI
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, label="Enter your prompt"),
    outputs=[
        gr.Textbox(label="Reasoning"),
        gr.Textbox(label="Response")
    ],
    title="Sarvam-M Chat Interface",
    description="Enter a prompt and receive both the internal reasoning and the final answer from the Sarvam-M model."
)
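
# launch() serves the app on a local URL; outside a hosted environment such as
# a Hugging Face Space, launch(share=True) also creates a temporary public link.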

iface.launch()