Hugging Face Space — "3-Agent AI Chatbot" (status: Running)
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load a separate GPT-2 variant for each of the three agents so the same
# prompt produces three differently-capable answers. NOTE(review): three
# GPT-2 sizes resident at once is memory-heavy on a small GPU — confirm
# the target hardware fits all of them.
tokenizer1 = AutoTokenizer.from_pretrained("gpt2")
model1 = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer2 = AutoTokenizer.from_pretrained("gpt2-medium")
model2 = AutoModelForCausalLM.from_pretrained("gpt2-medium")
tokenizer3 = AutoTokenizer.from_pretrained("gpt2-large")
model3 = AutoModelForCausalLM.from_pretrained("gpt2-large")

# Prefer CUDA when available; all three models share the one device,
# and generate_response() below relies on this module-level name.
device = "cuda" if torch.cuda.is_available() else "cpu"
model1, model2, model3 = model1.to(device), model2.to(device), model3.to(device)
def generate_response(model, tokenizer, prompt):
    """Generate a completion of *prompt* with the given model/tokenizer pair.

    Args:
        model: a causal-LM with a HF ``generate`` method, already on ``device``.
        tokenizer: the matching tokenizer.
        prompt: user text to complete.

    Returns:
        The decoded output string (prompt plus continuation), special
        tokens stripped. Relies on the module-level ``device`` for tensor
        placement.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Inference only: no_grad avoids building an autograd graph and
    # keeps memory usage down during generation.
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Passing the attention mask silences the HF warning and
            # ensures padding positions are handled correctly.
            attention_mask=inputs.get("attention_mask"),
            max_length=100,  # total length cap, prompt tokens included
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
import gradio as gr | |
def multi_agent_chat(user_input):
    """Fan the same user prompt out to all three agents.

    Returns one reply per agent, in agent order, for Gradio to display
    in its three output boxes.
    """
    agents = (
        (model1, tokenizer1),
        (model2, tokenizer2),
        (model3, tokenizer3),
    )
    return tuple(generate_response(mdl, tok, user_input) for mdl, tok in agents)
# Gradio UI: one text input fanned out (via multi_agent_chat) to three
# output boxes, one per agent/model size.
interface = gr.Interface(
    fn=multi_agent_chat,
    inputs=gr.Textbox(lines=2, placeholder="Ask something..."),
    outputs=[
        gr.Textbox(label="Agent 1 (GPT-2)"),
        gr.Textbox(label="Agent 2 (GPT-2 Medium)"),
        gr.Textbox(label="Agent 3 (GPT-2 Large)"),
    ],
    title="3-Agent AI Chatbot",
)

# Starts the local web server (blocking call).
interface.launch()