# NOTE: snapshot metadata from the original source page (scraper residue):
# file size 1,495 bytes; commits c960062 / 02e32ba; 44 lines.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Build the three chat agents: three GPT-2 checkpoints of increasing size,
# each with its own tokenizer, all moved to a single shared device.
def _load_agent(checkpoint):
    # One tokenizer/model pair per agent checkpoint.
    return (
        AutoTokenizer.from_pretrained(checkpoint),
        AutoModelForCausalLM.from_pretrained(checkpoint),
    )

tokenizer1, model1 = _load_agent("gpt2")
tokenizer2, model2 = _load_agent("gpt2-medium")
tokenizer3, model3 = _load_agent("gpt2-large")

# Prefer the GPU when one is present; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model1, model2, model3 = model1.to(device), model2.to(device), model3.to(device)
def generate_response(model, tokenizer, prompt):
    """Generate a text completion for *prompt* with the given model/tokenizer.

    The prompt is tokenized, moved to the module-level ``device``, and decoded
    back to a plain string with special tokens stripped.

    Note: ``max_length=100`` bounds the TOTAL sequence (prompt + completion),
    so long prompts leave fewer tokens for the reply.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # no_grad: pure inference — skip autograd bookkeeping (memory/speed).
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Forward the attention mask instead of dropping it; with
            # pad_token_id == eos_token_id, generate() cannot infer it reliably.
            attention_mask=inputs.get("attention_mask"),
            max_length=100,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
import gradio as gr
def multi_agent_chat(user_input):
    """Send the same prompt to all three agents and return their three replies."""
    agents = (
        (model1, tokenizer1),
        (model2, tokenizer2),
        (model3, tokenizer3),
    )
    # One response per agent, in fixed order (matches the three output boxes).
    return tuple(generate_response(mdl, tok, user_input) for mdl, tok in agents)
# Side-by-side comparison UI: one input box fanned out to three agent replies.
_agent_boxes = [
    gr.Textbox(label="Agent 1 (GPT-2)"),
    gr.Textbox(label="Agent 2 (GPT-2 Medium)"),
    gr.Textbox(label="Agent 3 (GPT-2 Large)"),
]
interface = gr.Interface(
    fn=multi_agent_chat,
    inputs=gr.Textbox(lines=2, placeholder="Ask something..."),
    outputs=_agent_boxes,
    title="3-Agent AI Chatbot",
)
interface.launch()