"""Minimal Gradio chat UI around the MelloGPT causal-LM checkpoint.

Loads the tokenizer and model once at import time, wraps them in a
text-generation pipeline, and serves a single-textbox Gradio interface.
"""

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

# Model checkpoint to serve; swap for another MelloGPT variant if desired.
MODEL_ID = "TheBloke/MelloGPT-AWQ"

# Load the tokenizer and model once at startup (heavy — downloads on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# Create a text-generation pipeline reusing the already-loaded model/tokenizer.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chatbot(prompt: str) -> str:
    """Generate a reply for *prompt* using the MelloGPT pipeline.

    Args:
        prompt: The user's input text.

    Returns:
        The pipeline's generated text (note: transformers text-generation
        includes the prompt as a prefix of ``generated_text``).
    """
    # do_sample=True is required for temperature to have any effect;
    # without it the pipeline decodes greedily and ignores temperature.
    response = generator(
        prompt,
        max_new_tokens=150,
        temperature=0.7,
        do_sample=True,
    )
    return response[0]["generated_text"]


# Gradio interface: one text input, one text output.
demo = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="🧠 MelloGPT Mental Health Bot",
)

if __name__ == "__main__":
    # Guarded launch so importing this module doesn't start the server.
    demo.launch()