import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = 'FridayMaster/fine_tune_embedding'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)  # Use the class that matches the checkpoint
model.eval()  # Inference mode: disables dropout

# Some tokenizers ship without a pad token; fall back to the EOS token so padding works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Define a function to generate responses
def generate_response(prompt):
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512)
    with torch.no_grad():
        # Generate a response; pass the attention mask so padded positions are ignored,
        # and cap new tokens rather than total length so long prompts still get a reply
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=150,
            num_return_sequences=1,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Enter your message", placeholder="Type something here..."),
    outputs=gr.Textbox(label="Response"),
    title="Chatbot Interface",
    description="Interact with the fine-tuned chatbot model.",
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
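Once the app is running, the interface can also be called programmatically. Below is a minimal sketch using the gradio_client package; the local URL and the /predict endpoint name are assumptions based on Gradio's defaults for a single-function gr.Interface, so adjust them if the app is hosted elsewhere.

from gradio_client import Client

# Connect to the running Gradio app (default local address is an assumption)
client = Client("http://127.0.0.1:7860")

# Call the endpoint that gr.Interface exposes by default
result = client.predict("Hello, how are you?", api_name="/predict")
print(result)

When deploying this as a Hugging Face Space, gradio, transformers, and torch need to be listed in the Space's requirements.txt so the runtime can install them. Note also that the checkpoint name fine_tune_embedding suggests an embedding model; if AutoModelForCausalLM fails to load it, the checkpoint likely lacks a causal language-modeling head and a generation-capable model should be used instead.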