# Hugging Face Spaces page header captured during extraction:
# Spaces: (Space name not captured)
# Status at capture time: "Runtime error"
from transformers import pipeline

# Load the pre-trained or fine-tuned model.
# NOTE(review): "your_model_name" is a placeholder, not a real model id —
# pipeline() will raise when it tries to resolve it, which is the most
# likely cause of this Space's "Runtime error" status. Replace with a
# real Hub id (e.g. "gpt2") or a local checkpoint path.
model_pipeline = pipeline("text-generation", model="your_model_name")
# Function to generate responses
def respond(message, history=None, max_tokens=100, temperature=0.8, top_p=0.9):
    """
    Generates a meaningful response while avoiding repetition.

    Parameters:
    - message (str): User's input message.
    - history (list | None): List of previous (user, model) interactions.
      Mutated in place when supplied; a fresh list is created per call
      when omitted.
    - max_tokens (int): Maximum number of NEW tokens to generate.
    - temperature (float): Controls randomness (higher = more creative).
    - top_p (float): Controls nucleus sampling.

    Returns:
    - response (str): Generated response.
    """
    # BUG FIX: the original signature used `history=[]`, a mutable default
    # shared across every call — turns without an explicit history would
    # silently accumulate into one global list.
    if history is None:
        history = []

    message = message.strip()

    # Prevent repetition by checking history: bail out early if the user
    # repeats their previous message (case-insensitive).
    if history and message.lower() == history[-1][0].lower():
        return "Tafadhali uliza swali tofauti."

    # Generate response.
    # BUG FIX: `max_length` counts prompt + completion tokens; with long
    # prompts nothing would be generated. `max_new_tokens` matches the
    # documented meaning of `max_tokens`.
    response = model_pipeline(
        message,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=50256,  # GPT-2 EOS id — assumes a GPT-2-family tokenizer; TODO confirm
    )[0]["generated_text"].strip()

    # NOTE(review): text-generation pipelines typically echo the prompt at
    # the start of `generated_text`, so this equality check only catches the
    # case where nothing new was produced — verify against the actual model.
    # Ensure response is meaningful and not just repeating the input.
    if response.lower() == message.lower():
        response = "Samahani, siwezi kujibu hilo kwa sasa."

    # Store history for context (optional).
    history.append((message, response))
    return response
# Example conversation loop
if __name__ == "__main__":
    print("🤖 Chatbot Ready! Type 'quit' to exit.")
    chat_history = []
    while True:
        # FIX: strip whitespace so "  quit " still exits the loop.
        user_input = input("You: ").strip()
        if user_input.lower() == "quit":
            print("Chatbot: Karibu tena!")
            break
        # Robustness: don't send an empty message to the model.
        if not user_input:
            continue
        bot_response = respond(user_input, chat_history)
        print(f"Chatbot: {bot_response}")