Spaces:
Runtime error
Runtime error
File size: 1,830 Bytes
a32839b 52eb24f a32839b 52eb24f a32839b 52eb24f a32839b fe49e01 a32839b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
from transformers import pipeline
# Load the pre-trained or fine-tuned model.
# NOTE(review): "your_model_name" is a placeholder — replace with a real model id
# (e.g. a Hugging Face hub name) before running; loading happens at import time.
model_pipeline = pipeline("text-generation", model="your_model_name")
# Function to generate responses
def respond(message, history=None, max_tokens=100, temperature=0.8, top_p=0.9):
    """
    Generates a meaningful response while avoiding repetition.
    Parameters:
    - message (str): User's input message.
    - history (list | None): List of previous (user, model) interactions.
      Mutated in place so explicit callers keep the running context.
    - max_tokens (int): Maximum number of tokens to generate.
    - temperature (float): Controls randomness (higher = more creative).
    - top_p (float): Controls nucleus sampling.
    Returns:
    - response (str): Generated response.
    """
    # Fix: a mutable default ([]) is shared across all calls, silently leaking
    # conversation state between independent conversations. Use None sentinel.
    if history is None:
        history = []
    message = message.strip()
    # Prevent repetition by checking history (case-insensitive match on the
    # user's previous message).
    if history and message.lower() == history[-1][0].lower():
        return "Tafadhali uliza swali tofauti."
    # Generate response. Fix: do_sample=True is required for temperature/top_p
    # to take effect — without it the pipeline decodes greedily and both
    # parameters are ignored.
    response = model_pipeline(
        message,
        max_length=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        pad_token_id=50256,  # Ensures clean output for some models (GPT-2 EOS)
    )[0]["generated_text"].strip()
    # Ensure response is meaningful and not just repeating input
    if response.lower() == message.lower():
        response = "Samahani, siwezi kujibu hilo kwa sasa."
    # Store history for context (optional)
    history.append((message, response))
    return response
# Example conversation loop
if __name__ == "__main__":
print("🤖 Chatbot Ready! Type 'quit' to exit.")
chat_history = []
while True:
user_input = input("You: ")
if user_input.lower() == "quit":
print("Chatbot: Karibu tena!")
break
bot_response = respond(user_input, chat_history)
print(f"Chatbot: {bot_response}")
|