Futuresony committed on
Commit
a32839b
·
verified ·
1 Parent(s): 52eb24f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -15
app.py CHANGED
@@ -1,19 +1,58 @@
1
def respond(message, history, max_tokens, temperature, top_p):
    """Produce a reply for *message*, refusing to answer an identical repeat.

    Parameters:
    - message (str): user's input text.
    - history (list): previous (user, model) pairs; only the last entry is read.
    - max_tokens (int): cap on newly generated tokens.
    - temperature (float): sampling temperature.
    - top_p (float): nucleus-sampling probability mass.

    Returns:
    - str: the model's reply, or a canned Swahili notice.
    """
    cleaned = message.strip()

    # Identical consecutive question -> ask the user for something different.
    last_user_turn = history[-1][0].strip() if history else None
    if last_user_turn is not None and cleaned == last_user_turn:
        return "Tafadhali uliza swali tofauti."

    # NOTE(review): `client` is defined elsewhere in the project — presumably an
    # InferenceClient; verify against the surrounding file.
    generated = client.text_generation(
        cleaned,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    reply = generated.strip()

    # A verbatim echo of the input is not a useful answer — offer help instead.
    return "Samahani, naweza kusaidia vipi?" if reply.lower() == message.lower() else reply
 
 
1
from transformers import pipeline

# Load the text-generation model once at import time so every call to
# respond() reuses the same pipeline instance.
# NOTE(review): "your_model_name" is a placeholder — replace it with a real
# Hub model id (e.g. a fine-tuned checkpoint) before deploying.
model_pipeline = pipeline("text-generation", model="your_model_name")
5
+
6
+ # Function to generate responses
7
def respond(message, history=None, max_tokens=100, temperature=0.8, top_p=0.9):
    """
    Generate a meaningful response for *message* while avoiding repetition.

    Parameters:
    - message (str): User's input message.
    - history (list | None): Previous (user, model) interaction pairs. A fresh
      list is created when omitted; passing the same list across calls keeps
      conversational context.
    - max_tokens (int): Maximum number of NEW tokens to generate.
    - temperature (float): Controls randomness (higher = more creative).
    - top_p (float): Controls nucleus sampling.

    Returns:
    - response (str): Generated response.
    """
    # Bug fix: the original signature used `history=[]` — a mutable default is
    # shared across calls, so unrelated conversations leaked into each other.
    if history is None:
        history = []

    message = message.strip()

    # Prevent repetition: identical consecutive user message gets a canned reply.
    if history and message.lower() == history[-1][0].lower():
        return "Tafadhali uliza swali tofauti."

    # Generate a response. Bug fix: `max_length` counts prompt + generation, so
    # a long prompt could truncate output to nothing; `max_new_tokens` counts
    # only the generated continuation, which is what the parameter promises.
    response = model_pipeline(
        message,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=50256,  # GPT-2 eos id — ensures clean output for some models
    )[0]["generated_text"].strip()

    # Ensure the response is meaningful and not just an echo of the input.
    if response.lower() == message.lower():
        response = "Samahani, siwezi kujibu hilo kwa sasa."

    # Record the exchange so callers sharing a history list keep context.
    history.append((message, response))

    return response
45
+
46
# Minimal interactive driver for manual testing of respond().
if __name__ == "__main__":
    print("🤖 Chatbot Ready! Type 'quit' to exit.")
    chat_history = []

    # Keep reading turns until the user types 'quit' (case-insensitive).
    while (user_input := input("You: ")).lower() != "quit":
        print(f"Chatbot: {respond(user_input, chat_history)}")

    print("Chatbot: Karibu tena!")