Hafiza Maham committed on
Commit
d2a7141
·
1 Parent(s): 6cfb658

update app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -246
app.py CHANGED
@@ -10,31 +10,31 @@ import random
10
  import datetime
11
  from fuzzywuzzy import fuzz
12
 
13
- app = Flask(__name__)
14
  CORS(app)
15
 
16
  class EnhancedMultilingualEidQABot:
17
- def __init__(self, data_file='dataSet.json'):
18
- print("🔄 Loading multilingual models...")
19
  self.bi_encoder = None
20
  self.cross_encoder = None
21
- print("📖 Processing dataset...")
22
  self.data = self._load_dataset(data_file)
23
  self.knowledge_chunks = self._create_chunks()
24
  self.chunk_embeddings = None
25
  self.question_patterns = self._initialize_question_patterns()
26
- print(" Bot ready!\n")
 
27
  def _ensure_embeddings(self):
28
  if self.chunk_embeddings is None:
29
  self._load_models()
30
- print("🧠 Creating embeddings...")
31
- self.chunk_embeddings = self.bi_encoder.encode(
32
- [chunk['text'] for chunk in self.knowledge_chunks],
33
- convert_to_tensor=True,
34
- show_progress_bar=True
35
- )
36
-
37
-
38
  def _load_dataset(self, data_file):
39
  try:
40
  with open(data_file, 'r', encoding='utf-8') as f:
@@ -42,7 +42,7 @@ class EnhancedMultilingualEidQABot:
42
  except Exception as e:
43
  print(f"Error loading dataset: {e}")
44
  return []
45
-
46
  def _create_chunks(self):
47
  chunks = []
48
  for item in self.data:
@@ -55,72 +55,53 @@ class EnhancedMultilingualEidQABot:
55
  'score_boost': 1.0
56
  })
57
  if 'eid' in text.lower() or 'عید' in text:
58
- chunks.append({
59
- 'text': f"Eid information: {text}",
60
- 'tag': tag,
61
- 'type': 'enhanced',
62
- 'score_boost': 1.1
63
- })
64
  if 'prayer' in text.lower() or 'نماز' in text:
65
- chunks.append({
66
- 'text': f"Prayer information: {text}",
67
- 'tag': tag,
68
- 'type': 'enhanced',
69
- 'score_boost': 1.2
70
- })
71
  if 'qurbani' in text.lower() or 'قربانی' in text or 'sacrifice' in text.lower():
72
- chunks.append({
73
- 'text': f"Qurbani rules: {text}",
74
- 'tag': tag,
75
- 'type': 'enhanced',
76
- 'score_boost': 1.2
77
- })
78
  if 'funny' in tag.lower() or 'shair' in tag.lower():
79
- chunks.append({
80
- 'text': f"Fun fact: {text}",
81
- 'tag': tag,
82
- 'type': 'enhanced',
83
- 'score_boost': 0.9
84
- })
85
  if 'gaza' in text.lower() or 'غزہ' in text:
86
- chunks.append({
87
- 'text': f"Gaza context: {text}",
88
- 'tag': tag,
89
- 'type': 'enhanced',
90
- 'score_boost': 1.3
91
- })
92
  return chunks
 
93
  def _load_models(self):
94
  if self.bi_encoder is None:
95
- print("🔄 Loading bi-encoder model...")
96
  self.bi_encoder = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
97
  if self.cross_encoder is None:
98
- print("🔄 Loading cross-encoder model...")
99
  self.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
100
 
101
-
102
  def _initialize_question_patterns(self):
103
- return {
104
- 'greeting': ['eid mubarak', 'عید مبارک', 'hello', 'hi', 'salaam', 'سلام', 'mubarak', 'eid maz', 'eid mub', 'id mubarak'],
105
- 'prayer': ['namaz', 'prayer', 'salah', 'eid ki namaz', 'نماز', 'how to pray', 'kaise parhein', 'nmaz', 'nmax', 'namaaz', 'salat'],
106
- 'qurbani': ['qurbani', 'sacrifice', 'bakra', 'janwar', 'قربانی', 'ذبح', 'qurbni', 'kurbani', 'sacrifise'],
107
- 'rules': ['rules', 'ahkam', 'قوانین', 'kya karna', 'what to do', 'kaise karna', 'rulez', 'ahkaam'],
108
- 'time': ['time', 'waqt', 'kab', 'وقت', 'when', 'konsa din', 'kab hai'],
109
- 'story': ['story', 'kahani', 'ibrahim', 'ismail', 'قصہ', 'واقعہ', 'history', 'kahaniya'],
110
- 'food': ['food', 'khana', 'mithai', 'کھانا', 'سویاں', 'biryani', 'khane', 'meethi'],
111
- 'funny': ['funny', 'shair', 'mazah', 'مزاح', 'joke', 'shairi', 'شاعری', 'mazak', 'maza'],
112
- 'gaza': ['gaza', 'palestine', 'غزہ', 'فلسطین', 'war zone', 'gazah'],
113
- 'general': ['kya hai', 'what is', 'بتائیں', 'معلومات', 'eid kya', 'عید کیا', 'eid hai']
114
- }
115
-
 
 
 
 
 
 
116
  def _clean_input(self, text: str) -> str:
117
  text = re.sub(r'\s+', ' ', text.strip().lower())
118
- text = re.sub(r'[^\w\s؟!]', '', text) # Keep Urdu/English chars, spaces, and basic punctuation
119
  return text
120
-
121
  def _fuzzy_match(self, word: str, keywords: List[str]) -> bool:
122
  return any(fuzz.ratio(word, keyword) > 80 for keyword in keywords)
123
-
124
  def _detect_question_type(self, question: str) -> str:
125
  cleaned_question = self._clean_input(question)
126
  words = cleaned_question.split()
@@ -128,27 +109,17 @@ class EnhancedMultilingualEidQABot:
128
  if any(self._fuzzy_match(word, keywords) for word in words):
129
  return category
130
  return 'general'
131
-
132
  def _get_contextual_boost(self, chunk: Dict, question_type: str) -> float:
133
  boost = chunk.get('score_boost', 1.0)
134
- if question_type == 'greeting' and 'greeting' in chunk['tag'].lower():
135
- boost *= 1.4
136
- elif question_type == 'prayer' and 'prayer' in chunk['tag'].lower():
137
- boost *= 1.3
138
- elif question_type == 'qurbani' and ('qurbani' in chunk['tag'].lower() or 'sacrifice' in chunk['tag'].lower()):
139
- boost *= 1.3
140
- elif question_type == 'story' and 'story' in chunk['tag'].lower():
141
- boost *= 1.2
142
- elif question_type == 'funny' and 'funny' in chunk['tag'].lower():
143
- boost *= 1.1
144
- elif question_type == 'gaza' and 'gaza' in chunk['tag'].lower():
145
  boost *= 1.3
146
  return boost
147
-
148
  def _is_time_sensitive(self, question: str) -> bool:
149
- time_keywords = ['time', 'waqt', 'kab', 'وقت', 'when', 'konsa din', 'kab hai']
150
  return any(self._fuzzy_match(word, time_keywords) for word in question.lower().split())
151
-
152
  def answer_question(self, question: str) -> str:
153
  self._load_models()
154
  self._ensure_embeddings()
@@ -156,191 +127,68 @@ class EnhancedMultilingualEidQABot:
156
  cleaned_question = self._clean_input(question)
157
  if not cleaned_question:
158
  return self._get_default_response('empty')
159
-
160
  question_type = self._detect_question_type(cleaned_question)
161
  question_embedding = self.bi_encoder.encode(cleaned_question, convert_to_tensor=True)
162
  cos_scores = util.cos_sim(question_embedding, self.chunk_embeddings)[0]
163
-
164
- boosted_scores = []
165
- for i, score in enumerate(cos_scores):
166
- boost = self._get_contextual_boost(self.knowledge_chunks[i], question_type)
167
- boosted_scores.append(score * boost)
168
-
169
- boosted_scores = torch.tensor(boosted_scores)
170
  top_k = min(15, len(self.knowledge_chunks))
171
- top_results = torch.topk(boosted_scores, k=top_k)
172
- top_indices = top_results.indices.tolist()
173
- top_chunks = [self.knowledge_chunks[i]['text'] for i in top_indices]
174
  top_scores = top_results.values.tolist()
175
-
176
  rerank_pairs = [(cleaned_question, chunk) for chunk in top_chunks]
177
  rerank_scores = self.cross_encoder.predict(rerank_pairs)
178
-
179
- combined_scores = []
180
- for i, rerank_score in enumerate(rerank_scores):
181
- combined_score = (rerank_score * 0.7) + (top_scores[i] * 0.3)
182
- combined_scores.append(combined_score)
183
-
184
  best_idx = max(range(len(combined_scores)), key=lambda i: combined_scores[i])
185
  best_chunk = top_chunks[best_idx]
186
- best_score = combined_scores[best_idx]
187
-
188
- avg_score = sum(combined_scores) / len(combined_scores)
189
- threshold = avg_score * 0.8
190
-
191
- if best_score < threshold:
192
- return self._get_default_response(question_type)
193
-
194
- # Clean the response - remove prefixes like "Eid information:", "Prayer information:", etc.
195
- response = best_chunk
196
- prefixes_to_remove = [
197
- "Eid information: ",
198
- "Prayer information: ",
199
- "Qurbani rules: ",
200
- "Fun fact: ",
201
- "Gaza context: "
202
- ]
203
-
204
- for prefix in prefixes_to_remove:
205
- if response.startswith(prefix):
206
- response = response[len(prefix):]
207
  break
208
-
209
  if self._is_time_sensitive(cleaned_question):
210
- current_date = datetime.datetime.now()
211
- islamic_date = "10th Dhul-Hijjah" # Placeholder
212
- response += f"\n\n🕒 آج {current_date.strftime('%B %d, %Y')} ہے۔ عید الاضحیٰ عام طور پر {islamic_date} کو ہوتی ہے۔"
213
-
214
- response += "\n\n This is a demo. I'm working on this project, and its continuation depends on user feedback. Please share your suggestions by visiting our 'Contact Us' screen."
215
- return response
216
-
217
  def _get_default_response(self, question_type: str) -> str:
218
- defaults = {
219
- 'greeting': "🌙Eid Mubarak! May Allah accept your prayers.",
220
- 'prayer': "🕌 Eid prayer is 2 rakahs with extra takbeerat. Consult scholars for details.",
221
- 'qurbani': "🐐 Qurbani is obligatory for those who meet nisab. The animal must be healthy.",
222
- 'rules': "📜 Qurbani rules: Animal age, health, and intention are key.",
223
- 'time': "⏰ Eid ul-Adha is from 10th to 12th Dhul-Hijjah.",
224
- 'story': "📖 Eid ul-Adha commemorates Prophet Ibrahim's (AS) sacrifice.",
225
- 'food': "🍲 Eid foods include sheer khurma, biryani, and sweets.",
226
- 'funny': "😄 Eid fun: Eat sweets, collect Eidi!",
227
- 'gaza': "🤲 Pray for the people of Gaza. They are in hardship.",
228
- 'empty': " Ask something about Eid!",
229
- 'general': "🌟I am your Eid Assistant, created by OCi Lab . I am currently in progress and have limited data, focusing on small fun activities for Eid. I will improve myself after Eid"
230
- }
231
- return defaults.get(question_type, defaults['general'])
232
-
233
- def get_random_eid_fact(self) -> str:
234
- facts = [chunk for chunk in self.knowledge_chunks if chunk['tag'] in ['Eid_Overview', 'Prophet_Story', 'Eid_Prayer', 'Qurbani_Rules']]
235
- if facts:
236
- fact_text = random.choice(facts)['text']
237
- # Clean prefixes from random facts too
238
- prefixes_to_remove = [
239
- "Eid information: ",
240
- "Prayer information: ",
241
- "Qurbani rules: ",
242
- "Fun fact: ",
243
- "Gaza context: "
244
- ]
245
- for prefix in prefixes_to_remove:
246
- if fact_text.startswith(prefix):
247
- fact_text = fact_text[len(prefix):]
248
- break
249
- return f"💡 {fact_text}"
250
- return "🌙 Eid Mubarak!"
251
-
252
- def get_random_greeting(self) -> str:
253
- greetings = [chunk for chunk in self.knowledge_chunks if 'greeting' in chunk['tag'].lower()]
254
- if greetings:
255
- greeting_text = random.choice(greetings)['text']
256
- # Clean prefixes from greetings too
257
- prefixes_to_remove = [
258
- "Eid information: ",
259
- "Prayer information: ",
260
- "Qurbani rules: ",
261
- "Fun fact: ",
262
- "Gaza context: "
263
- ]
264
- for prefix in prefixes_to_remove:
265
- if greeting_text.startswith(prefix):
266
- greeting_text = greeting_text[len(prefix):]
267
- break
268
- return f"🎉 {greeting_text}"
269
- return "🌙 Eid Mubarak!"
270
-
271
- def get_random_shair(self) -> str:
272
- shairs = [chunk for chunk in self.knowledge_chunks if 'funny_shair_o_shairi' in chunk['tag'].lower()]
273
- if shairs:
274
- shair_text = random.choice(shairs)['text']
275
- # Clean prefixes from shairs too
276
- prefixes_to_remove = [
277
- "Eid information: ",
278
- "Prayer information: ",
279
- "Qurbani rules: ",
280
- "Fun fact: ",
281
- "Gaza context: "
282
- ]
283
- for prefix in prefixes_to_remove:
284
- if shair_text.startswith(prefix):
285
- shair_text = shair_text[len(prefix):]
286
- break
287
- return f"😄 شاعری: {shair_text}"
288
- return "😂 No shairi found, just Eid Mubarak!"
289
-
290
- def get_contextual_info(self) -> str:
291
- current_date = datetime.datetime.now()
292
- islamic_date = "10th Dhul-Hijjah" # Placeholder
293
- return f"🕒 {current_date.strftime('%B %d, %Y')}۔{islamic_date} "
294
 
295
  # Instantiate the bot
296
  bot = EnhancedMultilingualEidQABot('dataSet.json')
297
 
298
- # Flask Routes
299
  @app.route('/ask', methods=['POST'])
300
- def ask_question():
301
- try:
302
- data = request.get_json()
303
- question = data.get('question', '')
304
- if not question:
305
- return jsonify({'answer': bot._get_default_response('empty')})
306
- answer = bot.answer_question(question)
307
- return jsonify({'answer': answer})
308
- except Exception as e:
309
- return jsonify({'error': str(e), 'answer': 'Sorry, something went wrong!'})
310
-
311
- @app.route('/random', methods=['GET'])
312
- def random_fact():
313
- fact = bot.get_random_eid_fact()
314
- return jsonify({'answer': fact})
315
-
316
- @app.route('/greet', methods=['GET'])
317
- def random_greeting():
318
- greeting = bot.get_random_greeting()
319
- return jsonify({'answer': greeting})
320
-
321
- @app.route('/shair', methods=['GET'])
322
- def random_shair():
323
- shair = bot.get_random_shair()
324
- return jsonify({'answer': shair})
325
-
326
- @app.route('/context', methods=['GET'])
327
- def contextual_info():
328
- info = bot.get_contextual_info()
329
- return jsonify({'answer': info})
330
- @app.route('/warmup', methods=['GET'])
331
- def warmup():
332
- try:
333
- bot._load_models()
334
- bot._ensure_embeddings()
335
- return jsonify({'status': 'Models warmed up and embeddings ready.'})
336
- except Exception as e:
337
- return jsonify({'error': str(e)})
338
-
339
  @app.route('/')
340
  def home():
341
- return "API is live!"
342
-
343
 
344
- if __name__ == '__main__':
345
- port = int(os.environ.get('PORT', 5000))
346
- app.run(host='0.0.0.0', port=port)
 
10
  import datetime
11
  from fuzzywuzzy import fuzz
12
 
13
+ app = Flask(__name__) # Fixed: __name__ instead of name
14
  CORS(app)
15
 
16
  class EnhancedMultilingualEidQABot:
17
+ def __init__(self, data_file='dataSet.json'): # Fixed: __init__ instead of init
18
+ print("\U0001F504 Loading multilingual models...")
19
  self.bi_encoder = None
20
  self.cross_encoder = None
21
+ print("\U0001F4D6 Processing dataset...")
22
  self.data = self._load_dataset(data_file)
23
  self.knowledge_chunks = self._create_chunks()
24
  self.chunk_embeddings = None
25
  self.question_patterns = self._initialize_question_patterns()
26
+ print("\u2705 Bot ready!\n")
27
+
28
  def _ensure_embeddings(self):
29
  if self.chunk_embeddings is None:
30
  self._load_models()
31
+ print("\U0001F9E0 Creating embeddings...")
32
+ self.chunk_embeddings = self.bi_encoder.encode(
33
+ [chunk['text'] for chunk in self.knowledge_chunks],
34
+ convert_to_tensor=True,
35
+ show_progress_bar=True
36
+ )
37
+
 
38
  def _load_dataset(self, data_file):
39
  try:
40
  with open(data_file, 'r', encoding='utf-8') as f:
 
42
  except Exception as e:
43
  print(f"Error loading dataset: {e}")
44
  return []
45
+
46
  def _create_chunks(self):
47
  chunks = []
48
  for item in self.data:
 
55
  'score_boost': 1.0
56
  })
57
  if 'eid' in text.lower() or 'عید' in text:
58
+ chunks.append({'text': f"Eid info: {text}", 'tag': tag, 'type': 'enhanced', 'score_boost': 1.1})
 
 
 
 
 
59
  if 'prayer' in text.lower() or 'نماز' in text:
60
+ chunks.append({'text': f"Prayer info: {text}", 'tag': tag, 'type': 'enhanced', 'score_boost': 1.2})
 
 
 
 
 
61
  if 'qurbani' in text.lower() or 'قربانی' in text or 'sacrifice' in text.lower():
62
+ chunks.append({'text': f"Qurbani info: {text}", 'tag': tag, 'type': 'enhanced', 'score_boost': 1.2})
 
 
 
 
 
63
  if 'funny' in tag.lower() or 'shair' in tag.lower():
64
+ chunks.append({'text': f"Fun: {text}", 'tag': tag, 'type': 'enhanced', 'score_boost': 0.9})
 
 
 
 
 
65
  if 'gaza' in text.lower() or 'غزہ' in text:
66
+ chunks.append({'text': f"Gaza info: {text}", 'tag': tag, 'type': 'enhanced', 'score_boost': 1.3})
 
 
 
 
 
67
  return chunks
68
+
69
  def _load_models(self):
70
  if self.bi_encoder is None:
71
+ print("\U0001F504 Loading bi-encoder...")
72
  self.bi_encoder = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
73
  if self.cross_encoder is None:
74
+ print("\U0001F504 Loading cross-encoder...")
75
  self.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
76
 
 
77
  def _initialize_question_patterns(self):
78
+ tag_keywords = {}
79
+ for chunk in self.data:
80
+ tag = chunk.get("tag", "").lower()
81
+ if tag not in tag_keywords:
82
+ tag_keywords[tag] = set()
83
+ tag_keywords[tag].update(tag.replace('_', ' ').split())
84
+
85
+ # Heuristics
86
+ if "greeting" in tag:
87
+ tag_keywords[tag].update(["hi", "hello", "salaam", "eid mubarak", "السلام"])
88
+ elif "prayer" in tag:
89
+ tag_keywords[tag].update(["prayer", "namaz", "salah", "نماز"])
90
+ elif "qurbani" in tag or "sacrifice" in tag:
91
+ tag_keywords[tag].update(["qurbani", "sacrifice", "janwar", "bakra", "قربانی"])
92
+ elif "gaza" in tag:
93
+ tag_keywords[tag].update(["gaza", "غزہ", "palestine", "فلسطین"])
94
+
95
+ return {k: list(v) for k, v in tag_keywords.items()}
96
+
97
  def _clean_input(self, text: str) -> str:
98
  text = re.sub(r'\s+', ' ', text.strip().lower())
99
+ text = re.sub(r'[^\w\s؟!]', '', text)
100
  return text
101
+
102
  def _fuzzy_match(self, word: str, keywords: List[str]) -> bool:
103
  return any(fuzz.ratio(word, keyword) > 80 for keyword in keywords)
104
+
105
  def _detect_question_type(self, question: str) -> str:
106
  cleaned_question = self._clean_input(question)
107
  words = cleaned_question.split()
 
109
  if any(self._fuzzy_match(word, keywords) for word in words):
110
  return category
111
  return 'general'
112
+
113
  def _get_contextual_boost(self, chunk: Dict, question_type: str) -> float:
114
  boost = chunk.get('score_boost', 1.0)
115
+ if question_type in chunk['tag'].lower():
 
 
 
 
 
 
 
 
 
 
116
  boost *= 1.3
117
  return boost
118
+
119
  def _is_time_sensitive(self, question: str) -> bool:
120
+ time_keywords = ['time', 'waqt', 'kab', 'when', 'کب', 'وقت']
121
  return any(self._fuzzy_match(word, time_keywords) for word in question.lower().split())
122
+
123
  def answer_question(self, question: str) -> str:
124
  self._load_models()
125
  self._ensure_embeddings()
 
127
  cleaned_question = self._clean_input(question)
128
  if not cleaned_question:
129
  return self._get_default_response('empty')
130
+
131
  question_type = self._detect_question_type(cleaned_question)
132
  question_embedding = self.bi_encoder.encode(cleaned_question, convert_to_tensor=True)
133
  cos_scores = util.cos_sim(question_embedding, self.chunk_embeddings)[0]
134
+
135
+ boosted_scores = [score * self._get_contextual_boost(self.knowledge_chunks[i], question_type)
136
+ for i, score in enumerate(cos_scores)]
137
+
 
 
 
138
  top_k = min(15, len(self.knowledge_chunks))
139
+ top_results = torch.topk(torch.tensor(boosted_scores), k=top_k)
140
+ top_chunks = [self.knowledge_chunks[i]['text'] for i in top_results.indices.tolist()]
 
141
  top_scores = top_results.values.tolist()
142
+
143
  rerank_pairs = [(cleaned_question, chunk) for chunk in top_chunks]
144
  rerank_scores = self.cross_encoder.predict(rerank_pairs)
145
+
146
+ combined_scores = [(rerank_scores[i] * 0.7 + top_scores[i] * 0.3) for i in range(len(rerank_scores))]
 
 
 
 
147
  best_idx = max(range(len(combined_scores)), key=lambda i: combined_scores[i])
148
  best_chunk = top_chunks[best_idx]
149
+
150
+ for prefix in ["Eid info: ", "Prayer info: ", "Qurbani info: ", "Fun: ", "Gaza info: "]:
151
+ if best_chunk.startswith(prefix):
152
+ best_chunk = best_chunk[len(prefix):]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
  break
154
+
155
  if self._is_time_sensitive(cleaned_question):
156
+ date = datetime.datetime.now().strftime('%B %d, %Y')
157
+ best_chunk += f"\n\n🕒 آج {date} ہے۔ عید الاضحیٰ عام طور پر 10th Dhul-Hijjah کو ہوتی ہے۔"
158
+
159
+ return best_chunk + "\n\n This is a demo based on a limited dataset. Please visit our Contact Us screen and let us know if we should move forward with this project. Your feedback matters!"
160
+
 
 
161
  def _get_default_response(self, question_type: str) -> str:
162
+ return {
163
+ 'empty': " Ask something about Eid!",
164
+ 'general': "🌟 I'm your Eid Assistant. Ask me anything about Eid!"
165
+ }.get(question_type, "🌟 I'm your Eid Assistant. Ask me anything about Eid!")
166
+
167
+ def get_random_by_tag(self, tag_keyword: str) -> str:
168
+ matches = [c['text'] for c in self.knowledge_chunks if tag_keyword in c['tag'].lower()]
169
+ return random.choice(matches) if matches else "No info found."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
  # Instantiate the bot
172
  bot = EnhancedMultilingualEidQABot('dataSet.json')
173
 
 
174
  @app.route('/ask', methods=['POST'])
175
+ def ask():
176
+ question = request.get_json().get('question', '')
177
+ return jsonify({'answer': bot.answer_question(question)})
178
+
179
+ @app.route('/tags', methods=['GET'])
180
+ def tags():
181
+ unique_tags = sorted({chunk['tag'] for chunk in bot.knowledge_chunks})
182
+ return jsonify({'tags': unique_tags})
183
+
184
+ @app.route('/tag/<tag>', methods=['GET'])
185
+ def get_by_tag(tag):
186
+ results = [chunk['text'] for chunk in bot.knowledge_chunks if tag.lower() in chunk['tag'].lower()]
187
+ return jsonify({'results': results})
188
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  @app.route('/')
190
  def home():
191
+ return "✅ Eid Assistant API is running."
 
192
 
193
+ if __name__ == '__main__': # Fixed: __name__ and '__main__' instead of name and main
194
+ app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))