Sathvika-Alla committed on
Commit 7b18ba8 · verified · 1 Parent(s): c253409

Upload folder using huggingface_hub

.DS_Store ADDED
Binary file (6.15 kB).
 
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - main
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
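The workflow above deploys via `gradio deploy`, while the commit message mentions uploading the folder with `huggingface_hub`. For reference, a minimal sketch of that manual upload path; the Space id and authentication are placeholders, not taken from this commit.

# Hypothetical one-off upload of the project folder to the Space,
# roughly what "Upload folder using huggingface_hub" amounts to.
from huggingface_hub import HfApi

api = HfApi()  # assumes a token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    folder_path=".",                                # local project root
    repo_id="Sathvika-Alla/TAL-LocalRAG-Chatbot",   # placeholder Space id
    repo_type="space",
)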
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: TAL LocalRAG Chatbot
- emoji: 🐢
- colorFrom: pink
- colorTo: pink
+ title: TAL-LocalRAG-Chatbot
+ app_file: RagImplementation.py
  sdk: gradio
- sdk_version: 5.32.1
- app_file: app.py
- pinned: false
+ sdk_version: 5.31.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
RagImplementation.py ADDED
@@ -0,0 +1,445 @@
+ import os
+ import json
+ import re
+ import gradio as gr
+ from transformers import pipeline, AutoTokenizer
+ from langchain_core.documents import Document
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_core.prompts import ChatPromptTemplate
+ from typing import List, TypedDict
+ from langgraph.graph import StateGraph, START
+ from dotenv import load_dotenv
+
+ # --- Configuration ---
+
+ load_dotenv()
+ os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN", "")  # "" fallback avoids a TypeError when the token is unset
+ file_path = "converters_with_links_and_pricelist.json"  # the catalogue committed in this repo, not a machine-specific absolute path
+ try:
+     with open(file_path, 'r', encoding='utf-8') as f:
+         product_data = json.load(f)
+ except Exception as e:
+     print(f"Error loading product data: {e}")
+     product_data = {}
+
+ tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+ max_length = tokenizer.model_max_length
+
+ docs = [Document(page_content=str(value), metadata={"source": key}) for key, value in product_data.items()]
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+ vector_store = FAISS.from_documents(docs, embeddings)
+ chatbot = pipeline("text-generation", model="facebook/blenderbot-400M-distill")
+
+ # --- Helper Functions ---
+
+ def parse_float(s):
+     """Convert a string with either dot or comma as decimal separator to float."""
+     try:
+         if isinstance(s, (list, tuple)):
+             s = s[0]  # for size fields split by '*'
+         return float(str(s).replace(',', '.').strip())
+     except Exception:
+         return float('inf')  # fallback for missing or invalid values
+
+ def normalize_artnr(artnr):
+     """Convert ARTNR to string for robust matching."""
+     try:
+         return str(int(float(artnr)))
+     except Exception:
+         return str(artnr)
+
+ def get_product_by_artnr(artnr, tech_info):
+     artnr_str = normalize_artnr(artnr)
+     for value in tech_info.values():
+         if normalize_artnr(value.get("ARTNR", "")) == artnr_str:
+             return value
+     return None
+
+ def extract_converter_and_lamp(user_message: str):
+     match = re.search(r"how many (\w+) lamps?.*converter (\d+)", user_message.lower())
+     if match:
+         lamp_name = match.group(1)
+         converter_number = match.group(2)
+         return lamp_name, converter_number
+     return None, None
+
+ def get_technical_fit_info(product_data: dict) -> dict:
+     results = {}
+     for key, value in product_data.items():
+         results[key] = {
+             "TYPE": value.get("TYPE", "N/A"),
+             "ARTNR": value.get("ARTNR", "N/A"),
+             "CONVERTER DESCRIPTION": value.get("CONVERTER DESCRIPTION:", "N/A"),
+             "STRAIN RELIEF": value.get("STRAIN RELIEF", "N/A"),
+             "LOCATION": value.get("LOCATION", "N/A"),
+             "DIMMABILITY": value.get("DIMMABILITY", "N/A"),
+             "EFFICIENCY": value.get("EFFICIENCY @full load", "N/A"),
+             "OUTPUT VOLTAGE": value.get("OUTPUT VOLTAGE (V)", "N/A"),
+             "INPUT VOLTAGE": value.get("NOM. INPUT VOLTAGE (V)", "N/A"),
+             "SIZE": value.get("SIZE: L*B*H (mm)", "N/A"),
+             "WEIGHT": value.get("Gross Weight", "N/A"),
+             "Listprice": value.get("Listprice", "N/A"),
+             "LAMPS": value.get("lamps", {}),
+             "PDF_LINK": value.get("pdf_link", "N/A")
+         }
+     return results
+
+ tech_info = get_technical_fit_info(product_data)
+
+ def get_lamp_quantity(converter_number: str, lamp_name: str, tech_info: dict) -> str:
+     v = get_product_by_artnr(converter_number, tech_info)
+     if not v:
+         return f"Sorry, I could not find converter {converter_number}."
+     for lamp_key, lamp_vals in v["LAMPS"].items():
+         if lamp_name.lower() in lamp_key.lower():
+             min_val = lamp_vals.get("min", "N/A")
+             max_val = lamp_vals.get("max", "N/A")
+             if min_val == max_val:
+                 return f"You can use {min_val} {lamp_key} lamp(s) with converter {converter_number}."
+             else:
+                 return f"You can use between {min_val} and {max_val} {lamp_key} lamp(s) with converter {converter_number}."
+     return f"Sorry, no data found for lamp '{lamp_name}' with converter {converter_number}."
+
+ def get_recommended_converter(user_message, tech_info):
+     # Example: "I need a 24V converter for 2x 14.4W LEDLINE. Which one should I use?"
+     match = re.search(r"(\d+)\s*x\s*([\d.,]+)\s*w\s*(\w+)", user_message.lower())
+     if not match:
+         return None
+     num_lamps = int(match.group(1))
+     wattage = float(match.group(2).replace(',', '.'))
+     lamp_type = match.group(3)
+     candidates = []
+     for v in tech_info.values():
+         if "24v" in v["TYPE"].lower():
+             for lamp, vals in v["LAMPS"].items():
+                 lamp_norm = lamp.lower().replace(',', '.')
+                 wattage_str = str(wattage).replace(',', '.')
+                 if lamp_type.lower() in lamp_norm and wattage_str in lamp_norm:
+                     max_lamps = float(str(vals.get("max", 0)).replace(',', '.'))
+                     if max_lamps >= num_lamps:
+                         candidates.append(v)
+     if not candidates:
+         return f"Sorry, I couldn't find a 24V converter that supports {num_lamps}x {wattage}W {lamp_type}."
+     else:
+         return "\n".join([
+             f"You can use {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}) for {num_lamps}x {wattage}W {lamp_type}."
+             for v in candidates
+         ])
+
+ def answer_technical_question(question: str, tech_info: dict) -> str:
+     q = question.lower()
+     # Use-case: "I need a 24V converter for 2x 14.4W LEDLINE" (and similar)
+     if re.search(r"\d+\s*x\s*[\d.,]+\s*w\s*\w+", q):
+         result = get_recommended_converter(question, tech_info)
+         if result:
+             return result
+     # Outdoor installation
+     if "outdoor" in q:
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})"
+                           for v in tech_info.values()
+                           if "outdoor" in v["LOCATION"].lower() or "in&outdoor" in v["LOCATION"].lower()])
+     # Most efficient 24V converter
+     if "most efficient" in q and "24v" in q:
+         candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower()]
+         if not candidates:
+             return "No 24V converters found."
+         best = max(
+             candidates,
+             key=lambda x: float(str(x["EFFICIENCY"]).replace(',', '.')) if str(x["EFFICIENCY"]).replace('.', '').replace(',','').isdigit() else 0
+         )
+         return f"The most efficient 24V converter is {best['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(best['ARTNR'])}) with efficiency {best['EFFICIENCY']}."
+     # 24V converter with dimming
+     if "24v" in q and ("dimmable" in q or "dimming" in q or "supports dimming" in q):
+         candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower() and "dimmable" in v["DIMMABILITY"].lower()]
+         if not candidates:
+             return "No 24V converters with dimming found."
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in candidates])
+     # Recommend for 19.2W LEDLINE
+     if "19.2w ledline" in q:
+         candidates = []
+         for v in tech_info.values():
+             for lamp, vals in v["LAMPS"].items():
+                 if "ledline" in lamp.lower() and "19.2w" in lamp.lower():
+                     candidates.append(f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}) supports {lamp}")
+         return "\n".join(candidates) if candidates else "No converter found for 19.2W LEDLINE."
+     # Strain relief
+     if "strain relief" in q:
+         candidates = [v for v in tech_info.values() if v["STRAIN RELIEF"].lower() == "yes"]
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in candidates])
+     # Comparison
+     if "compare" in q:
+         numbers = re.findall(r'\d+', question)
+         if len(numbers) >= 2:
+             a = get_product_by_artnr(numbers[0], tech_info)
+             b = get_product_by_artnr(numbers[1], tech_info)
+             if a and b:
+                 return (f"Comparison:\n"
+                         f"- {a['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(a['ARTNR'])}): {a['DIMMABILITY']}, {a['LOCATION']}, Efficiency {a['EFFICIENCY']}\n"
+                         f"- {b['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(b['ARTNR'])}): {b['DIMMABILITY']}, {b['LOCATION']}, Efficiency {b['EFFICIENCY']}")
+     # IP20 vs IP67
+     if "ip20 and ip67" in q:
+         ip20 = [v for v in tech_info.values() if "ip20" in str(v["CONVERTER DESCRIPTION"]).lower()]
+         ip67 = [v for v in tech_info.values() if "ip67" in str(v["CONVERTER DESCRIPTION"]).lower()]
+         return (f"IP20 converters:\n" + "\n".join([f"- {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in ip20]) + "\n\n" +
+                 f"IP67 converters:\n" + "\n".join([f"- {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in ip67]))
+     # More than 1 LEDLINE 9.6W
+     if "support more than 1 ledline 9.6w" in q:
+         candidates = []
+         for v in tech_info.values():
+             for lamp, vals in v["LAMPS"].items():
+                 if "ledline" in lamp.lower() and "9.6w" in lamp.lower() and float(str(vals.get("max", 0)).replace(',', '.')) > 1:
+                     candidates.append(f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}) supports up to {vals['max']} {lamp}")
+         return "\n".join(candidates) if candidates else "No converter supports more than 1 LEDLINE 9.6W lamp."
+     # Smallest 24V converters
+     if "smallest 24v" in q:
+         candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower()]
+         if not candidates:
+             return "No 24V converters found."
+         smallest = min(
+             candidates,
+             key=lambda x: parse_float(str(x["SIZE"].split('*')[0]))
+         )
+         return f"Smallest 24V converter: {smallest['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(smallest['ARTNR'])}), size: {smallest['SIZE']}"
+     # Under 100mm length
+     if "under 100mm" in q or ("length" in q and "100" in q):
+         candidates = [v for v in tech_info.values() if parse_float(str(v["SIZE"].split('*')[0])) < 100]
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}), size: {v['SIZE']}" for v in candidates])
+     # Use-case: 2x 14.4W LEDLINE
+     if "2x 14.4w ledline" in q:
+         for v in tech_info.values():
+             for lamp, vals in v["LAMPS"].items():
+                 if "ledline" in lamp.lower() and "14.4w" in lamp.lower() and float(str(vals.get("max", 0)).replace(',', '.')) >= 2:
+                     return f"You can use {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}) for 2x 14.4W LEDLINE."
+     # Can I use converter X with Y lamp
+     if "can i use converter" in q and "ledline" in q:
+         numbers = re.findall(r'\d+', question)
+         if numbers:
+             v = get_product_by_artnr(numbers[0], tech_info)
+             if v:
+                 for lamp, vals in v["LAMPS"].items():
+                     if "ledline" in lamp.lower():
+                         return f"Converter {numbers[0]} supports up to {vals.get('max', 0)} {lamp}."
+     # IP67 and 1-10V dimming
+     if "ip67" in q and "1-10v" in q:
+         candidates = [v for v in tech_info.values() if "ip67" in str(v["CONVERTER DESCRIPTION"]).lower() and "1-10v" in str(v["DIMMABILITY"].lower())]
+         if candidates:
+             return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in candidates])
+     # Built-in strain relief
+     if "built-in strain relief" in q:
+         return answer_technical_question("Which converters have strain relief included?", tech_info)
+     # Indoor and outdoor
+     if "indoor and outdoor" in q:
+         candidates = [v for v in tech_info.values() if "in&outdoor" in v["LOCATION"].lower()]
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in candidates])
+     # Datasheet/documentation
+     if "datasheet" in q or "documentation" in q:
+         numbers = re.findall(r'\d+', question)
+         if numbers:
+             v = get_product_by_artnr(numbers[0], tech_info)
+             if v and v["PDF_LINK"] != "N/A":
+                 return f"Datasheet for {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}): {v['PDF_LINK']}"
+     # Pricing
+     if "price" in q or "affordable" in q:
+         if "most affordable 24v" in q:
+             candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower() and str(v["Listprice"]) != "N/A"]
+             if candidates:
+                 cheapest = min(candidates, key=lambda x: float(str(x["Listprice"]).replace(',', '.')))
+                 return f"Most affordable 24V converter: {cheapest['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(cheapest['ARTNR'])}), price: {cheapest['Listprice']}"
+         elif "price below" in q:
+             price_match = re.search(r'€(\d+)', question)
+             price = float(price_match.group(1)) if price_match else 65
+             candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower() and str(v["Listprice"]) != "N/A" and float(str(v["Listprice"]).replace(',', '.')) < price]
+             return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}), price: {v['Listprice']}" for v in candidates])
+     # Weight
+     if "weight" in q:
+         numbers = re.findall(r'\d+', question)
+         if numbers:
+             v = get_product_by_artnr(numbers[0], tech_info)
+             if v and v["WEIGHT"] != "N/A":
+                 return f"Weight of {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}): {v['WEIGHT']} kg"
+     # Input voltage
+     if "input voltage" in q:
+         numbers = re.findall(r'\d+', question)
+         if numbers:
+             v = get_product_by_artnr(numbers[0], tech_info)
+             if v and v["INPUT VOLTAGE"] != "N/A":
+                 return f"Input voltage range of {v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])}): {v['INPUT VOLTAGE']}"
+     # All 24V converters
+     if "show me all 24v converters" in q:
+         candidates = [v for v in tech_info.values() if "24v" in v["TYPE"].lower()]
+         return "\n".join([f"{v['CONVERTER DESCRIPTION']} (ARTNR: {normalize_artnr(v['ARTNR'])})" for v in candidates])
+     return None
+
+ # --- Prompt and Graph ---
+
+ custom_prompt = ChatPromptTemplate.from_messages([
+     ("system", "You are a helpful technical assistant for TAL BV and assist users in finding information. Use the provided documentation to answer questions accurately and with necessary sources."),
+     ("human", """Context: {context}
+ Question: {question}
+ Answer:""")
+ ])
+
+ class State(TypedDict):
+     question: str
+     context: List[Document]
+     answer: str
+
+ def retrieve(state: State):
+     retriever = vector_store.as_retriever(search_kwargs={"k": 3})
+     retrieved_docs = retriever.invoke(state["question"])
+     return {"context": retrieved_docs}
+
+ def generate(state: State):
+     docs_content = "\n\n".join(doc.page_content for doc in state["context"])
+     prompt = f"""
+ You are a helpful technical assistant for TAL BV and assist users in finding information. Use the provided documentation to answer questions accurately and with necessary sources.
+
+ Context: {docs_content}
+ Question: {state["question"]}
+ Answer:
+ """
+     input_ids = tokenizer.encode(prompt, truncation=True, max_length=max_length, return_tensors="pt")
+     truncated_prompt = tokenizer.decode(input_ids[0])
+     response = chatbot(truncated_prompt, max_new_tokens=32, do_sample=True, temperature=0.2)
+     answer = response[0]['generated_text'].split("Answer:", 1)[-1].strip()
+     return {"answer": answer}
+
+ graph_builder = StateGraph(State)
+ graph_builder.add_node("retrieve", retrieve)
+ graph_builder.add_node("generate", generate)
+ graph_builder.add_edge(START, "retrieve")
+ graph_builder.add_edge("retrieve", "generate")
+ graph = graph_builder.compile()
+
+ # --- Chatbot Function ---
+
+ def tal_langchain_chatbot(user_message, history):
+     lamp_name, converter_number = extract_converter_and_lamp(user_message)
+     if lamp_name and converter_number:
+         answer = get_lamp_quantity(converter_number, lamp_name, tech_info)
+     else:
+         answer = answer_technical_question(user_message, tech_info)
+     if not answer:
+         response = graph.invoke({"question": user_message})
+         answer = response["answer"]
+     history = history or []
+     history.append({"role": "user", "content": user_message})
+     history.append({"role": "assistant", "content": answer})
+     return history, history, ""
+
+ # --- Gradio UI ---
+
+ custom_css = """
+ #chatbot-toggle-btn {
+     position: fixed;
+     bottom: 30px;
+     right: 30px;
+     z-index: 10001;
+     background-color: #ED1C24;
+     color: white;
+     border: none;
+     border-radius: 50%;
+     width: 56px;
+     height: 56px;
+     font-size: 28px;
+     font-weight: bold;
+     cursor: pointer;
+     box-shadow: 0 4px 12px rgba(0,0,0,0.3);
+     display: flex;
+     align-items: center;
+     justify-content: center;
+     transition: all 0.3s ease;
+ }
+ #chatbot-panel {
+     position: fixed;
+     bottom: 100px;
+     right: 30px;
+     z-index: 10000;
+     width: 380px;
+     height: 560px;
+     background-color: #ffffff;
+     border-radius: 20px;
+     box-shadow: 0 4px 24px rgba(0,0,0,0.25);
+     overflow: hidden;
+     display: flex;
+     flex-direction: column;
+     font-family: 'Arial', sans-serif;
+ }
+ #chatbot-panel.hide {
+     display: none !important;
+ }
+ #chat-header {
+     background-color: #ED1C24;
+     color: white;
+     padding: 16px;
+     font-weight: bold;
+     font-size: 16px;
+     display: flex;
+     align-items: center;
+     gap: 12px;
+ }
+ #chat-header img {
+     border-radius: 50%;
+     width: 32px;
+     height: 32px;
+ }
+ .gr-chatbot {
+     flex: 1;
+     overflow-y: auto;
+     padding: 12px;
+     background-color: #f8f8f8;
+     border: none;
+ }
+ .gr-textbox {
+     padding: 10px;
+     border-top: 1px solid #eee;
+ }
+ .gr-textbox textarea {
+     background-color: white;
+     border: 1px solid #ccc;
+     border-radius: 8px;
+ }
+ footer {
+     display: none !important;
+ }
+ """
+
+ def toggle_visibility(current_state):
+     new_state = not current_state
+     return new_state, gr.update(visible=new_state)
+
+ with gr.Blocks(css=custom_css) as demo:
+     visibility_state = gr.State(False)
+     history = gr.State([])
+
+     chatbot_toggle = gr.Button("💬", elem_id="chatbot-toggle-btn")
+     with gr.Column(visible=False, elem_id="chatbot-panel") as chatbot_panel:
+         gr.HTML("""
+         <div id='chat-header'>
+             <img src="https://www.svgrepo.com/download/490283/pixar-lamp.svg" />
+             Lofty the TAL Bot
+         </div>
+         """)
+         chat = gr.Chatbot(label="Chat", elem_id="chat-window", type="messages")
+         msg = gr.Textbox(placeholder="Type your message here...", show_label=False)
+         send = gr.Button("Send")
+         send.click(
+             fn=tal_langchain_chatbot,
+             inputs=[msg, history],
+             outputs=[chat, history, msg]
+         )
+         msg.submit(
+             fn=tal_langchain_chatbot,
+             inputs=[msg, history],
+             outputs=[chat, history, msg]
+         )
+
+     chatbot_toggle.click(
+         fn=toggle_visibility,
+         inputs=visibility_state,
+         outputs=[visibility_state, chatbot_panel]
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
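For anyone wanting to exercise the new module outside the Gradio UI, a small smoke-test sketch; the function names are those added above, the questions are only illustrative, and this script is not part of the commit.

# Hypothetical local smoke test; importing the module runs its top-level setup
# (JSON load, embeddings, FAISS index, BlenderBot pipeline), so it needs the data file present.
from RagImplementation import tal_langchain_chatbot, graph

# Rule-based path: answered directly from the product data via answer_technical_question().
history, _, _ = tal_langchain_chatbot("Which converters are suitable for outdoor installation?", [])
print(history[-1]["content"])

# RAG fallback path: FAISS retrieval plus the BlenderBot generator through the LangGraph graph.
state = graph.invoke({"question": "What should I know about TAL converters?"})
print(state["answer"])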
converters_with_links_and_pricelist.json ADDED
The diff for this file is too large to render.
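Although the JSON diff is not rendered, the .get() calls in get_technical_fit_info() imply roughly the record shape below for each converter entry; every value here is an illustrative placeholder, not actual catalogue data.

# Hypothetical shape of one entry in converters_with_links_and_pricelist.json,
# inferred from the keys RagImplementation.py reads; all values are made up.
example_entry = {
    "TYPE": "24V DC",
    "ARTNR": 12345,                       # placeholder article number
    "CONVERTER DESCRIPTION:": "EXAMPLE DRIVER 24V 30W IP20",
    "STRAIN RELIEF": "yes",
    "LOCATION": "in&outdoor",
    "DIMMABILITY": "dimmable (1-10V)",
    "EFFICIENCY @full load": "0,87",      # comma decimals appear expected (see parse_float)
    "OUTPUT VOLTAGE (V)": 24,
    "NOM. INPUT VOLTAGE (V)": "220-240",
    "SIZE: L*B*H (mm)": "95*40*28",
    "Gross Weight": "0,25",
    "Listprice": "59,00",
    "lamps": {"LEDLINE 14.4W": {"min": 1, "max": 2}},
    "pdf_link": "https://example.com/datasheet.pdf",
}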
 
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ transformers
+ langchain-core
+ langchain-huggingface
+ langchain-community
+ langgraph
+ python-dotenv
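Note that RagImplementation.py also imports gradio directly and pulls in sentence-transformers (for HuggingFaceEmbeddings), FAISS, and torch at runtime, which this requirements list does not mention. A fuller sketch for a local or CPU-only install might look like the following, with versions deliberately left unpinned:

transformers
torch
sentence-transformers
faiss-cpu
gradio
langchain-core
langchain-huggingface
langchain-community
langgraph
python-dotenv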