MLDeveloper committed
Commit 4a2da89 · verified · 1 Parent(s): ae41e31

Upload app (5).py

Files changed (1)
  1. app (5).py +526 -0
app (5).py ADDED
@@ -0,0 +1,526 @@
import streamlit as st
import requests
import firebase_admin
from firebase_admin import credentials, db, auth
from PIL import Image
import numpy as np
from geopy.geocoders import Nominatim
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input
import json

# Initialize Firebase
if not firebase_admin._apps:
    cred = credentials.Certificate("firebase_credentials.json")
    firebase_admin.initialize_app(cred, {
        'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/'
    })

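# Note: "firebase_credentials.json" is expected to be a Firebase service-account key
# downloaded from the Firebase console. A minimal sketch of its shape, with placeholder
# values only (not this project's real credentials):
#
# {
#     "type": "service_account",
#     "project_id": "<your-project-id>",
#     "private_key_id": "<key-id>",
#     "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
#     "client_email": "firebase-adminsdk-xxxxx@<your-project-id>.iam.gserviceaccount.com",
#     "client_id": "<client-id>",
#     "token_uri": "https://oauth2.googleapis.com/token"
# }
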
# Load MobileNetV2 pre-trained model
mobilenet_model = MobileNetV2(weights="imagenet")

# Function to classify the uploaded image using MobileNetV2
def classify_image_with_mobilenet(image):
    try:
        img = image.resize((224, 224))
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = preprocess_input(img_array)
        predictions = mobilenet_model.predict(img_array)
        labels = decode_predictions(predictions, top=5)[0]
        return {label[1]: float(label[2]) for label in labels}
    except Exception as e:
        st.error(f"Error during image classification: {e}")
        return {}

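# For reference, the dict returned above maps ImageNet class names to confidence
# scores, e.g. (illustrative values only): {"ashcan": 0.62, "bucket": 0.11, ...}.
# This same dict is what gets stored under "classification" in Firebase below.
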
# Function to get user's location using geolocation API
def get_user_location():
    st.write("Fetching location, please allow location access in your browser.")
    geolocator = Nominatim(user_agent="binsight")
    try:
        ip_info = requests.get("https://ipinfo.io/json").json()
        loc = ip_info.get("loc", "").split(",")
        if len(loc) == 2:
            latitude, longitude = loc[0], loc[1]
        else:
            latitude, longitude = None, None
        if latitude and longitude:
            address = geolocator.reverse(f"{latitude}, {longitude}").address
            return latitude, longitude, address
    except Exception as e:
        st.error(f"Error retrieving location: {e}")
    return None, None, None

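# Note: the IP lookup above returns the ISP/gateway location, which is why a later
# comment notes that this approach does not give the user's correct location. A rough,
# commented-out sketch of a browser-based alternative, assuming the third-party
# streamlit-js-eval package and its get_geolocation() helper (an assumption, not part
# of this app):
#
# from streamlit_js_eval import get_geolocation
#
# def get_browser_location():
#     pos = get_geolocation()  # None until the browser grants location permission
#     if pos and "coords" in pos:
#         lat = pos["coords"]["latitude"]
#         lon = pos["coords"]["longitude"]
#         address = Nominatim(user_agent="binsight").reverse(f"{lat}, {lon}").address
#         return lat, lon, address
#     return None, None, None
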
# User Login
st.sidebar.header("User Login")
user_email = st.sidebar.text_input("Enter your email")
login_button = st.sidebar.button("Login")

if login_button:
    if user_email:
        st.session_state["user_email"] = user_email
        st.sidebar.success(f"Logged in as {user_email}")

if "user_email" not in st.session_state:
    st.warning("Please log in first.")
    st.stop()

# Get user location and display details
latitude, longitude, address = get_user_location()
if latitude and longitude:
    st.success(f"Location detected: {address}")
else:
    st.warning("Unable to fetch location, please ensure location access is enabled.")
    st.stop()

# Streamlit App
st.title("BinSight: Upload Dustbin Image")

uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"])
submit_button = st.button("Analyze and Upload")

if submit_button and uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)

    classification_results = classify_image_with_mobilenet(image)

    if classification_results:
        db_ref = db.reference("dustbins")
        dustbin_data = {
            "user_email": st.session_state["user_email"],
            "latitude": latitude,
            "longitude": longitude,
            "address": address,
            "classification": classification_results,
            "allocated_truck": None,
            "status": "Pending"
        }
        db_ref.push(dustbin_data)
        st.success("Dustbin data uploaded successfully!")
        st.write(f"**Location:** {address}")
        st.write(f"**Latitude:** {latitude}, **Longitude:** {longitude}")
    else:
        st.error("Missing classification details. Cannot upload.")
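
# For reference, db_ref.push() stores each record under an auto-generated key beneath
# "dustbins". A minimal, hypothetical sketch of how a separate dispatcher script could
# read back the pending bins with the same Admin SDK (not part of this app):
#
# pending = db.reference("dustbins").get() or {}
# for key, bin_data in pending.items():
#     if bin_data.get("status") == "Pending":
#         print(key, bin_data["address"], bin_data["classification"])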


# Best with Firebase, but the code below does not give the user's correct location.

# import streamlit as st
# import requests
# import firebase_admin
# from firebase_admin import credentials, db, auth
# from PIL import Image
# import numpy as np
# from geopy.geocoders import Nominatim
# from tensorflow.keras.applications import MobileNetV2
# from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input

# # Initialize Firebase
# if not firebase_admin._apps:
#     cred = credentials.Certificate("firebase_credentials.json")
#     firebase_admin.initialize_app(cred, {
#         'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/'
#     })

# # Load MobileNetV2 pre-trained model
# mobilenet_model = MobileNetV2(weights="imagenet")

# # Function to classify the uploaded image using MobileNetV2
# def classify_image_with_mobilenet(image):
#     try:
#         img = image.resize((224, 224))
#         img_array = np.array(img)
#         img_array = np.expand_dims(img_array, axis=0)
#         img_array = preprocess_input(img_array)
#         predictions = mobilenet_model.predict(img_array)
#         labels = decode_predictions(predictions, top=5)[0]
#         return {label[1]: float(label[2]) for label in labels}
#     except Exception as e:
#         st.error(f"Error during image classification: {e}")
#         return {}

# # Function to get user's location
# def get_user_location():
#     try:
#         ip_info = requests.get("https://ipinfo.io/json").json()
#         location = ip_info.get("loc", "").split(",")
#         latitude = location[0] if len(location) > 0 else None
#         longitude = location[1] if len(location) > 1 else None

#         if latitude and longitude:
#             geolocator = Nominatim(user_agent="binsight")
#             address = geolocator.reverse(f"{latitude}, {longitude}").address
#             return latitude, longitude, address
#         return None, None, None
#     except Exception as e:
#         st.error(f"Unable to get location: {e}")
#         return None, None, None

# # User Login
# st.sidebar.header("User Login")
# user_email = st.sidebar.text_input("Enter your email")
# login_button = st.sidebar.button("Login")

# if login_button:
#     if user_email:
#         st.session_state["user_email"] = user_email
#         st.sidebar.success(f"Logged in as {user_email}")

# if "user_email" not in st.session_state:
#     st.warning("Please log in first.")
#     st.stop()

# # Streamlit App
# st.title("BinSight: Upload Dustbin Image")

# uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"])
# submit_button = st.button("Analyze and Upload")

# if submit_button and uploaded_file:
#     image = Image.open(uploaded_file)
#     st.image(image, caption="Uploaded Image", use_container_width=True)

#     classification_results = classify_image_with_mobilenet(image)
#     latitude, longitude, address = get_user_location()

#     if latitude and longitude and classification_results:
#         db_ref = db.reference("dustbins")
#         dustbin_data = {
#             "user_email": st.session_state["user_email"],
#             "latitude": latitude,
#             "longitude": longitude,
#             "address": address,
#             "classification": classification_results,
#             "allocated_truck": None,
#             "status": "Pending"
#         }
#         db_ref.push(dustbin_data)
#         st.success("Dustbin data uploaded successfully!")
#     else:
#         st.error("Missing classification or location details. Cannot upload.")


# Below is the old version: it does not use Firebase, and this is where Gemini was added.

# import streamlit as st
# import os
# from PIL import Image
# import numpy as np
# from io import BytesIO
# from dotenv import load_dotenv
# from geopy.geocoders import Nominatim
# from tensorflow.keras.applications import MobileNetV2
# from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input
# import requests
# import google.generativeai as genai

# # Load environment variables
# load_dotenv()

# # Configure Generative AI
# genai.configure(api_key='AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM')

# # Load MobileNetV2 pre-trained model
# mobilenet_model = MobileNetV2(weights="imagenet")

# # Function to classify the uploaded image using MobileNetV2
# def classify_image_with_mobilenet(image):
#     try:
#         img = image.resize((224, 224))
#         img_array = np.array(img)
#         img_array = np.expand_dims(img_array, axis=0)
#         img_array = preprocess_input(img_array)
#         predictions = mobilenet_model.predict(img_array)
#         labels = decode_predictions(predictions, top=5)[0]
#         return {label[1]: float(label[2]) for label in labels}
#     except Exception as e:
#         st.error(f"Error during image classification: {e}")
#         return {}

# # Function to get user's location
# def get_user_location():
#     try:
#         ip_info = requests.get("https://ipinfo.io/json").json()
#         location = ip_info.get("loc", "").split(",")
#         latitude = location[0] if len(location) > 0 else None
#         longitude = location[1] if len(location) > 1 else None

#         if latitude and longitude:
#             geolocator = Nominatim(user_agent="binsight")
#             address = geolocator.reverse(f"{latitude}, {longitude}").address
#             return latitude, longitude, address
#         return None, None, None
#     except Exception as e:
#         st.error(f"Unable to get location: {e}")
#         return None, None, None

# # Function to get nearest municipal details with contact info
# def get_nearest_municipal_details(latitude, longitude):
#     try:
#         if latitude and longitude:
#             # Simulating municipal service retrieval
#             municipal_services = [
#                 {"latitude": "12.9716", "longitude": "77.5946", "office": "Bangalore Municipal Office", "phone": "+91-80-12345678"},
#                 {"latitude": "28.7041", "longitude": "77.1025", "office": "Delhi Municipal Office", "phone": "+91-11-98765432"},
#                 {"latitude": "19.0760", "longitude": "72.8777", "office": "Mumbai Municipal Office", "phone": "+91-22-22334455"},
#             ]

#             # Find the nearest municipal service (mock logic: matching first two decimal points)
#             for service in municipal_services:
#                 if str(latitude).startswith(service["latitude"][:5]) and str(longitude).startswith(service["longitude"][:5]):
#                     return f"""
#                     **Office**: {service['office']}
#                     **Phone**: {service['phone']}
#                     """
#             return "No nearby municipal office found. Please check manually."
#         else:
#             return "Location not available. Unable to fetch municipal details."
#     except Exception as e:
#         st.error(f"Unable to fetch municipal details: {e}")
#         return None

# # Function to interact with Generative AI
# def get_genai_response(classification_results, location):
#     try:
#         classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()])
#         location_summary = f"""
#         Latitude: {location[0] if location[0] else 'N/A'}
#         Longitude: {location[1] if location[1] else 'N/A'}
#         Address: {location[2] if location[2] else 'N/A'}
#         """
#         prompt = f"""
#         ### You are an environmental expert. Analyze the following:
#         1. **Image Classification**:
#            - {classification_summary}
#         2. **Location**:
#            - {location_summary}

#         ### Output Required:
#         1. Detailed insights about the waste detected in the image.
#         2. Specific health risks associated with the detected waste type.
#         3. Precautions to mitigate these health risks.
#         4. Recommendations for proper disposal.
#         """
#         model = genai.GenerativeModel('gemini-pro')
#         response = model.generate_content(prompt)
#         return response
#     except Exception as e:
#         st.error(f"Error using Generative AI: {e}")
#         return None

# # Function to display Generative AI response
# def display_genai_response(response):
#     st.subheader("Detailed Analysis and Recommendations")
#     if response and response.candidates:
#         response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else ""
#         st.write(response_content)
#     else:
#         st.write("No response received from Generative AI or quota exceeded.")

# # Streamlit App
# st.title("BinSight: AI-Powered Dustbin and Waste Analysis System")
# st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.")

# uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.")
# submit_button = st.button("Analyze Dustbin")

# if submit_button:
#     if uploaded_file is not None:
#         image = Image.open(uploaded_file)
#         st.image(image, caption="Uploaded Image", use_container_width=True)

#         # Classify the image using MobileNetV2
#         st.subheader("Image Classification")
#         classification_results = classify_image_with_mobilenet(image)
#         for label, score in classification_results.items():
#             st.write(f"- **{label}**: {score:.2f}")

#         # Get user location
#         location = get_user_location()
#         latitude, longitude, address = location

#         st.subheader("User Location")
#         st.write(f"Latitude: {latitude if latitude else 'N/A'}")
#         st.write(f"Longitude: {longitude if longitude else 'N/A'}")
#         st.write(f"Address: {address if address else 'N/A'}")

#         # Get nearest municipal details with contact info
#         st.subheader("Nearest Municipal Details")
#         municipal_details = get_nearest_municipal_details(latitude, longitude)
#         st.write(municipal_details)

#         # Generate detailed analysis with Generative AI
#         if classification_results:
#             response = get_genai_response(classification_results, location)
#             display_genai_response(response)
#     else:
#         st.write("Please upload an image for analysis.")


# # import streamlit as st
# # import os
# # from PIL import Image
# # import numpy as np
# # from io import BytesIO
# # from dotenv import load_dotenv
# # from geopy.geocoders import Nominatim
# # from tensorflow.keras.applications import MobileNetV2
# # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input
# # import requests
# # import google.generativeai as genai

# # # Load environment variables
# # load_dotenv()

# # # Configure Generative AI
# # genai.configure(api_key='AIzaSyBREh8Uei7uDCbzPaYW2WdalOdjVWcQLAM')

# # # Load MobileNetV2 pre-trained model
# # mobilenet_model = MobileNetV2(weights="imagenet")

# # # Function to classify the uploaded image using MobileNetV2
# # def classify_image_with_mobilenet(image):
# #     try:
# #         # Resize the image to the input size of MobileNetV2
# #         img = image.resize((224, 224))
# #         img_array = np.array(img)
# #         img_array = np.expand_dims(img_array, axis=0)
# #         img_array = preprocess_input(img_array)

# #         # Predict using the MobileNetV2 model
# #         predictions = mobilenet_model.predict(img_array)
# #         labels = decode_predictions(predictions, top=5)[0]
# #         return {label[1]: float(label[2]) for label in labels}
# #     except Exception as e:
# #         st.error(f"Error during image classification: {e}")
# #         return {}

# # # Function to get user's location
# # def get_user_location():
# #     try:
# #         # Fetch location using the IPInfo API
# #         ip_info = requests.get("https://ipinfo.io/json").json()
# #         location = ip_info.get("loc", "").split(",")
# #         latitude = location[0] if len(location) > 0 else None
# #         longitude = location[1] if len(location) > 1 else None

# #         if latitude and longitude:
# #             geolocator = Nominatim(user_agent="binsight")
# #             address = geolocator.reverse(f"{latitude}, {longitude}").address
# #             return latitude, longitude, address
# #         return None, None, None
# #     except Exception as e:
# #         st.error(f"Unable to get location: {e}")
# #         return None, None, None

# # # Function to get nearest municipal details
# # def get_nearest_municipal_details(latitude, longitude):
# #     try:
# #         if latitude and longitude:
# #             # Simulating municipal service retrieval
# #             return f"The nearest municipal office is at ({latitude}, {longitude}). Please contact your local authority for waste management services."
# #         else:
# #             return "Location not available. Unable to fetch municipal details."
# #     except Exception as e:
# #         st.error(f"Unable to fetch municipal details: {e}")
# #         return None

# # # Function to interact with Generative AI
# # def get_genai_response(classification_results, location):
# #     try:
# #         # Construct prompt for Generative AI
# #         classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()])
# #         location_summary = f"""
# #         Latitude: {location[0] if location[0] else 'N/A'}
# #         Longitude: {location[1] if location[1] else 'N/A'}
# #         Address: {location[2] if location[2] else 'N/A'}
# #         """
# #         prompt = f"""
# #         ### You are an environmental expert. Analyze the following:
# #         1. **Image Classification**:
# #            - {classification_summary}
# #         2. **Location**:
# #            - {location_summary}

# #         ### Output Required:
# #         1. Detailed insights about the waste detected in the image.
# #         2. Specific health risks associated with the detected waste type.
# #         3. Precautions to mitigate these health risks.
# #         4. Recommendations for proper disposal.
# #         """

# #         model = genai.GenerativeModel('gemini-pro')
# #         response = model.generate_content(prompt)
# #         return response
# #     except Exception as e:
# #         st.error(f"Error using Generative AI: {e}")
# #         return None

# # # Function to display Generative AI response
# # def display_genai_response(response):
# #     st.subheader("Detailed Analysis and Recommendations")
# #     if response and response.candidates:
# #         response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else ""
# #         st.write(response_content)
# #     else:
# #         st.write("No response received from Generative AI or quota exceeded.")

# # # Streamlit App
# # st.title("BinSight: AI-Powered Dustbin and Waste Analysis System")
# # st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.")

# # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.")
# # submit_button = st.button("Analyze Dustbin")

# # if submit_button:
# #     if uploaded_file is not None:
# #         image = Image.open(uploaded_file)
# #         st.image(image, caption="Uploaded Image", use_column_width=True)

# #         # Classify the image using MobileNetV2
# #         st.subheader("Image Classification")
# #         classification_results = classify_image_with_mobilenet(image)
# #         for label, score in classification_results.items():
# #             st.write(f"- **{label}**: {score:.2f}")

# #         # Get user location
# #         location = get_user_location()
# #         latitude, longitude, address = location

# #         st.subheader("User Location")
# #         st.write(f"Latitude: {latitude if latitude else 'N/A'}")
# #         st.write(f"Longitude: {longitude if longitude else 'N/A'}")
# #         st.write(f"Address: {address if address else 'N/A'}")

# #         # Get nearest municipal details
# #         st.subheader("Nearest Municipal Details")
# #         municipal_details = get_nearest_municipal_details(latitude, longitude)
# #         st.write(municipal_details)

# #         # Generate detailed analysis with Generative AI
# #         if classification_results:
# #             response = get_genai_response(classification_results, location)
# #             display_genai_response(response)
# #     else:
# #         st.write("Please upload an image for analysis.")