saakshigupta committed on
Commit 4c9c5f0 · verified · 1 Parent(s): 398e440

Update app.py

Files changed (1)
  1. app.py +678 -25
app.py CHANGED
@@ -69,9 +69,6 @@ custom_instruction = st.sidebar.text_area(
69
  help="Add specific instructions for the LLM analysis"
70
  )
71
 
72
- # Debug section - try adding this to see if it appears
73
- st.write("Debug: Initial app setup complete")
74
-
75
  # About section in sidebar
76
  st.sidebar.markdown("---")
77
  st.sidebar.subheader("About")
@@ -91,12 +88,358 @@ The system looks for:
91
  - Blending problems
92
  """)
93
 
94
  # ----- BLIP Image Captioning -----
95
 
96
- # Define custom prompts for original and GradCAM images - simplified to avoid errors
97
- ORIGINAL_IMAGE_PROMPT = "Generate a detailed description of this image with the following structure: Subject, Appearance, Pose, Background, Lighting, Colors, Notable Elements"
98
 
99
- GRADCAM_IMAGE_PROMPT = "Describe the GradCAM visualization overlay with the following structure: Main Focus Area, High Activation Regions, Medium Activation Regions, Low Activation Regions, Activation Pattern"
100
 
101
  # Function to load BLIP captioning model
102
  @st.cache_resource
@@ -110,8 +453,22 @@ def load_blip_model():
110
  st.error(f"Error loading BLIP model: {str(e)}")
111
  return None, None
112
 
113
- # Simplified function to generate image caption
114
- def generate_image_caption(image, processor, model, is_gradcam=False, max_length=75, num_beams=5):
115
  try:
116
  # Select the appropriate prompt based on image type
117
  prompt = GRADCAM_IMAGE_PROMPT if is_gradcam else ORIGINAL_IMAGE_PROMPT
@@ -140,11 +497,101 @@ def generate_image_caption(image, processor, model, is_gradcam=False, max_length
140
  st.error(f"Error generating caption: {str(e)}")
141
  return "Error generating caption"
142
 
143
  # Main app
144
  def main():
145
- # Debug - add this to see if main function is being called
146
- st.write("Debug: Main function started")
147
-
148
  # Create placeholders for model state
149
  if 'clip_model_loaded' not in st.session_state:
150
  st.session_state.clip_model_loaded = False
@@ -170,45 +617,251 @@ def main():
170
  with clip_col:
171
  if not st.session_state.clip_model_loaded:
172
  if st.button("📥 Load CLIP Model for Detection", type="primary"):
173
- st.success("CLIP model button clicked")
174
- # For now, just set as loaded for testing UI
175
- st.session_state.clip_model_loaded = True
176
  else:
177
  st.success("✅ CLIP model loaded and ready!")
178
 
179
  with llm_col:
180
  if not st.session_state.llm_model_loaded:
181
  if st.button("📥 Load Vision LLM for Analysis", type="primary"):
182
- st.success("LLM model button clicked")
183
- # For now, just set as loaded for testing UI
184
- st.session_state.llm_model_loaded = True
185
  else:
186
  st.success("✅ Vision LLM loaded and ready!")
187
 
188
  with blip_col:
189
  if not st.session_state.blip_model_loaded:
190
  if st.button("📥 Load BLIP for Captioning", type="primary"):
191
- st.success("BLIP model button clicked")
192
- # For now, just set as loaded for testing UI
193
- st.session_state.blip_model_loaded = True
194
  else:
195
  st.success("✅ BLIP captioning model loaded and ready!")
196
 
197
  # Image upload section
198
- with st.expander("Stage 2: Image Upload", expanded=True):
199
  st.subheader("Upload an Image")
200
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
201
 
202
  if uploaded_file is not None:
203
- st.success("Image uploaded successfully!")
204
  # Display the uploaded image
205
  try:
206
  image = Image.open(uploaded_file).convert("RGB")
207
  st.image(image, caption="Uploaded Image", use_column_width=True)
208
- st.session_state.current_image = image
209
  except Exception as e:
210
- st.error(f"Error loading image: {str(e)}")
211
-
212
  # Footer
213
  st.markdown("---")
214
  st.caption("Advanced Deepfake Image Analyzer with Structured BLIP Captioning")
 
69
  help="Add specific instructions for the LLM analysis"
70
  )
71
 
72
  # About section in sidebar
73
  st.sidebar.markdown("---")
74
  st.sidebar.subheader("About")
 
88
  - Blending problems
89
  """)
90
 
91
+ # ----- GradCAM Implementation -----
92
+
93
+ class ImageDataset(torch.utils.data.Dataset):
94
+ def __init__(self, image, transform=None, face_only=True, dataset_name=None):
95
+ self.image = image
96
+ self.transform = transform
97
+ self.face_only = face_only
98
+ self.dataset_name = dataset_name
99
+ # Load face detector
100
+ self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
101
+
102
+ def __len__(self):
103
+ return 1 # Only one image
104
+
105
+ def detect_face(self, image_np):
106
+ """Detect face in image and return the face region"""
107
+ gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
108
+ faces = self.face_detector.detectMultiScale(gray, 1.1, 5)
109
+
110
+ # If no face is detected, use the whole image
111
+ if len(faces) == 0:
112
+ st.info("No face detected, using whole image for analysis")
113
+ h, w = image_np.shape[:2]
114
+ return (0, 0, w, h), image_np
115
+
116
+ # Get the largest face
117
+ if len(faces) > 1:
118
+ # Choose the largest face by area
119
+ areas = [w*h for (x, y, w, h) in faces]
120
+ largest_idx = np.argmax(areas)
121
+ x, y, w, h = faces[largest_idx]
122
+ else:
123
+ x, y, w, h = faces[0]
124
+
125
+ # Add padding around the face (5% on each side)
126
+ padding_x = int(w * 0.05)
127
+ padding_y = int(h * 0.05)
128
+
129
+ # Ensure padding doesn't go outside image bounds
130
+ x1 = max(0, x - padding_x)
131
+ y1 = max(0, y - padding_y)
132
+ x2 = min(image_np.shape[1], x + w + padding_x)
133
+ y2 = min(image_np.shape[0], y + h + padding_y)
134
+
135
+ # Extract the face region
136
+ face_img = image_np[y1:y2, x1:x2]
137
+
138
+ return (x1, y1, x2-x1, y2-y1), face_img
139
+
140
+ def __getitem__(self, idx):
141
+ image_np = np.array(self.image)
142
+ label = 0 # Default label; will be overridden by prediction
143
+
144
+ # Store original image for visualization
145
+ original_image = self.image.copy()
146
+
147
+ # Detect face if required
148
+ if self.face_only:
149
+ face_box, face_img_np = self.detect_face(image_np)
150
+ face_img = Image.fromarray(face_img_np)
151
+
152
+ # Apply transform to face image
153
+ if self.transform:
154
+ face_tensor = self.transform(face_img)
155
+ else:
156
+ face_tensor = transforms.ToTensor()(face_img)
157
+
158
+ return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
159
+ else:
160
+ # Process the whole image
161
+ if self.transform:
162
+ image_tensor = self.transform(self.image)
163
+ else:
164
+ image_tensor = transforms.ToTensor()(self.image)
165
+
166
+ return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name
167
+
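For reference, a minimal usage sketch of the ImageDataset wrapper above, outside the Streamlit app (the local file name and transform are illustrative assumptions, not part of the commit):
```python
from PIL import Image
from torchvision import transforms

tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
ds = ImageDataset(Image.open("face.jpg").convert("RGB"), transform=tfm, face_only=True)

# __getitem__ returns a 6-tuple: tensor, label, name, original PIL image, face box, dataset name
tensor, label, name, original, face_box, dataset_name = ds[0]
print(tensor.shape, face_box)  # e.g. torch.Size([3, 224, 224]) and (x, y, w, h)
```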
168
+ class GradCAM:
169
+ def __init__(self, model, target_layer):
170
+ self.model = model
171
+ self.target_layer = target_layer
172
+ self.gradients = None
173
+ self.activations = None
174
+ self._register_hooks()
175
+
176
+ def _register_hooks(self):
177
+ def forward_hook(module, input, output):
178
+ if isinstance(output, tuple):
179
+ self.activations = output[0]
180
+ else:
181
+ self.activations = output
182
+
183
+ def backward_hook(module, grad_in, grad_out):
184
+ if isinstance(grad_out, tuple):
185
+ self.gradients = grad_out[0]
186
+ else:
187
+ self.gradients = grad_out
188
+
189
+ layer = dict([*self.model.named_modules()])[self.target_layer]
190
+ layer.register_forward_hook(forward_hook)
191
+ layer.register_backward_hook(backward_hook)
192
+
193
+ def generate(self, input_tensor, class_idx):
194
+ self.model.zero_grad()
195
+
196
+ try:
197
+ # Use only the vision part of the model for gradient calculation
198
+ vision_outputs = self.model.vision_model(pixel_values=input_tensor)
199
+
200
+ # Get the pooler output
201
+ features = vision_outputs.pooler_output
202
+
203
+ # Create a dummy gradient for the feature based on the class idx
204
+ one_hot = torch.zeros_like(features)
205
+ one_hot[0, class_idx] = 1
206
+
207
+ # Manually backpropagate
208
+ features.backward(gradient=one_hot)
209
+
210
+ # Check for None values
211
+ if self.gradients is None or self.activations is None:
212
+ st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
213
+ return np.ones((14, 14), dtype=np.float32) * 0.5
214
+
215
+ # Process gradients and activations for transformer-based model
216
+ gradients = self.gradients.cpu().detach().numpy()
217
+ activations = self.activations.cpu().detach().numpy()
218
+
219
+ if len(activations.shape) == 3: # [batch, sequence_length, hidden_dim]
220
+ seq_len = activations.shape[1]
221
+
222
+ # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
223
+ if seq_len >= 197:
224
+ # Skip the class token (first token) and reshape the patch tokens into a square
225
+ patch_tokens = activations[0, 1:197, :] # Remove the class token
226
+ # Take the mean across the hidden dimension
227
+ token_importance = np.mean(np.abs(patch_tokens), axis=1)
228
+ # Reshape to the expected grid size (14×14 for CLIP ViT)
229
+ cam = token_importance.reshape(14, 14)
230
+ else:
231
+ # Try to find factors close to a square
232
+ side_len = int(np.sqrt(seq_len))
233
+ # Use the mean across features as importance
234
+ token_importance = np.mean(np.abs(activations[0]), axis=1)
235
+ # Create as square-like shape as possible
236
+ cam = np.zeros((side_len, side_len))
237
+ # Fill the cam with available values
238
+ flat_cam = cam.flatten()
239
+ flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
240
+ cam = flat_cam.reshape(side_len, side_len)
241
+ else:
242
+ # Fallback
243
+ st.info("Using fallback CAM shape (14x14)")
244
+ cam = np.ones((14, 14), dtype=np.float32) * 0.5 # Default fallback
245
+
246
+ # Ensure we have valid values
247
+ cam = np.maximum(cam, 0)
248
+ if np.max(cam) > 0:
249
+ cam = cam / np.max(cam)
250
+
251
+ return cam
252
+
253
+ except Exception as e:
254
+ st.error(f"Error in GradCAM.generate: {str(e)}")
255
+ return np.ones((14, 14), dtype=np.float32) * 0.5
256
+
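The reshape in generate() relies on CLIP ViT-L/14 emitting 197 tokens (1 class token + 196 patch tokens); a minimal sketch of that token-to-grid mapping, using random data:
```python
import numpy as np

activations = np.random.rand(1, 197, 1024)             # [batch, tokens, hidden_dim]
patch_tokens = activations[0, 1:197, :]                # drop the class token
token_importance = np.mean(np.abs(patch_tokens), axis=1)
cam = token_importance.reshape(14, 14)                 # 196 patches -> 14x14 grid
cam = np.maximum(cam, 0) / cam.max()                   # same normalization as above
print(cam.shape)                                       # (14, 14)
```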
257
+ def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
258
+ """Overlay the CAM on the image"""
259
+ if face_box is not None:
260
+ x, y, w, h = face_box
261
+ # Create a mask for the entire image (all zeros initially)
262
+ img_np = np.array(image)
263
+ full_h, full_w = img_np.shape[:2]
264
+ full_cam = np.zeros((full_h, full_w), dtype=np.float32)
265
+
266
+ # Resize CAM to match face region
267
+ face_cam = cv2.resize(cam, (w, h))
268
+
269
+ # Copy the face CAM into the full image CAM at the face position
270
+ full_cam[y:y+h, x:x+w] = face_cam
271
+
272
+ # Convert full CAM to image
273
+ cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
274
+ cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3] # Apply colormap
275
+ cam_colormap = (cam_colormap * 255).astype(np.uint8)
276
+ else:
277
+ # Resize CAM to match image dimensions
278
+ img_np = np.array(image)
279
+ h, w = img_np.shape[:2]
280
+ cam_resized = cv2.resize(cam, (w, h))
281
+
282
+ # Apply colormap
283
+ cam_colormap = plt.cm.jet(cam_resized)[:, :, :3] # Apply colormap
284
+ cam_colormap = (cam_colormap * 255).astype(np.uint8)
285
+
286
+ # Blend the original image with the colormap
287
+ img_np_float = img_np.astype(float) / 255.0
288
+ cam_colormap_float = cam_colormap.astype(float) / 255.0
289
+
290
+ blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
291
+ blended = (blended * 255).astype(np.uint8)
292
+
293
+ return Image.fromarray(blended)
294
+
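A small shape check for the face-box branch of overlay_cam_on_image above (the image size and box coordinates are made up for illustration):
```python
import numpy as np
import cv2

cam = np.random.rand(14, 14).astype(np.float32)        # CAM from GradCAM.generate()
full_cam = np.zeros((480, 640), dtype=np.float32)      # canvas matching the full image
x, y, w, h = 200, 100, 160, 160                        # pretend face box
full_cam[y:y+h, x:x+w] = cv2.resize(cam, (w, h))       # paste the resized CAM into the face region
print(full_cam.shape, full_cam.max() <= 1.0)           # (480, 640) True
```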
295
+ def save_comparison(image, cam, overlay, face_box=None):
296
+ """Create a side-by-side comparison of the original, CAM, and overlay"""
297
+ fig, axes = plt.subplots(1, 3, figsize=(15, 5))
298
+
299
+ # Original Image
300
+ axes[0].imshow(image)
301
+ axes[0].set_title("Original")
302
+ if face_box is not None:
303
+ x, y, w, h = face_box
304
+ rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
305
+ axes[0].add_patch(rect)
306
+ axes[0].axis("off")
307
+
308
+ # CAM
309
+ if face_box is not None:
310
+ # Create a full image CAM that highlights only the face
311
+ img_np = np.array(image)
312
+ h, w = img_np.shape[:2]
313
+ full_cam = np.zeros((h, w))
314
+
315
+ x, y, fw, fh = face_box
316
+ # Resize CAM to face size
317
+ face_cam = cv2.resize(cam, (fw, fh))
318
+ # Place it in the right position
319
+ full_cam[y:y+fh, x:x+fw] = face_cam
320
+ axes[1].imshow(full_cam, cmap="jet")
321
+ else:
322
+ cam_resized = cv2.resize(cam, (image.width, image.height))
323
+ axes[1].imshow(cam_resized, cmap="jet")
324
+ axes[1].set_title("CAM")
325
+ axes[1].axis("off")
326
+
327
+ # Overlay
328
+ axes[2].imshow(overlay)
329
+ axes[2].set_title("Overlay")
330
+ axes[2].axis("off")
331
+
332
+ plt.tight_layout()
333
+
334
+ # Convert plot to PIL Image for Streamlit display
335
+ buf = io.BytesIO()
336
+ plt.savefig(buf, format="png", bbox_inches="tight")
337
+ plt.close()
338
+ buf.seek(0)
339
+ return Image.open(buf)
340
+
341
+ # Function to load GradCAM CLIP model
342
+ @st.cache_resource
343
+ def load_clip_model():
344
+ with st.spinner("Loading CLIP model for GradCAM..."):
345
+ try:
346
+ model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
347
+
348
+ # Apply a simple classification head
349
+ model.classification_head = nn.Linear(1024, 2)
350
+ model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
351
+ model.classification_head.bias.data.zero_()
352
+
353
+ model.eval()
354
+ return model
355
+ except Exception as e:
356
+ st.error(f"Error loading CLIP model: {str(e)}")
357
+ return None
358
+
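The Linear(1024, 2) head above matches the 1024-dimensional pooler_output of the ViT-L/14 vision tower; a quick shape check (downloads the checkpoint):
```python
import torch
import torch.nn as nn
from transformers import CLIPModel

model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
model.classification_head = nn.Linear(1024, 2)
with torch.no_grad():
    feats = model.vision_model(pixel_values=torch.randn(1, 3, 224, 224)).pooler_output
    logits = model.classification_head(feats)
print(feats.shape, logits.shape)  # torch.Size([1, 1024]) torch.Size([1, 2])
```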
359
+ def get_target_layer_clip(model):
360
+ """Get the target layer for GradCAM"""
361
+ return "vision_model.encoder.layers.23"
362
+
363
+ def process_image_with_gradcam(image, model, device, pred_class):
364
+ """Process an image with GradCAM"""
365
+ # Set up transformations
366
+ transform = transforms.Compose([
367
+ transforms.Resize((224, 224)),
368
+ transforms.ToTensor(),
369
+ transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
370
+ ])
371
+
372
+ # Create dataset for the single image
373
+ dataset = ImageDataset(image, transform=transform, face_only=True)
374
+
375
+ # Custom collate function
376
+ def custom_collate(batch):
377
+ tensors = [item[0] for item in batch]
378
+ labels = [item[1] for item in batch]
379
+ paths = [item[2] for item in batch]
380
+ images = [item[3] for item in batch]
381
+ face_boxes = [item[4] for item in batch]
382
+ dataset_names = [item[5] for item in batch]
383
+
384
+ tensors = torch.stack(tensors)
385
+ labels = torch.tensor(labels)
386
+
387
+ return tensors, labels, paths, images, face_boxes, dataset_names
388
+
389
+ # Create dataloader
390
+ dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)
391
+
392
+ # Extract the batch
393
+ for batch in dataloader:
394
+ input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
395
+ original_image = original_images[0]
396
+ face_box = face_boxes[0]
397
+
398
+ # Move tensors and model to device
399
+ input_tensor = input_tensor.to(device)
400
+ model = model.to(device)
401
+
402
+ try:
403
+ # Create GradCAM extractor
404
+ target_layer = get_target_layer_clip(model)
405
+ cam_extractor = GradCAM(model, target_layer)
406
+
407
+ # Generate CAM
408
+ cam = cam_extractor.generate(input_tensor, pred_class)
409
+
410
+ # Create visualizations
411
+ overlay = overlay_cam_on_image(original_image, cam, face_box)
412
+ comparison = save_comparison(original_image, cam, overlay, face_box)
413
+
414
+ # Return results
415
+ return cam, overlay, comparison, face_box
416
+
417
+ except Exception as e:
418
+ st.error(f"Error processing image with GradCAM: {str(e)}")
419
+ # Return default values
420
+ default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
421
+ overlay = overlay_cam_on_image(original_image, default_cam, face_box)
422
+ comparison = save_comparison(original_image, default_cam, overlay, face_box)
423
+ return default_cam, overlay, comparison, face_box
424
+
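Taken together, the helpers above can also be exercised outside the Streamlit callbacks; a hedged end-to-end sketch (the test image path and pred_class value are assumptions):
```python
import torch
from PIL import Image

model = load_clip_model()                               # cached CLIP + classification head
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
image = Image.open("face.jpg").convert("RGB")

cam, overlay, comparison, face_box = process_image_with_gradcam(image, model, device, pred_class=1)
overlay.save("gradcam_overlay.png")
comparison.save("gradcam_comparison.png")
```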
425
  # ----- BLIP Image Captioning -----
426
 
427
+ # Define custom prompts for original and GradCAM images
428
+ ORIGINAL_IMAGE_PROMPT = """Generate a detailed description of this image with the following structure:
429
+ Subject: [Describe the person/main subject]
430
+ Appearance: [Describe clothing, hair, facial features]
431
+ Pose: [Describe the person's pose and expression]
432
+ Background: [Describe the environment and setting]
433
+ Lighting: [Describe lighting conditions and shadows]
434
+ Colors: [Note dominant colors and color palette]
435
+ Notable Elements: [Any distinctive objects or visual elements]"""
436
 
437
+ GRADCAM_IMAGE_PROMPT = """Describe the GradCAM visualization overlay with the following structure:
438
+ Main Focus Area: [Identify the primary region highlighted]
439
+ High Activation Regions: [Describe red/yellow areas and corresponding image features]
440
+ Medium Activation Regions: [Describe green/cyan areas and corresponding image features]
441
+ Low Activation Regions: [Describe blue/dark blue areas and corresponding image features]
442
+ Activation Pattern: [Describe the overall pattern of the heatmap]"""
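These prompts are used as conditioning text for BLIP captioning; a hedged sketch of that call path (the exact BLIP checkpoint is an assumption, since load_blip_model()'s body is outside this hunk):
```python
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")  # assumed checkpoint
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

image = Image.open("example.jpg").convert("RGB")
inputs = processor(image, ORIGINAL_IMAGE_PROMPT, return_tensors="pt")  # prompt defined above
with torch.no_grad():
    out = model.generate(**inputs, max_length=100, num_beams=5)
print(processor.decode(out[0], skip_special_tokens=True))
```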
443
 
444
  # Function to load BLIP captioning model
445
  @st.cache_resource
 
453
  st.error(f"Error loading BLIP model: {str(e)}")
454
  return None, None
455
 
456
+ # Function to generate image caption
457
+ def generate_image_caption(image, processor, model, is_gradcam=False, max_length=100, num_beams=5):
458
+ """
459
+ Generate a caption for the input image using BLIP model
460
+
461
+ Args:
462
+ image (PIL.Image): Input image
463
+ processor: BLIP processor
464
+ model: BLIP model
465
+ is_gradcam (bool): Whether the image is a GradCAM visualization
466
+ max_length (int): Maximum length of the caption
467
+ num_beams (int): Number of beams for beam search
468
+
469
+ Returns:
470
+ str: Generated caption
471
+ """
472
  try:
473
  # Select the appropriate prompt based on image type
474
  prompt = GRADCAM_IMAGE_PROMPT if is_gradcam else ORIGINAL_IMAGE_PROMPT
 
497
  st.error(f"Error generating caption: {str(e)}")
498
  return "Error generating caption"
499
 
500
+ # ----- Fine-tuned Vision LLM -----
501
+
502
+ # Function to fix cross-attention masks
503
+ def fix_cross_attention_mask(inputs):
504
+ if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
505
+ batch_size, seq_len, _, num_tiles = inputs['cross_attention_mask'].shape
506
+ visual_features = 6404 # Critical dimension
507
+ new_mask = torch.ones((batch_size, seq_len, visual_features, num_tiles),
508
+ device=inputs['cross_attention_mask'].device)
509
+ inputs['cross_attention_mask'] = new_mask
510
+ st.success("Fixed cross-attention mask dimensions")
511
+ return inputs
512
+
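An illustrative check of what fix_cross_attention_mask does to a degenerate mask (the shapes are dummies; 6404 simply mirrors the constant in the code above):
```python
import torch

dummy = {"cross_attention_mask": torch.zeros((1, 32, 0, 4))}   # third dimension collapsed to 0
fixed = fix_cross_attention_mask(dummy)
print(fixed["cross_attention_mask"].shape)                     # torch.Size([1, 32, 6404, 4])
```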
513
+ # Load model function
514
+ @st.cache_resource
515
+ def load_llm_model():
516
+ with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
517
+ try:
518
+ # Check for GPU
519
+ has_gpu = check_gpu()
520
+
521
+ # Load base model and tokenizer using Unsloth
522
+ base_model_id = "unsloth/llama-3.2-11b-vision-instruct"
523
+ model, tokenizer = FastVisionModel.from_pretrained(
524
+ base_model_id,
525
+ load_in_4bit=True,
526
+ )
527
+
528
+ # Load the adapter
529
+ adapter_id = "saakshigupta/deepfake-explainer-1"
530
+ model = PeftModel.from_pretrained(model, adapter_id)
531
+
532
+ # Set to inference mode
533
+ FastVisionModel.for_inference(model)
534
+
535
+ return model, tokenizer
536
+ except Exception as e:
537
+ st.error(f"Error loading model: {str(e)}")
538
+ return None, None
539
+
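load_llm_model() calls check_gpu(), which is defined earlier in app.py and not shown in this hunk; a hypothetical stand-in consistent with that usage might look like:
```python
import torch
import streamlit as st

def check_gpu():
    # Hypothetical helper (assumption, not the app's actual code): report CUDA availability
    has_gpu = torch.cuda.is_available()
    if has_gpu:
        st.sidebar.success(f"GPU detected: {torch.cuda.get_device_name(0)}")
    else:
        st.sidebar.warning("No GPU detected; the 4-bit vision model will be slow on CPU.")
    return has_gpu
```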
540
+ # Analyze image function
541
+ def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
542
+ # Create a prompt that includes GradCAM information
543
+ if custom_instruction.strip():
544
+ full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
545
+ else:
546
+ full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."
547
+
548
+ # Format the message to include both the original image and the GradCAM visualization
549
+ messages = [
550
+ {"role": "user", "content": [
551
+ {"type": "image", "image": image}, # Original image
552
+ {"type": "image", "image": gradcam_overlay}, # GradCAM overlay
553
+ {"type": "text", "text": full_prompt}
554
+ ]}
555
+ ]
556
+
557
+ # Apply chat template
558
+ input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
559
+
560
+ # Process with image
561
+ inputs = tokenizer(
562
+ [image, gradcam_overlay], # Send both images
563
+ input_text,
564
+ add_special_tokens=False,
565
+ return_tensors="pt",
566
+ ).to(model.device)
567
+
568
+ # Fix cross-attention mask if needed
569
+ inputs = fix_cross_attention_mask(inputs)
570
+
571
+ # Generate response
572
+ with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
573
+ with torch.no_grad():
574
+ output_ids = model.generate(
575
+ **inputs,
576
+ max_new_tokens=max_tokens,
577
+ use_cache=True,
578
+ temperature=temperature,
579
+ top_p=0.9
580
+ )
581
+
582
+ # Decode the output
583
+ response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
584
+
585
+ # Try to extract just the model's response (after the prompt)
586
+ if full_prompt in response:
587
+ result = response.split(full_prompt)[-1].strip()
588
+ else:
589
+ result = response
590
+
591
+ return result
592
+
593
  # Main app
594
  def main():
595
  # Create placeholders for model state
596
  if 'clip_model_loaded' not in st.session_state:
597
  st.session_state.clip_model_loaded = False
 
617
  with clip_col:
618
  if not st.session_state.clip_model_loaded:
619
  if st.button("📥 Load CLIP Model for Detection", type="primary"):
620
+ # Load CLIP model
621
+ model = load_clip_model()
622
+ if model is not None:
623
+ st.session_state.clip_model = model
624
+ st.session_state.clip_model_loaded = True
625
+ st.success("✅ CLIP model loaded successfully!")
626
+ else:
627
+ st.error("❌ Failed to load CLIP model.")
628
  else:
629
  st.success("✅ CLIP model loaded and ready!")
630
 
631
  with llm_col:
632
  if not st.session_state.llm_model_loaded:
633
  if st.button("📥 Load Vision LLM for Analysis", type="primary"):
634
+ # Load LLM model
635
+ model, tokenizer = load_llm_model()
636
+ if model is not None and tokenizer is not None:
637
+ st.session_state.llm_model = model
638
+ st.session_state.tokenizer = tokenizer
639
+ st.session_state.llm_model_loaded = True
640
+ st.success("✅ Vision LLM loaded successfully!")
641
+ else:
642
+ st.error("❌ Failed to load Vision LLM.")
643
  else:
644
  st.success("✅ Vision LLM loaded and ready!")
645
 
646
  with blip_col:
647
  if not st.session_state.blip_model_loaded:
648
  if st.button("📥 Load BLIP for Captioning", type="primary"):
649
+ # Load BLIP model
650
+ processor, model = load_blip_model()
651
+ if model is not None and processor is not None:
652
+ st.session_state.blip_processor = processor
653
+ st.session_state.blip_model = model
654
+ st.session_state.blip_model_loaded = True
655
+ st.success("✅ BLIP captioning model loaded successfully!")
656
+ else:
657
+ st.error("❌ Failed to load BLIP model.")
658
  else:
659
  st.success("✅ BLIP captioning model loaded and ready!")
660
 
661
  # Image upload section
662
+ with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
663
  st.subheader("Upload an Image")
664
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
665
 
666
  if uploaded_file is not None:
 
667
  # Display the uploaded image
668
  try:
669
  image = Image.open(uploaded_file).convert("RGB")
670
  st.image(image, caption="Uploaded Image", use_column_width=True)
671
+
672
+ # Generate detailed caption for original image if BLIP model is loaded
673
+ if st.session_state.blip_model_loaded:
674
+ with st.spinner("Generating detailed image description..."):
675
+ caption = generate_image_caption(
676
+ image,
677
+ st.session_state.blip_processor,
678
+ st.session_state.blip_model,
679
+ is_gradcam=False
680
+ )
681
+ st.session_state.image_caption = caption
682
+ st.success(f"📝 Image Description Generated")
683
+
684
+ # Format the caption nicely
685
+ st.markdown("### Image Description:")
686
+ st.markdown(caption)
687
+
688
+ # Detect with CLIP model if loaded
689
+ if st.session_state.clip_model_loaded:
690
+ with st.spinner("Analyzing image with CLIP model..."):
691
+ # Preprocess image for CLIP
692
+ transform = transforms.Compose([
693
+ transforms.Resize((224, 224)),
694
+ transforms.ToTensor(),
695
+ transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
696
+ ])
697
+
698
+ # Create a simple dataset for the image
699
+ dataset = ImageDataset(image, transform=transform, face_only=True)
700
+ tensor, _, _, _, face_box, _ = dataset[0]
701
+ tensor = tensor.unsqueeze(0)
702
+
703
+ # Get device
704
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
705
+
706
+ # Move model and tensor to device
707
+ model = st.session_state.clip_model.to(device)
708
+ tensor = tensor.to(device)
709
+
710
+ # Forward pass
711
+ with torch.no_grad():
712
+ outputs = model.vision_model(pixel_values=tensor).pooler_output
713
+ logits = model.classification_head(outputs)
714
+ probs = torch.softmax(logits, dim=1)[0]
715
+ pred_class = torch.argmax(probs).item()
716
+ confidence = probs[pred_class].item()
717
+ pred_label = "Fake" if pred_class == 1 else "Real"
718
+
719
+ # Display results
720
+ result_col1, result_col2 = st.columns(2)
721
+ with result_col1:
722
+ st.metric("Prediction", pred_label)
723
+ with result_col2:
724
+ st.metric("Confidence", f"{confidence:.2%}")
725
+
726
+ # GradCAM visualization
727
+ st.subheader("GradCAM Visualization")
728
+ cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
729
+ image, model, device, pred_class
730
+ )
731
+
732
+ # Display GradCAM results
733
+ st.image(comparison, caption="Original | CAM | Overlay", use_column_width=True)
734
+
735
+ # Generate caption for GradCAM overlay image if BLIP model is loaded
736
+ if st.session_state.blip_model_loaded:
737
+ with st.spinner("Analyzing GradCAM visualization..."):
738
+ gradcam_caption = generate_image_caption(
739
+ overlay,
740
+ st.session_state.blip_processor,
741
+ st.session_state.blip_model,
742
+ is_gradcam=True,
743
+ max_length=150 # Longer for detailed analysis
744
+ )
745
+ st.session_state.gradcam_caption = gradcam_caption
746
+ st.success("✅ GradCAM analysis complete")
747
+
748
+ # Format the GradCAM caption nicely
749
+ st.markdown("### GradCAM Analysis:")
750
+ st.markdown(gradcam_caption)
751
+
752
+ # Save results in session state for LLM analysis
753
+ st.session_state.current_image = image
754
+ st.session_state.current_overlay = overlay
755
+ st.session_state.current_face_box = detected_face_box
756
+ st.session_state.current_pred_label = pred_label
757
+ st.session_state.current_confidence = confidence
758
+
759
+ st.success("✅ Initial detection and GradCAM visualization complete!")
760
+ else:
761
+ st.warning("⚠️ Please load the CLIP model first to perform initial detection.")
762
  except Exception as e:
763
+ st.error(f"Error processing image: {str(e)}")
764
+
765
+ # LLM Analysis section
766
+ with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
767
+ if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
768
+ st.subheader("Detailed Deepfake Analysis")
769
+
770
+ # Include both captions in the prompt if available
771
+ caption_text = ""
772
+ if hasattr(st.session_state, 'image_caption'):
773
+ caption_text += f"\n\nImage Description:\n{st.session_state.image_caption}"
774
+
775
+ if hasattr(st.session_state, 'gradcam_caption'):
776
+ caption_text += f"\n\nGradCAM Analysis:\n{st.session_state.gradcam_caption}"
777
+
778
+ # Default question with option to customize
779
+ default_question = f"This image has been classified as {st.session_state.current_pred_label}.{caption_text} Analyze the key features that led to this classification, focusing on the highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
780
+ question = st.text_area("Question/Prompt:", value=default_question, height=100)
781
+
782
+ # Analyze button
783
+ if st.button("🔍 Perform Detailed Analysis", type="primary"):
784
+ try:
785
+ result = analyze_image_with_llm(
786
+ st.session_state.current_image,
787
+ st.session_state.current_overlay,
788
+ st.session_state.current_face_box,
789
+ st.session_state.current_pred_label,
790
+ st.session_state.current_confidence,
791
+ question,
792
+ st.session_state.llm_model,
793
+ st.session_state.tokenizer,
794
+ temperature=temperature,
795
+ max_tokens=max_tokens,
796
+ custom_instruction=custom_instruction
797
+ )
798
+
799
+ # Display results
800
+ st.success("✅ Analysis complete!")
801
+
802
+ # Check if the result contains both technical and non-technical explanations
803
+ if "Technical" in result and "Non-Technical" in result:
804
+ try:
805
+ # Split the result into technical and non-technical sections
806
+ parts = result.split("Non-Technical")
807
+ technical = parts[0]
808
+ non_technical = "Non-Technical" + parts[1]
809
+
810
+ # Display in two columns
811
+ col1, col2 = st.columns(2)
812
+ with col1:
813
+ st.subheader("Technical Analysis")
814
+ st.markdown(technical)
815
+
816
+ with col2:
817
+ st.subheader("Simple Explanation")
818
+ st.markdown(non_technical)
819
+ except Exception as e:
820
+ # Fallback if splitting fails
821
+ st.subheader("Analysis Result")
822
+ st.markdown(result)
823
+ else:
824
+ # Just display the whole result
825
+ st.subheader("Analysis Result")
826
+ st.markdown(result)
827
+ except Exception as e:
828
+ st.error(f"Error during LLM analysis: {str(e)}")
829
+
830
+ elif not hasattr(st.session_state, 'current_image'):
831
+ st.warning("⚠️ Please upload an image and complete the initial detection first.")
832
+ else:
833
+ st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")
834
+
835
+ # Summary section with caption
836
+ if hasattr(st.session_state, 'current_image') and (hasattr(st.session_state, 'image_caption') or hasattr(st.session_state, 'gradcam_caption')):
837
+ with st.expander("Image Analysis Summary", expanded=True):
838
+ st.subheader("Generated Descriptions and Analysis")
839
+
840
+ # Display image, captions, and results in organized layout
841
+ col1, col2 = st.columns([1, 2])
842
+
843
+ with col1:
844
+ # Display original image and overlay side by side
845
+ st.image(st.session_state.current_image, caption="Original Image", use_column_width=True)
846
+ if hasattr(st.session_state, 'current_overlay'):
847
+ st.image(st.session_state.current_overlay, caption="GradCAM Overlay", use_column_width=True)
848
+
849
+ with col2:
850
+ # Detection result
851
+ if hasattr(st.session_state, 'current_pred_label'):
852
+ st.markdown(f"### Detection Result:")
853
+ st.markdown(f"Classification: **{st.session_state.current_pred_label}** (Confidence: {st.session_state.current_confidence:.2%})")
854
+
855
+ # Image description
856
+ if hasattr(st.session_state, 'image_caption'):
857
+ st.markdown("### Image Description:")
858
+ st.markdown(st.session_state.image_caption)
859
+
860
+ # GradCAM analysis
861
+ if hasattr(st.session_state, 'gradcam_caption'):
862
+ st.markdown("### GradCAM Analysis:")
863
+ st.markdown(st.session_state.gradcam_caption)
864
+
865
  # Footer
866
  st.markdown("---")
867
  st.caption("Advanced Deepfake Image Analyzer with Structured BLIP Captioning")