saakshigupta committed
Commit d9f8f2f · verified · 1 Parent(s): 0546177

Upload app-9.py

Files changed (1)
  1. app-9.py +936 -0
app-9.py ADDED
@@ -0,0 +1,936 @@
import streamlit as st
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from transformers import CLIPModel, BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import numpy as np
import io
import base64
import cv2
import matplotlib.pyplot as plt
from peft import PeftModel
from unsloth import FastVisionModel
import os
import tempfile
import warnings
warnings.filterwarnings("ignore", category=UserWarning)

# App title and description
st.set_page_config(
    page_title="Deepfake Analyzer",
    layout="wide",
    page_icon="🔍"
)

# Main title and description
st.title("Deepfake Image Analyser")
st.markdown("Analyse images for deepfake manipulation")

# Check for GPU availability
def check_gpu():
    if torch.cuda.is_available():
        gpu_info = torch.cuda.get_device_properties(0)
        st.sidebar.success(f"✅ GPU available: {gpu_info.name} ({gpu_info.total_memory / (1024**3):.2f} GB)")
        return True
    else:
        st.sidebar.warning("⚠️ No GPU detected. Analysis will be slower.")
        return False

# Sidebar components
st.sidebar.title("About")
st.sidebar.markdown("""
This tool detects deepfakes using four AI models:
- **CLIP**: Initial Real/Fake classification
- **GradCAM**: Highlights suspicious regions
- **BLIP**: Describes image content
- **Llama 3.2**: Explains potential manipulations

### Quick Start
1. **Load Models** - Start with CLIP, add others as needed
2. **Upload Image** - View classification and heat map
3. **Analyze** - Get explanations and ask questions

*GPU recommended for better performance*
""")

# Fixed values for temperature and max tokens
temperature = 0.7
max_tokens = 500

# Custom instruction text area in sidebar
use_custom_instructions = st.sidebar.toggle("Enable Custom Instructions", value=False, help="Toggle to enable/disable custom instructions")

if use_custom_instructions:
    custom_instruction = st.sidebar.text_area(
        "Custom Instructions (Advanced)",
        value="Specify your preferred style of explanation (e.g., 'Provide technical, detailed explanations' or 'Use simple, non-technical language'). You can also specify what aspects of the image to focus on.",
        help="Add specific instructions for the analysis"
    )
else:
    custom_instruction = ""

# ----- GradCAM Implementation -----

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, image, transform=None, face_only=True, dataset_name=None):
        self.image = image
        self.transform = transform
        self.face_only = face_only
        self.dataset_name = dataset_name
        # Load face detector
        self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    def __len__(self):
        return 1  # Only one image

    def detect_face(self, image_np):
        """Detect face in image and return the face region"""
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
        faces = self.face_detector.detectMultiScale(gray, 1.1, 5)

        # If no face is detected, use the whole image
        if len(faces) == 0:
            st.info("No face detected, using whole image for analysis")
            h, w = image_np.shape[:2]
            return (0, 0, w, h), image_np

        # Get the largest face
        if len(faces) > 1:
            # Choose the largest face by area
            areas = [w*h for (x, y, w, h) in faces]
            largest_idx = np.argmax(areas)
            x, y, w, h = faces[largest_idx]
        else:
            x, y, w, h = faces[0]

        # Add padding around the face (5% on each side)
        padding_x = int(w * 0.05)
        padding_y = int(h * 0.05)

        # Ensure padding doesn't go outside image bounds
        x1 = max(0, x - padding_x)
        y1 = max(0, y - padding_y)
        x2 = min(image_np.shape[1], x + w + padding_x)
        y2 = min(image_np.shape[0], y + h + padding_y)

        # Extract the face region
        face_img = image_np[y1:y2, x1:x2]

        return (x1, y1, x2-x1, y2-y1), face_img

    def __getitem__(self, idx):
        image_np = np.array(self.image)
        label = 0  # Default label; will be overridden by prediction

        # Store original image for visualization
        original_image = self.image.copy()

        # Detect face if required
        if self.face_only:
            face_box, face_img_np = self.detect_face(image_np)
            face_img = Image.fromarray(face_img_np)

            # Apply transform to face image
            if self.transform:
                face_tensor = self.transform(face_img)
            else:
                face_tensor = transforms.ToTensor()(face_img)

            return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
        else:
            # Process the whole image
            if self.transform:
                image_tensor = self.transform(self.image)
            else:
                image_tensor = transforms.ToTensor()(self.image)

            return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name

class GradCAM:
    def __init__(self, model, target_layer):
        self.model = model
        self.target_layer = target_layer
        self.gradients = None
        self.activations = None
        self._register_hooks()

    def _register_hooks(self):
        def forward_hook(module, input, output):
            if isinstance(output, tuple):
                self.activations = output[0]
            else:
                self.activations = output

        def backward_hook(module, grad_in, grad_out):
            if isinstance(grad_out, tuple):
                self.gradients = grad_out[0]
            else:
                self.gradients = grad_out

        layer = dict([*self.model.named_modules()])[self.target_layer]
        layer.register_forward_hook(forward_hook)
        layer.register_backward_hook(backward_hook)

    def generate(self, input_tensor, class_idx):
        self.model.zero_grad()

        try:
            # Use only the vision part of the model for gradient calculation
            vision_outputs = self.model.vision_model(pixel_values=input_tensor)

            # Get the pooler output
            features = vision_outputs.pooler_output

            # Create a dummy gradient for the feature based on the class idx
            one_hot = torch.zeros_like(features)
            one_hot[0, class_idx] = 1

            # Manually backpropagate
            features.backward(gradient=one_hot)

            # Check for None values
            if self.gradients is None or self.activations is None:
                st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
                return np.ones((14, 14), dtype=np.float32) * 0.5

            # Process gradients and activations for transformer-based model
            gradients = self.gradients.cpu().detach().numpy()
            activations = self.activations.cpu().detach().numpy()

            if len(activations.shape) == 3:  # [batch, sequence_length, hidden_dim]
                seq_len = activations.shape[1]

                # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
                if seq_len >= 197:
                    # Skip the class token (first token) and reshape the patch tokens into a square
                    patch_tokens = activations[0, 1:197, :]  # Remove the class token
                    # Take the mean across the hidden dimension
                    token_importance = np.mean(np.abs(patch_tokens), axis=1)
                    # Reshape to the expected grid size (14×14 for CLIP ViT)
                    cam = token_importance.reshape(14, 14)
                else:
                    # Try to find factors close to a square
                    side_len = int(np.sqrt(seq_len))
                    # Use the mean across features as importance
                    token_importance = np.mean(np.abs(activations[0]), axis=1)
                    # Create as square-like shape as possible
                    cam = np.zeros((side_len, side_len))
                    # Fill the cam with available values
                    flat_cam = cam.flatten()
                    flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
                    cam = flat_cam.reshape(side_len, side_len)
            else:
                # Fallback
                st.info("Using fallback CAM shape (14x14)")
                cam = np.ones((14, 14), dtype=np.float32) * 0.5  # Default fallback

            # Ensure we have valid values
            cam = np.maximum(cam, 0)
            if np.max(cam) > 0:
                cam = cam / np.max(cam)

            return cam

        except Exception as e:
            st.error(f"Error in GradCAM.generate: {str(e)}")
            return np.ones((14, 14), dtype=np.float32) * 0.5

def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
    """Overlay the CAM on the image"""
    if face_box is not None:
        x, y, w, h = face_box
        # Create a mask for the entire image (all zeros initially)
        img_np = np.array(image)
        full_h, full_w = img_np.shape[:2]
        full_cam = np.zeros((full_h, full_w), dtype=np.float32)

        # Resize CAM to match face region
        face_cam = cv2.resize(cam, (w, h))

        # Copy the face CAM into the full image CAM at the face position
        full_cam[y:y+h, x:x+w] = face_cam

        # Convert full CAM to image
        cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
        cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3]  # Apply colormap
        cam_colormap = (cam_colormap * 255).astype(np.uint8)
    else:
        # Resize CAM to match image dimensions
        img_np = np.array(image)
        h, w = img_np.shape[:2]
        cam_resized = cv2.resize(cam, (w, h))

        # Apply colormap
        cam_colormap = plt.cm.jet(cam_resized)[:, :, :3]  # Apply colormap
        cam_colormap = (cam_colormap * 255).astype(np.uint8)

    # Blend the original image with the colormap
    img_np_float = img_np.astype(float) / 255.0
    cam_colormap_float = cam_colormap.astype(float) / 255.0

    blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
    blended = (blended * 255).astype(np.uint8)

    return Image.fromarray(blended)

def save_comparison(image, cam, overlay, face_box=None):
    """Create a side-by-side comparison of the original, CAM, and overlay"""
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    # Original Image
    axes[0].imshow(image)
    axes[0].set_title("Original")
    if face_box is not None:
        x, y, w, h = face_box
        rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
        axes[0].add_patch(rect)
    axes[0].axis("off")

    # CAM
    if face_box is not None:
        # Create a full image CAM that highlights only the face
        img_np = np.array(image)
        h, w = img_np.shape[:2]
        full_cam = np.zeros((h, w))

        x, y, fw, fh = face_box
        # Resize CAM to face size
        face_cam = cv2.resize(cam, (fw, fh))
        # Place it in the right position
        full_cam[y:y+fh, x:x+fw] = face_cam
        axes[1].imshow(full_cam, cmap="jet")
    else:
        cam_resized = cv2.resize(cam, (image.width, image.height))
        axes[1].imshow(cam_resized, cmap="jet")
    axes[1].set_title("CAM")
    axes[1].axis("off")

    # Overlay
    axes[2].imshow(overlay)
    axes[2].set_title("Overlay")
    axes[2].axis("off")

    plt.tight_layout()

    # Convert plot to PIL Image for Streamlit display
    buf = io.BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    plt.close()
    buf.seek(0)
    return Image.open(buf)

# Function to load GradCAM CLIP model
@st.cache_resource
def load_clip_model():
    with st.spinner("Loading CLIP model for GradCAM..."):
        try:
            model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")

            # Apply a simple classification head
            model.classification_head = nn.Linear(1024, 2)
            model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
            model.classification_head.bias.data.zero_()

            model.eval()
            return model
        except Exception as e:
            st.error(f"Error loading CLIP model: {str(e)}")
            return None

def get_target_layer_clip(model):
    """Get the target layer for GradCAM"""
    return "vision_model.encoder.layers.23"

def process_image_with_gradcam(image, model, device, pred_class):
    """Process an image with GradCAM"""
    # Set up transformations
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
    ])

    # Create dataset for the single image
    dataset = ImageDataset(image, transform=transform, face_only=True)

    # Custom collate function
    def custom_collate(batch):
        tensors = [item[0] for item in batch]
        labels = [item[1] for item in batch]
        paths = [item[2] for item in batch]
        images = [item[3] for item in batch]
        face_boxes = [item[4] for item in batch]
        dataset_names = [item[5] for item in batch]

        tensors = torch.stack(tensors)
        labels = torch.tensor(labels)

        return tensors, labels, paths, images, face_boxes, dataset_names

    # Create dataloader
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)

    # Extract the batch
    for batch in dataloader:
        input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
        original_image = original_images[0]
        face_box = face_boxes[0]

        # Move tensors and model to device
        input_tensor = input_tensor.to(device)
        model = model.to(device)

        try:
            # Create GradCAM extractor
            target_layer = get_target_layer_clip(model)
            cam_extractor = GradCAM(model, target_layer)

            # Generate CAM
            cam = cam_extractor.generate(input_tensor, pred_class)

            # Create visualizations
            overlay = overlay_cam_on_image(original_image, cam, face_box)
            comparison = save_comparison(original_image, cam, overlay, face_box)

            # Return results
            return cam, overlay, comparison, face_box

        except Exception as e:
            st.error(f"Error processing image with GradCAM: {str(e)}")
            # Return default values
            default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
            overlay = overlay_cam_on_image(original_image, default_cam, face_box)
            comparison = save_comparison(original_image, default_cam, overlay, face_box)
            return default_cam, overlay, comparison, face_box

# ----- BLIP Image Captioning -----

# Function to load BLIP captioning models
@st.cache_resource
def load_blip_models():
    with st.spinner("Loading BLIP captioning models..."):
        try:
            # Load original BLIP model for general image captioning
            original_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
            original_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

            # Load fine-tuned BLIP model for GradCAM analysis
            finetuned_processor = BlipProcessor.from_pretrained("saakshigupta/deepfake-blip-large")
            finetuned_model = BlipForConditionalGeneration.from_pretrained("saakshigupta/deepfake-blip-large")

            return original_processor, original_model, finetuned_processor, finetuned_model
        except Exception as e:
            st.error(f"Error loading BLIP models: {str(e)}")
            return None, None, None, None

# Function to generate image caption using BLIP's VQA approach for GradCAM
def generate_gradcam_caption(image, processor, model, max_length=60):
    """
    Generate a detailed analysis of GradCAM visualization using the fine-tuned BLIP model
    """
    try:
        # Process image first
        inputs = processor(image, return_tensors="pt")

        # Check for available GPU and move model and inputs
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = model.to(device)
        inputs = {k: v.to(device) if hasattr(v, 'to') else v for k, v in inputs.items()}

        # Generate caption
        with torch.no_grad():
            output = model.generate(**inputs, max_length=max_length, num_beams=5)

        # Decode the output
        caption = processor.decode(output[0], skip_special_tokens=True)

        # Extract descriptions using the full text
        high_match = caption.split("high activation :")[1].split("moderate")[0] if "high activation :" in caption else ""
        moderate_match = caption.split("moderate activation :")[1].split("low")[0] if "moderate activation :" in caption else ""
        low_match = caption.split("low activation :")[1] if "low activation :" in caption else ""

        # Format the output
        formatted_text = ""
        if high_match:
            formatted_text += f"**High activation**:\n{high_match.strip()}\n\n"
        if moderate_match:
            formatted_text += f"**Moderate activation**:\n{moderate_match.strip()}\n\n"
        if low_match:
            formatted_text += f"**Low activation**:\n{low_match.strip()}"

        return formatted_text.strip()

    except Exception as e:
        st.error(f"Error analyzing GradCAM: {str(e)}")
        return "Error analyzing GradCAM visualization"

# Function to generate caption for original image
def generate_image_caption(image, processor, model, max_length=75, num_beams=5):
    """Generate a caption for the original image using the original BLIP model"""
    try:
        # Check for available GPU
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = model.to(device)

        # For original image, use unconditional captioning
        inputs = processor(image, return_tensors="pt").to(device)

        # Generate caption
        with torch.no_grad():
            output = model.generate(**inputs, max_length=max_length, num_beams=num_beams)

        # Decode the output
        caption = processor.decode(output[0], skip_special_tokens=True)

        # Format into structured description
        structured_caption = f"""
**Subject**: The image shows a person in a photograph.

**Appearance**: {caption}

**Background**: The background appears to be a controlled environment.

**Lighting**: The lighting appears to be professional with even illumination.

**Colors**: The image contains natural skin tones and colors typical of photography.

**Notable Elements**: The facial features and expression are the central focus of the image.
"""
        return structured_caption.strip()

    except Exception as e:
        st.error(f"Error generating caption: {str(e)}")
        return "Error generating caption"

# ----- Fine-tuned Vision LLM -----

# Function to fix cross-attention masks
def fix_cross_attention_mask(inputs):
    if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
        batch_size, seq_len, _, num_tiles = inputs['cross_attention_mask'].shape
        visual_features = 6404  # Critical dimension
        new_mask = torch.ones((batch_size, seq_len, visual_features, num_tiles),
                              device=inputs['cross_attention_mask'].device)
        inputs['cross_attention_mask'] = new_mask
    return inputs

# Load model function
@st.cache_resource
def load_llm_model():
    with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
        try:
            # Check for GPU
            has_gpu = check_gpu()

            # Load base model and tokenizer using Unsloth
            base_model_id = "unsloth/llama-3.2-11b-vision-instruct"
            model, tokenizer = FastVisionModel.from_pretrained(
                base_model_id,
                load_in_4bit=True,
            )

            # Load the adapter
            adapter_id = "saakshigupta/deepfake-explainer-2"
            model = PeftModel.from_pretrained(model, adapter_id)

            # Set to inference mode
            FastVisionModel.for_inference(model)

            return model, tokenizer
        except Exception as e:
            st.error(f"Error loading model: {str(e)}")
            return None, None

# Analyze image function
def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
    # Create a prompt that includes GradCAM information
    if custom_instruction.strip():
        full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
    else:
        full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."

    try:
        # Format the message to include all available images
        message_content = [{"type": "text", "text": full_prompt}]

        # Add original image
        message_content.insert(0, {"type": "image", "image": image})

        # Add GradCAM overlay
        message_content.insert(1, {"type": "image", "image": gradcam_overlay})

        # Add comparison image if available
        if hasattr(st.session_state, 'comparison_image'):
            message_content.insert(2, {"type": "image", "image": st.session_state.comparison_image})

        messages = [{"role": "user", "content": message_content}]

        # Apply chat template
        input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

        # Create list of images to process
        image_list = [image, gradcam_overlay]
        if hasattr(st.session_state, 'comparison_image'):
            image_list.append(st.session_state.comparison_image)

        try:
            # Try with multiple images first
            inputs = tokenizer(
                image_list,
                input_text,
                add_special_tokens=False,
                return_tensors="pt",
            ).to(model.device)
        except Exception as e:
            st.warning(f"Multiple image analysis encountered an issue: {str(e)}")
            st.info("Falling back to single image analysis")
            # Fallback to single image
            inputs = tokenizer(
                image,
                input_text,
                add_special_tokens=False,
                return_tensors="pt",
            ).to(model.device)

        # Fix cross-attention mask if needed
        inputs = fix_cross_attention_mask(inputs)

        # Generate response
        with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
            with torch.no_grad():
                output_ids = model.generate(
                    **inputs,
                    max_new_tokens=max_tokens,
                    use_cache=True,
                    temperature=temperature,
                    top_p=0.9
                )

        # Decode the output
        response = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        # Try to extract just the model's response (after the prompt)
        if full_prompt in response:
            result = response.split(full_prompt)[-1].strip()
        else:
            result = response

        return result

    except Exception as e:
        st.error(f"Error during LLM analysis: {str(e)}")
        return f"Error analyzing image: {str(e)}"

# Main app
def main():
    # Initialize session state variables
    if 'clip_model_loaded' not in st.session_state:
        st.session_state.clip_model_loaded = False
        st.session_state.clip_model = None

    if 'llm_model_loaded' not in st.session_state:
        st.session_state.llm_model_loaded = False
        st.session_state.llm_model = None
        st.session_state.tokenizer = None

    if 'blip_model_loaded' not in st.session_state:
        st.session_state.blip_model_loaded = False
        st.session_state.original_processor = None
        st.session_state.original_model = None
        st.session_state.finetuned_processor = None
        st.session_state.finetuned_model = None

    # Initialize chat history
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    # Create expanders for each stage
    with st.expander("Stage 1: Model Loading", expanded=True):
        st.write("Please load the models using the buttons below:")

        # Buttons for loading models
        clip_col, blip_col, llm_col = st.columns(3)

        with clip_col:
            if not st.session_state.clip_model_loaded:
                if st.button("📥 Load CLIP Model for Detection", type="primary"):
                    # Load CLIP model
                    model = load_clip_model()
                    if model is not None:
                        st.session_state.clip_model = model
                        st.session_state.clip_model_loaded = True
                        st.success("✅ CLIP model loaded successfully!")
                    else:
                        st.error("❌ Failed to load CLIP model.")
            else:
                st.success("✅ CLIP model loaded and ready!")

        with blip_col:
            if not st.session_state.blip_model_loaded:
                if st.button("📥 Load BLIP for Captioning", type="primary"):
                    # Load BLIP models
                    original_processor, original_model, finetuned_processor, finetuned_model = load_blip_models()
                    if all([original_processor, original_model, finetuned_processor, finetuned_model]):
                        st.session_state.original_processor = original_processor
                        st.session_state.original_model = original_model
                        st.session_state.finetuned_processor = finetuned_processor
                        st.session_state.finetuned_model = finetuned_model
                        st.session_state.blip_model_loaded = True
                        st.success("✅ BLIP captioning models loaded successfully!")
                    else:
                        st.error("❌ Failed to load BLIP models.")
            else:
                st.success("✅ BLIP captioning models loaded and ready!")

        with llm_col:
            if not st.session_state.llm_model_loaded:
                if st.button("📥 Load Vision LLM for Analysis", type="primary"):
                    # Load LLM model
                    model, tokenizer = load_llm_model()
                    if model is not None and tokenizer is not None:
                        st.session_state.llm_model = model
                        st.session_state.tokenizer = tokenizer
                        st.session_state.llm_model_loaded = True
                        st.success("✅ Vision LLM loaded successfully!")
                    else:
                        st.error("❌ Failed to load Vision LLM.")
            else:
                st.success("✅ Vision LLM loaded and ready!")

    # Image upload section
    with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
        st.subheader("Upload an Image")
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

        if uploaded_file is not None:
            try:
                # Load and display the image (with controlled size)
                image = Image.open(uploaded_file).convert("RGB")

                # Display the image with a controlled width
                col1, col2 = st.columns([1, 2])
                with col1:
                    st.image(image, caption="Uploaded Image", width=300)

                # Generate detailed caption for original image if BLIP model is loaded
                if st.session_state.blip_model_loaded:
                    with st.spinner("Generating image description..."):
                        caption = generate_image_caption(
                            image,
                            st.session_state.original_processor,
                            st.session_state.original_model
                        )
                        st.session_state.image_caption = caption
                        # Store caption but don't display it yet

                # Detect with CLIP model if loaded
                if st.session_state.clip_model_loaded:
                    with st.spinner("Analyzing image with CLIP model..."):
                        # Preprocess image for CLIP
                        transform = transforms.Compose([
                            transforms.Resize((224, 224)),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
                        ])

                        # Create a simple dataset for the image
                        dataset = ImageDataset(image, transform=transform, face_only=True)
                        tensor, _, _, _, face_box, _ = dataset[0]
                        tensor = tensor.unsqueeze(0)

                        # Get device
                        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

                        # Move model and tensor to device
                        model = st.session_state.clip_model.to(device)
                        tensor = tensor.to(device)

                        # Forward pass
                        with torch.no_grad():
                            outputs = model.vision_model(pixel_values=tensor).pooler_output
                            logits = model.classification_head(outputs)
                            probs = torch.softmax(logits, dim=1)[0]
                            pred_class = torch.argmax(probs).item()
                            confidence = probs[pred_class].item()
                            pred_label = "Fake" if pred_class == 1 else "Real"

                        # Display results
                        with col2:
                            st.markdown("### Detection Result")
                            st.markdown(f"**Classification:** {pred_label} (Confidence: {confidence:.2%})")

                            # GradCAM visualization
                            st.subheader("GradCAM Visualization")
                            cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
                                image, model, device, pred_class
                            )

                            # Display GradCAM results (controlled size)
                            st.image(comparison, caption="Original | CAM | Overlay", width=700)

                            # Generate caption for GradCAM overlay image if BLIP model is loaded
                            if st.session_state.blip_model_loaded:
                                with st.spinner("Analyzing GradCAM visualization..."):
                                    gradcam_caption = generate_gradcam_caption(
                                        overlay,
                                        st.session_state.finetuned_processor,
                                        st.session_state.finetuned_model
                                    )
                                    st.session_state.gradcam_caption = gradcam_caption
                                    # Store caption but don't display it yet

                        # Save results in session state for LLM analysis
                        st.session_state.current_image = image
                        st.session_state.current_overlay = overlay
                        st.session_state.current_face_box = detected_face_box
                        st.session_state.current_pred_label = pred_label
                        st.session_state.current_confidence = confidence

                        st.success("✅ Initial detection and GradCAM visualization complete!")
                else:
                    st.warning("⚠️ Please load the CLIP model first to perform initial detection.")
            except Exception as e:
                st.error(f"Error processing image: {str(e)}")
                import traceback
                st.error(traceback.format_exc())  # This will show the full error traceback

    # Image Analysis Summary section - AFTER Stage 2
    if hasattr(st.session_state, 'current_image') and (hasattr(st.session_state, 'image_caption') or hasattr(st.session_state, 'gradcam_caption')):
        with st.expander("Image Analysis Summary", expanded=True):
            # Display images and analysis in organized layout
            col1, col2 = st.columns([1, 2])

            with col1:
                # Display original image
                st.image(st.session_state.current_image, caption="Original Image", width=300)
                # Display GradCAM overlay
                if hasattr(st.session_state, 'current_overlay'):
                    st.image(st.session_state.current_overlay, caption="GradCAM Visualization", width=300)

            with col2:
                # Image description
                if hasattr(st.session_state, 'image_caption'):
                    st.markdown("### Image Description")
                    st.markdown(st.session_state.image_caption)
                    st.markdown("---")

                # GradCAM analysis
                if hasattr(st.session_state, 'gradcam_caption'):
                    st.markdown("### GradCAM Analysis")
                    st.markdown(st.session_state.gradcam_caption)
                    st.markdown("---")

    # LLM Analysis section - AFTER Image Analysis Summary
    with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
        if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
            st.subheader("Detailed Deepfake Analysis")

            # Display chat history
            for i, (question, answer) in enumerate(st.session_state.chat_history):
                st.markdown(f"**Question {i+1}:** {question}")
                st.markdown(f"**Answer:** {answer}")
                st.markdown("---")

            # Include both captions in the prompt if available
            caption_text = ""
            if hasattr(st.session_state, 'image_caption'):
                caption_text += f"\n\nImage Description:\n{st.session_state.image_caption}"

            if hasattr(st.session_state, 'gradcam_caption'):
                caption_text += f"\n\nGradCAM Analysis:\n{st.session_state.gradcam_caption}"

            # Default question with option to customize
            default_question = f"This image has been classified as {st.session_state.current_pred_label}. Analyze all the provided images (original, GradCAM visualization, and comparison) to determine if this is a deepfake. Focus on highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."

            # User input for new question
            new_question = st.text_area("Ask a question about the image:", value=default_question if not st.session_state.chat_history else "", height=100)

            # Analyze button and Clear Chat button in the same row
            col1, col2 = st.columns([3, 1])
            with col1:
                analyze_button = st.button("🔍 Send Question", type="primary")
            with col2:
                clear_button = st.button("🗑️ Clear Chat History")

            if clear_button:
                st.session_state.chat_history = []
                st.experimental_rerun()

            if analyze_button and new_question:
                try:
                    # Add caption info if it's the first question
                    if not st.session_state.chat_history:
                        full_question = new_question + caption_text
                    else:
                        full_question = new_question

                    result = analyze_image_with_llm(
                        st.session_state.current_image,
                        st.session_state.current_overlay,
                        st.session_state.current_face_box,
                        st.session_state.current_pred_label,
                        st.session_state.current_confidence,
                        full_question,
                        st.session_state.llm_model,
                        st.session_state.tokenizer,
                        temperature=temperature,
                        max_tokens=max_tokens,
                        custom_instruction=custom_instruction
                    )

                    # Add to chat history
                    st.session_state.chat_history.append((new_question, result))

                    # Display the latest result too
                    st.success("✅ Analysis complete!")

                    # Check if the result contains both technical and non-technical explanations
                    if "Technical" in result and "Non-Technical" in result:
                        try:
                            # Split the result into technical and non-technical sections
                            parts = result.split("Non-Technical")
                            technical = parts[0]
                            non_technical = "Non-Technical" + parts[1]

                            # Display in two columns
                            tech_col, simple_col = st.columns(2)
                            with tech_col:
                                st.subheader("Technical Analysis")
                                st.markdown(technical)

                            with simple_col:
                                st.subheader("Simple Explanation")
                                st.markdown(non_technical)
                        except Exception as e:
                            # Fallback if splitting fails
                            st.subheader("Analysis Result")
                            st.markdown(result)
                    else:
                        # Just display the whole result
                        st.subheader("Analysis Result")
                        st.markdown(result)

                    # Rerun to update the chat history display
                    st.experimental_rerun()

                except Exception as e:
                    st.error(f"Error during LLM analysis: {str(e)}")

        elif not hasattr(st.session_state, 'current_image'):
            st.warning("⚠️ Please upload an image and complete the initial detection first.")
        else:
            st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")

    # Footer
    st.markdown("---")

    # Add model version indicator in sidebar
    st.sidebar.info("Using deepfake-explainer-2 model")

if __name__ == "__main__":
    main()
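
For quick local verification of the detection step in isolation, here is a minimal standalone sketch of the CLIP classification performed in Stage 2. It reuses the same backbone and preprocessing as `load_clip_model()`; the classification head is freshly initialized exactly as in the app, so its Real/Fake output is only meaningful once trained head weights are loaded, and `example.jpg` is a placeholder path.

```python
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from transformers import CLIPModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Same backbone and classification head shape as in load_clip_model()
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
model.classification_head = nn.Linear(1024, 2)  # untrained head; outputs are illustrative only
model.eval().to(device)

# Same preprocessing as the app's CLIP transform
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                         std=[0.26862954, 0.26130258, 0.27577711]),
])

image = Image.open("example.jpg").convert("RGB")  # placeholder input path
tensor = transform(image).unsqueeze(0).to(device)

with torch.no_grad():
    # Pooled vision features -> 2-way Real/Fake logits, mirroring the Stage 2 forward pass
    pooled = model.vision_model(pixel_values=tensor).pooler_output
    probs = torch.softmax(model.classification_head(pooled), dim=1)[0]

label = "Fake" if probs.argmax().item() == 1 else "Real"
print(f"{label} (confidence: {probs.max().item():.2%})")
```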