Wolowolo committed
Commit 74711ac · verified · 1 Parent(s): 1e770e5

Update app.py

Files changed (1): app.py (+78 −58)
app.py CHANGED
@@ -7,6 +7,7 @@
 
 import sys
 import os
+
 os.system(f'pip install dlib')
 import dlib
 import argparse
@@ -24,23 +25,26 @@ import matplotlib.pyplot as plt
 from torchvision import transforms
 import traceback
 from pytorch_grad_cam import (
-    GradCAM,ScoreCAM,
+    GradCAM, ScoreCAM,
     XGradCAM, EigenCAM
 )
 from pytorch_grad_cam import GuidedBackpropReLUModel
 from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image
 
-def reshape_transform(tensor,height=14,width=14):
-    result = tensor[:, 1:, :].reshape(tensor.size(0),height,width,tensor.size(2))
-    result = result.transpose(2,3).transpose(1,2)
+
+def reshape_transform(tensor, height=14, width=14):
+    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
+    result = result.transpose(2, 3).transpose(1, 2)
     return result
 
+
 def get_args_parser():
     parser = argparse.ArgumentParser('FSFM3C fine-tuning&Testing for image classification', add_help=False)
     parser.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU')
     parser.add_argument('--epochs', default=50, type=int)
     parser.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations')
-    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train')
+    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
+                        help='Name of model to train')
     parser.add_argument('--input_size', default=224, type=int, help='images input size')
     parser.add_argument('--normalize_from_IMN', action='store_true', help='cal mean and std from imagenet')
     parser.set_defaults(normalize_from_IMN=True)
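For context, the reshape_transform touched here is the usual pytorch-grad-cam adapter for ViT backbones, which produce token sequences rather than spatial feature maps. A minimal sketch of its effect, assuming a ViT-B/16 at 224×224 (1 CLS token + 14×14 patch tokens, 768-dim embeddings; the dummy shapes below are illustrative, matching the height/width defaults):

import torch

def reshape_transform(tensor, height=14, width=14):
    # Drop the leading CLS token and lay the 196 patch tokens out on a
    # 14x14 grid: (B, 197, C) -> (B, 14, 14, C).
    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
    # Move channels first, since the CAM code expects (B, C, H, W) feature maps.
    result = result.transpose(2, 3).transpose(1, 2)
    return result

tokens = torch.randn(2, 197, 768)       # (batch, 1 + 14*14 tokens, embed dim)
print(reshape_transform(tokens).shape)  # torch.Size([2, 768, 14, 14])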
@@ -59,7 +63,8 @@ def get_args_parser():
     parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob')
     parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode')
     parser.add_argument('--recount', type=int, default=1, help='Random erase count')
-    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first augmentation split')
+    parser.add_argument('--resplit', action='store_true', default=False,
+                        help='Do not random erase first augmentation split')
     parser.add_argument('--mixup', type=float, default=0, help='mixup alpha')
     parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha')
     parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio')
@@ -69,7 +74,8 @@ def get_args_parser():
     parser.add_argument('--finetune', default='', help='finetune from checkpoint')
     parser.add_argument('--global_pool', action='store_true')
     parser.set_defaults(global_pool=True)
-    parser.add_argument('--cls_token', action='store_false', dest='global_pool', help='Use class token for classification')
+    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
+                        help='Use class token for classification')
     parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path')
     parser.add_argument('--nb_classes', default=1000, type=int, help='number of the classification types')
     parser.add_argument('--output_dir', default='', help='path where to save')
@@ -116,10 +122,10 @@ def load_model(select_skpt):
     model.load_state_dict(checkpoint['model'], strict=False)
     model.eval()
     global cam
-    cam = GradCAM(model = model,
-                  target_layers=[model.blocks[-1].norm1],
-                  reshape_transform=reshape_transform
-                  )
+    cam = GradCAM(model=model,
+                  target_layers=[model.blocks[-1].norm1],
+                  reshape_transform=reshape_transform
+                  )
     return gr.update(), f"[Loaded Model Successfully:] {args.resume}] "
 
 
@@ -168,29 +174,27 @@ def extract_face_from_fixed_num_frames(src_video, dst_path, num_frames=None):
         img.save(os.path.join(dst_path, '0', save_img_name))
     video_capture.release()
     return frame_indices
+
+
 class TargetCategory:
     def __init__(self, category_index):
         self.category_index = category_index
-
+
     def __call__(self, output):
        return output[self.category_index]
-def preprocess_image_cam(pil_img,mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225]):
-    # Convert the PIL image to a numpy array
+
+
+def preprocess_image_cam(pil_img,
+                         mean=[0.5482207536697388, 0.42340534925460815, 0.3654651641845703],
+                         std=[0.2789176106452942, 0.2438540756702423, 0.23493893444538116]):
     img_np = np.array(pil_img)
-
-    # Normalize to [0, 1]
     img_np = img_np.astype(np.float32) / 255.0
-
-    # Standardize with the per-channel mean and std
     img_np = (img_np - mean) / std
-
-    # Reorder dimensions to the model input layout (C, H, W)
     img_np = np.transpose(img_np, (2, 0, 1))
-
-    # Add a batch dimension (B, C, H, W)
     img_np = np.expand_dims(img_np, axis=0)
-
     return img_np
+
+
 def FSFM3C_image_detection(image):
     frame_path = os.path.join(FRAME_SAVE_PATH, str(len(os.listdir(FRAME_SAVE_PATH))))
     os.makedirs(frame_path, exist_ok=True)
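This hunk also swaps the ImageNet mean/std for custom per-channel statistics, presumably measured on the detector's own face-crop data (the commit itself does not say; that is an assumption). For reference, a hedged sketch of how such statistics are conventionally computed, where face_crops/ is a hypothetical folder of crops:

import numpy as np
from PIL import Image
from pathlib import Path

# Accumulate normalized pixels from every crop (assumes the hypothetical
# folder exists and is non-empty).
pixels = [np.asarray(Image.open(p).convert('RGB'), dtype=np.float32) / 255.0
          for p in Path('face_crops').glob('*.png')]
pixels = np.concatenate([x.reshape(-1, 3) for x in pixels], axis=0)
print('mean =', pixels.mean(axis=0))  # the diff's values: ~[0.548, 0.423, 0.365]
print('std  =', pixels.std(axis=0))   #                    ~[0.279, 0.244, 0.235]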
@@ -204,7 +208,9 @@ def FSFM3C_image_detection(image):
     args.batch_size = 1
     dataset_val = build_dataset(is_train=False, args=args)
     sampler_val = torch.utils.data.SequentialSampler(dataset_val)
-    data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
+    data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size,
+                                                  num_workers=args.num_workers, pin_memory=args.pin_mem,
+                                                  drop_last=False)
 
     if CKPT_CLASS[ckpt] > 2:
         frame_preds_list, video_pred_list = test_multi_class(data_loader_val, model, device)
@@ -217,10 +223,12 @@ def FSFM3C_image_detection(image):
 
         # Generate CAM heatmap for the detected class
         use_cuda = True
-        input_tensor = preprocess_image(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        input_tensor = preprocess_image(img,
+                                        mean=[0.5482207536697388, 0.42340534925460815, 0.3654651641845703],
+                                        std=[0.2789176106452942, 0.2438540756702423, 0.23493893444538116])
         if use_cuda:
             input_tensor = input_tensor.cuda()
-
+
         # Dynamically determine the target category based on the maximum probability class
         category_names_to_index = {
             'Real or Bonafide': 0,
@@ -229,7 +237,7 @@ def FSFM3C_image_detection(image):
             'Spoofing or Presentation-attack': 3
         }
         target_category = TargetCategory(category_names_to_index[max_prob_class])
-
+
         grayscale_cam = cam(input_tensor=input_tensor, targets=[target_category])
         grayscale_cam = 1 - grayscale_cam[0, :]
         img = np.array(img)
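To see how the pieces around this cam call fit together, here is a self-contained sketch of the same Grad-CAM wiring, with a stock timm ViT-B/16 standing in for the app's models_vit backbone and a random array standing in for the cropped face (both are assumptions for illustration; pytorch-grad-cam's ClassifierOutputTarget plays the role of the app's TargetCategory):

import numpy as np
import timm
import torch
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget

def reshape_transform(tensor, height=14, width=14):
    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
    return result.transpose(2, 3).transpose(1, 2)

model = timm.create_model('vit_base_patch16_224', pretrained=False, num_classes=4).eval()
cam = GradCAM(model=model,
              target_layers=[model.blocks[-1].norm1],  # same layer choice as load_model()
              reshape_transform=reshape_transform)

rgb = np.random.rand(224, 224, 3).astype(np.float32)  # stand-in face crop, float RGB in [0, 1]
input_tensor = preprocess_image(rgb,
                                mean=[0.5482207536697388, 0.42340534925460815, 0.3654651641845703],
                                std=[0.2789176106452942, 0.2438540756702423, 0.23493893444538116])
grayscale_cam = cam(input_tensor=input_tensor,
                    targets=[ClassifierOutputTarget(0)])  # explain class 0 ('Real or Bonafide')
heatmap = 1 - grayscale_cam[0, :]                         # the app inverts the map before overlay
visualization = show_cam_on_image(rgb, heatmap, use_rgb=True)
print(visualization.shape)  # (224, 224, 3), uint8 overlay image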
@@ -244,7 +252,7 @@ def FSFM3C_image_detection(image):
         # cv2.putText(visualization, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
         output_path = "./CAM_images/output_heatmap.png"
         cv2.imwrite(output_path, visualization)
-        return image_results, output_path,probabilities[max_prob_index]
+        return image_results, output_path, probabilities[max_prob_index]
 
     if CKPT_CLASS[ckpt] == 2:
         frame_preds_list, video_pred_list = test_two_class(data_loader_val, model, device)
@@ -257,7 +265,7 @@ def FSFM3C_image_detection(image):
         label = "Spoofing" if prob <= 0.5 else "Bonafide"
         prob = prob if label == "Bonafide" else 1 - prob
         image_results = f"The largest face in this image may be {label} with probability {prob * 100:.1f}%"
-        return image_results, None ,None
+        return image_results, None, None
 
 
 def FSFM3C_video_detection(video, num_frames):
@@ -270,19 +278,25 @@ def FSFM3C_video_detection(video, num_frames):
         args.batch_size = num_frames
         dataset_val = build_dataset(is_train=False, args=args)
         sampler_val = torch.utils.data.SequentialSampler(dataset_val)
-        data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
+        data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size,
+                                                      num_workers=args.num_workers, pin_memory=args.pin_mem,
+                                                      drop_last=False)
 
         if CKPT_CLASS[ckpt] > 2:
             frame_preds_list, video_pred_list = test_multi_class(data_loader_val, model, device)
-            class_names = ['Real or Bonafide', 'Deepfake', 'Diffusion or AIGC generated', 'Spoofing or Presentation-attack']
+            class_names = ['Real or Bonafide', 'Deepfake', 'Diffusion or AIGC generated',
+                           'Spoofing or Presentation-attack']
             avg_video_pred = np.mean(video_pred_list, axis=0)
             max_prob_index = np.argmax(avg_video_pred)
             max_prob_class = class_names[max_prob_index]
             probabilities = [f"{class_names[i]}: {prob * 100:.1f}%" for i, prob in enumerate(avg_video_pred)]
 
-            frame_results = {f"frame_{frame_indices[i]}": [f"{class_names[j]}: {prob * 100:.1f}%" for j, prob in enumerate(frame_preds_list[i])] for i in range(len(frame_indices))}
-            video_results = (f"The largest face in this video may be {max_prob_class} with probability: \n [{', '.join(probabilities)}]\n \n"
-                             f"The frame-level detection results ['frame_index': 'probabilities']: \n{frame_results}")
+            frame_results = {f"frame_{frame_indices[i]}": [f"{class_names[j]}: {prob * 100:.1f}%" for j, prob in
+                                                           enumerate(frame_preds_list[i])] for i in
+                             range(len(frame_indices))}
+            video_results = (
+                f"The largest face in this video may be {max_prob_class} with probability: \n [{', '.join(probabilities)}]\n \n"
+                f"The frame-level detection results ['frame_index': 'probabilities']: \n{frame_results}")
             return video_results
 
         if CKPT_CLASS[ckpt] == 2:
@@ -292,23 +306,26 @@ def FSFM3C_video_detection(video, num_frames):
                 label = "Deepfake" if prob <= 0.5 else "Real"
                 prob = prob if label == "Real" else 1 - prob
                 frame_results = {f"frame_{frame_indices[i]}": f"{(frame_preds_list[i]) * 100:.1f}%" for i in
-                                 range(len(frame_indices))} if label == "Real" else {f"frame_{frame_indices[i]}": f"{(1 - frame_preds_list[i]) * 100:.1f}%" for i in
-                                 range(len(frame_indices))}
+                                 range(len(frame_indices))} if label == "Real" else {
+                    f"frame_{frame_indices[i]}": f"{(1 - frame_preds_list[i]) * 100:.1f}%" for i in
+                    range(len(frame_indices))}
 
             if ckpt == 'FAS-Checkpoint_Fine-tuned_on_MCIO':
                 prob = sum(video_pred_list) / len(video_pred_list)
                 label = "Spoofing" if prob <= 0.5 else "Bonafide"
                 prob = prob if label == "Bonafide" else 1 - prob
                 frame_results = {f"frame_{frame_indices[i]}": f"{(frame_preds_list[i]) * 100:.1f}%" for i in
-                                 range(len(frame_indices))} if label == "Bonafide" else {f"frame_{frame_indices[i]}": f"{(1 - frame_preds_list[i]) * 100:.1f}%" for i in
-                                 range(len(frame_indices))}
+                                 range(len(frame_indices))} if label == "Bonafide" else {
+                    f"frame_{frame_indices[i]}": f"{(1 - frame_preds_list[i]) * 100:.1f}%" for i in
+                    range(len(frame_indices))}
 
             video_results = (f"The largest face in this video may be {label} with probability {prob * 100:.1f}%\n \n"
-                f"The frame-level detection results ['frame_index': 'real_face_probability']: \n{frame_results}")
+                             f"The frame-level detection results ['frame_index': 'real_face_probability']: \n{frame_results}")
             return video_results
     except Exception as e:
         return f"Error occurred. Please provide a clear face video or reduce the number of frames."
 
+
 # Paths and Constants
 P = os.path.abspath(__file__)
 FRAME_SAVE_PATH = os.path.join(os.path.dirname(P), 'frame')
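Related context for the video path above: the release notes mention uniformly sampling 1-32 frames per clip. A hedged sketch of such uniform index selection (the app's actual extract_face_from_fixed_num_frames also runs face detection and cropping; uniform_frame_indices below is an illustrative helper, not code from this commit):

import cv2
import numpy as np

def uniform_frame_indices(video_path, num_frames):
    # Evenly spaced frame indices across the whole clip.
    cap = cv2.VideoCapture(video_path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    return np.linspace(0, max(total - 1, 0),
                       num=min(num_frames, max(total, 1)), dtype=int).tolist()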
@@ -337,14 +354,17 @@ CKPT_MODEL = {
 }
 
 with gr.Blocks(css=".custom-label { font-weight: bold !important; font-size: 16px !important; }") as demo:
-    gr.HTML("<h1 style='text-align: center;'>🦱 Real Facial Image&Video Detection <br> Against Face Forgery (Deepfake/Diffusion) and Spoofing (Presentation-attacks)</h1>")
-    gr.Markdown("<b>☉ Powered by the fine-tuned ViT models that are pre-trained from [FSFM-3C](https://fsfm-3c.github.io/)</b> <br> "
-                "<b>☉ We do not and cannot access or store the data you have uploaded!</b> <br> "
-                "<b>☉ Release (Continuously updating) </b> <br> <b>[V1.0] 2025/02/22-Current🎉</b>: "
-                "1) Updated <b>[✨Unified-detector_v1] for Unified Physical-Digital Face Attack&Forgery Detection, a ViT-B/16-224 (FSFM Pre-trained) detector that could identify Real&Bonafide, Deepfake, Diffusion&AIGC, Spoofing&Presentation-attacks facial images or videos </b> ; 2) Provided the selection of the number of video frames (uniformly sampling 1-32 frames, more frames may be time-consuming for this page without GPU acceleration); 3) Fixed some errors of V0.1 including loading and prediction. <br>"
-                "<b>[V0.1] 2024/12-2025/02/21</b>: "
-                "Created this page with basic detectors [DfD-Checkpoint_Fine-tuned_on_FF++, FAS-Checkpoint_Fine-tuned_on_MCIO] that follow the paper implementation. <br> ")
-    gr.Markdown("- Please <b>provide a facial image or video (<100s)</b>, and <b>select the model</b> for detection: <br> <b>[SUGGEST] [✨Unified-detector_v1_Fine-tuned_on_4_classes]</b> a (FSFM Pre-trained) ViT-B/16-224 for Both Real/Deepfake/Diffusion/Spoofing facial images&videos Detection <br> <b>[DfD-Checkpoint_Fine-tuned_on_FF++]</b> for deepfake detection, FSFM ViT-B/16-224 fine-tuned on the FF++_c23 train&val sets (4 manipulations, 32 frames per video) <br> <b>[FAS-Checkpoint_Fine-tuned_on_MCIO]</b> for face anti-spoofing, FSFM ViT-B/16-224 fine-tuned on the MCIO datasets (2 frames per video)")
+    gr.HTML(
+        "<h1 style='text-align: center;'>🦱 Real Facial Image&Video Detection <br> Against Face Forgery (Deepfake/Diffusion) and Spoofing (Presentation-attacks)</h1>")
+    gr.Markdown(
+        "<b>☉ Powered by the fine-tuned ViT models that are pre-trained from [FSFM-3C](https://fsfm-3c.github.io/)</b> <br> "
+        "<b>☉ We do not and cannot access or store the data you have uploaded!</b> <br> "
+        "<b>☉ Release (Continuously updating) </b> <br> <b>[V1.0] 2025/02/22-Current🎉</b>: "
+        "1) Updated <b>[✨Unified-detector_v1] for Unified Physical-Digital Face Attack&Forgery Detection, a ViT-B/16-224 (FSFM Pre-trained) detector that could identify Real&Bonafide, Deepfake, Diffusion&AIGC, Spoofing&Presentation-attacks facial images or videos </b> ; 2) Provided the selection of the number of video frames (uniformly sampling 1-32 frames, more frames may be time-consuming for this page without GPU acceleration); 3) Fixed some errors of V0.1 including loading and prediction. <br>"
+        "<b>[V0.1] 2024/12-2025/02/21</b>: "
+        "Created this page with basic detectors [DfD-Checkpoint_Fine-tuned_on_FF++, FAS-Checkpoint_Fine-tuned_on_MCIO] that follow the paper implementation. <br> ")
+    gr.Markdown(
+        "- Please <b>provide a facial image or video (<100s)</b>, and <b>select the model</b> for detection: <br> <b>[SUGGEST] [✨Unified-detector_v1_Fine-tuned_on_4_classes]</b> a (FSFM Pre-trained) ViT-B/16-224 for Both Real/Deepfake/Diffusion/Spoofing facial images&videos Detection <br> <b>[DfD-Checkpoint_Fine-tuned_on_FF++]</b> for deepfake detection, FSFM ViT-B/16-224 fine-tuned on the FF++_c23 train&val sets (4 manipulations, 32 frames per video) <br> <b>[FAS-Checkpoint_Fine-tuned_on_MCIO]</b> for face anti-spoofing, FSFM ViT-B/16-224 fine-tuned on the MCIO datasets (2 frames per video)")
 
     with gr.Row():
         ckpt_select_dropdown = gr.Dropdown(
@@ -358,11 +378,12 @@ with gr.Blocks(css=".custom-label { font-weight: bold !important; font-size: 16px !important; }") as demo:
     model_loading_status = gr.Textbox(label="Model Loading Status")
     with gr.Row():
         with gr.Column(scale=5):
-            gr.Markdown("### Image Detection (Fast Try: copying image from [whichfaceisreal](https://whichfaceisreal.com/))")
+            gr.Markdown(
+                "### Image Detection (Fast Try: copying image from [whichfaceisreal](https://whichfaceisreal.com/))")
             image = gr.Image(label="Upload/Capture/Paste your image", type="pil")
             image_submit_btn = gr.Button("Submit")
             output_results_image = gr.Textbox(label="Detection Result")
-
+
             with gr.Row():
                 output_heatmap = gr.Image(label="Grad_CAM")
                 output_max_prob_class = gr.Textbox(label="Detected Class")
@@ -373,15 +394,14 @@ with gr.Blocks(css=".custom-label { font-weight: bold !important; font-size: 16px !important; }") as demo:
             video_submit_btn = gr.Button("Submit")
             output_results_video = gr.Textbox(label="Detection Result")
 
-        gr.HTML(
-            '<div style="display: flex; justify-content: center; gap: 20px; margin-bottom: 20px;">'
-            '<a href="https://mapmyvisitors.com/web/1bxvi" title="Visit tracker">'
-            '<img src="https://mapmyvisitors.com/map.png?d=FYhBoxLDEaFAxdfRzk5TuchYOBGrnSa98Ky59EkEEpY&cl=ffffff">'
-            '</a>'
-            '</div>'
-        )
+    gr.HTML(
+        '<div style="display: flex; justify-content: center; gap: 20px; margin-bottom: 20px;">'
+        '<a href="https://mapmyvisitors.com/web/1bxvi" title="Visit tracker">'
+        '<img src="https://mapmyvisitors.com/map.png?d=FYhBoxLDEaFAxdfRzk5TuchYOBGrnSa98Ky59EkEEpY&cl=ffffff">'
+        '</a>'
+        '</div>'
+    )
 
-
     ckpt_select_dropdown.change(
         fn=load_model,
         inputs=[ckpt_select_dropdown],
@@ -390,7 +410,7 @@ with gr.Blocks(css=".custom-label { font-weight: bold !important; font-size: 16px !important; }") as demo:
     image_submit_btn.click(
         fn=FSFM3C_image_detection,
        inputs=[image],
-        outputs=[output_results_image, output_heatmap,output_max_prob_class],
+        outputs=[output_results_image, output_heatmap, output_max_prob_class],
     )
     video_submit_btn.click(
         fn=FSFM3C_video_detection,
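The three-item outputs list above is why FSFM3C_image_detection now returns three values on every path, padding with None for the two-class checkpoints: Gradio maps one returned value onto each listed component. A minimal self-contained illustration of that contract (detect is a hypothetical stub, not the app's function):

import gradio as gr

def detect(_img):
    # One return value per output component: result text, heatmap image, class text.
    return "result text", None, None

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    btn = gr.Button("Submit")
    out_text = gr.Textbox(label="Detection Result")
    out_heat = gr.Image(label="Grad_CAM")
    out_cls = gr.Textbox(label="Detected Class")
    btn.click(fn=detect, inputs=[image], outputs=[out_text, out_heat, out_cls])

# demo.launch()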
 