Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -364 +364 @@ (only the final line changed; the previous revision was cut off mid-statement)
-    app.run(host='0.0.0.0', port=
+    app.run(host='0.0.0.0', port=7860, debug=False)

Full app.py after this commit:
from PIL import Image
import numpy as np
import base64
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import matplotlib
matplotlib.use('Agg')  # headless backend for server-side plotting
import matplotlib.pyplot as plt
import google.generativeai as genai
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch
import io, torch, os
from reportlab.lib import colors
from datetime import datetime

# Read the Gemini API key from the environment (e.g. a Space secret);
# a credential should never be hardcoded in a public file.
genai.configure(api_key=os.environ['GOOGLE_API_KEY'])

# Setup
app = Flask(__name__)
CORS(app)

# Initialize device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load model and processor
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-tiny-ade-semantic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-tiny-ade-semantic")
# model.load_state_dict(torch.load(r"E:\FYP Work\FYP_code\backend\mask2former-ade-(splicing1_2).pth", map_location=device))
model.load_state_dict(torch.load(r"mask2former-ade-(splicing1_2).pth", map_location=device))
model = model.to(device)
model.eval()
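# NOTE: the fine-tuned checkpoint is assumed to sit next to app.py in the
# repository; if the .pth file is missing, torch.load raises at startup.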

# ========== Flask routes ==========

@app.route('/')
def home():
    return "Backend is running!"

@app.route('/predict', methods=['POST'])
def predict():
    if 'image' not in request.files:
        return jsonify({"error": "No image uploaded"}), 400

    try:
        file = request.files['image']
        image = Image.open(io.BytesIO(file.read()))

        # Convert to RGB if needed
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # Encode original image to base64
        original_image_buffer = io.BytesIO()
        image.save(original_image_buffer, format="PNG")
        original_image_base64 = base64.b64encode(original_image_buffer.getvalue()).decode("utf-8")

        # Process image using Mask2Former processor
        inputs = processor(images=image, return_tensors="pt").to(device)

        # Predict
        with torch.no_grad():
            outputs = model(**inputs)

        # Process outputs; PIL's image.size is (width, height), while the
        # processor expects (height, width), hence the [::-1] reversal
        predicted_segmentation = processor.post_process_semantic_segmentation(
            outputs, target_sizes=[image.size[::-1]]
        )[0]

        # Convert to numpy array for visualization
        segmentation_mask = predicted_segmentation.cpu().numpy()

        # ========== Create visualizations ==========
        # Create side-by-side plot
        fig, axes = plt.subplots(1, 2, figsize=(10, 5))
        axes[0].imshow(image)
        axes[0].set_title("Input Image")
        axes[1].imshow(segmentation_mask)
        axes[1].set_title("Prediction")

        for ax in axes:
            ax.axis("off")
        plt.tight_layout()

        # Save visualization to buffer
        buf = io.BytesIO()
        plt.savefig(buf, format="png", bbox_inches='tight', pad_inches=0)
        buf.seek(0)
        visualization_base64 = base64.b64encode(buf.read()).decode('utf-8')
        plt.close(fig)  # close this specific figure to free memory

        # ========== Encode mask separately ==========
        # Normalize mask to 0-255 range; the max(..., 1) guards against a
        # constant mask, which would otherwise divide by zero
        mask_range = max(segmentation_mask.max() - segmentation_mask.min(), 1)
        mask_normalized = (segmentation_mask - segmentation_mask.min()) * (255.0 / mask_range)
        mask_image = Image.fromarray(mask_normalized.astype(np.uint8))

        mask_buffer = io.BytesIO()
        mask_image.save(mask_buffer, format="PNG")
        mask_base64 = base64.b64encode(mask_buffer.getvalue()).decode("utf-8")

        # VLM code
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

        # Create multimodal message
        message = HumanMessage(
            content=[
                {
                    "type": "text",
                    # "text": "Please explain briefly where the manipulation occurred, don't use mask"
                    "text": "This is an image and its predicted binary mask showing manipulated regions in white. "
                            "Please explain briefly in 2-3 lines where the manipulation occurred and what might have been altered."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        # both images are saved as PNG above, so declare image/png
                        "url": f"data:image/png;base64,{original_image_base64}"
                    },
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/png;base64,{mask_base64}"
                    },
                },
            ]
        )
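        # LangChain decodes the data-URL image parts and forwards them to
        # Gemini as inline image data alongside the text prompt.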

        # Get response
        response = llm.invoke([message])
        print(response.content)

        return jsonify({
            "original_image": original_image_base64,
            "mask": mask_base64,
            "visualization": visualization_base64,
            "message": response.content
        })

    except Exception as e:
        return jsonify({"error": str(e)}), 500

import json
from threading import Lock

counter_file = "counter.json"
counter_lock = Lock()

def get_case_id():
    # Issue a sequential, per-day case ID; the lock keeps the JSON counter
    # consistent across concurrent requests.
    today = datetime.now().strftime('%Y%m%d')

    with counter_lock:
        if os.path.exists(counter_file):
            with open(counter_file, "r") as f:
                data = json.load(f)
        else:
            data = {}

        count = data.get(today, 0) + 1
        data[today] = count

        with open(counter_file, "w") as f:
            json.dump(data, f)

    return f"DFD-{today}-{count:03d}"
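# Example: the third report issued on 5 May 2025 gets the ID "DFD-20250505-003".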


@app.route('/download-report', methods=['POST'])
def download_report():
    # Same guard as /predict: reject requests without an uploaded image
    if 'image' not in request.files:
        return jsonify({"error": "No image uploaded"}), 400

    try:
        file = request.files['image']
        image = Image.open(io.BytesIO(file.read())).convert("RGB")

        # === Process Image ===
        inputs = processor(images=image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_segmentation = processor.post_process_semantic_segmentation(
            outputs, target_sizes=[image.size[::-1]]
        )[0]
        segmentation_mask = predicted_segmentation.cpu().numpy()

        # === Create Mask Image ===
        mask_range = max(segmentation_mask.max() - segmentation_mask.min(), 1)  # avoid division by zero
        mask_normalized = (segmentation_mask - segmentation_mask.min()) * (255.0 / mask_range)
        mask_image = Image.fromarray(mask_normalized.astype(np.uint8)).convert("L")

        # === Prepare Images ===
        # Temporary files feed ReportLab's drawImage below; note that these
        # fixed names are not safe under concurrent requests.
        image.save("temp_input.png")
        mask_image.save("temp_mask.png")

        # === Get LLM Analysis ===
        # Encode images for LLM
        original_buffer = io.BytesIO()
        image.save(original_buffer, format="PNG")
        original_base64 = base64.b64encode(original_buffer.getvalue()).decode("utf-8")

        mask_buffer = io.BytesIO()
        mask_image.save(mask_buffer, format="PNG")
        mask_base64 = base64.b64encode(mask_buffer.getvalue()).decode("utf-8")

        # Get professional analysis from Gemini
        llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
        message = HumanMessage(
            content=[
                {
                    "type": "text",
                    "text": "This is an image and its predicted binary mask showing manipulated regions in white. "
                            "Please explain briefly where the manipulation occurred and what might have been altered."
                },
                {
                    "type": "image_url",
                    # both images are PNG-encoded above, so declare image/png
                    "image_url": {"url": f"data:image/png;base64,{original_base64}"},
                },
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{mask_base64}"},
                },
            ]
        )
        llm_response = llm.invoke([message]).content

        # === Generate PDF Report ===
        buffer = io.BytesIO()
        c = canvas.Canvas(buffer, pagesize=A4)
        width, height = A4

        # === Professional Report Design ===
        # Light blue background
        c.setFillColorRGB(0.96, 0.96, 1)
        c.rect(0, 0, width, height, fill=1, stroke=0)

        # Dark blue header
        c.setFillColorRGB(0, 0.2, 0.4)
        c.rect(0, height-80, width, 80, fill=1, stroke=0)

        # Title
        c.setFillColorRGB(1, 1, 1)
        c.setFont("Helvetica-Bold", 18)
        c.drawCentredString(width/2, height-50, "DIGITAL IMAGE AUTHENTICITY REPORT")
        c.setFont("Helvetica", 10)
        c.drawCentredString(width/2, height-70, "Forensic Analysis Report")

        # Metadata
        c.setFillColorRGB(0, 0, 0)
        c.setFont("Helvetica", 9)
        c.drawString(40, height-100, f"Report Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        case_id = get_case_id()
        c.drawString(width-200, height-100, f"Case ID: {case_id}")

        # Divider
        c.setStrokeColorRGB(0, 0.4, 0.6)
        c.setLineWidth(1)
        c.line(40, height-110, width-40, height-110)

        # === Analysis Summary ===
        c.setFillColorRGB(0, 0.3, 0.6)
        c.setFont("Helvetica-Bold", 12)
        c.drawString(40, height-140, "EXECUTIVE SUMMARY")

        c.setFillColorRGB(0, 0, 0)
        c.setFont("Helvetica", 10)
        summary_text = [
            "This report presents forensic analysis of potential digital manipulations",
            "using state-of-the-art AI detection models. Key findings are summarized below."
        ]
        text_object = c.beginText(40, height-160)
        text_object.setFont("Helvetica", 10)
        text_object.setLeading(14)
        for line in summary_text:
            text_object.textLine(line)
        c.drawText(text_object)

        # === Image Evidence ===
        img_y = height-420
        img_width = 220
        img_height = 220

        # Original Image
        c.drawImage("temp_input.png", 40, img_y, width=img_width, height=img_height)
        c.setFillColorRGB(0, 0.3, 0.6)
        c.setFont("Helvetica-Bold", 10)
        c.drawString(40, img_y-20, "ORIGINAL IMAGE")

        # Detection Result (the grayscale mask saved above)
        c.drawImage("temp_mask.png", width-260, img_y, width=img_width, height=img_height)
        c.drawString(width-260, img_y-20, "DETECTION MASK")

        # === AI Analysis Section ===
        c.setFillColorRGB(0, 0.3, 0.6)
        c.setFont("Helvetica-Bold", 12)
        c.drawString(40, img_y-50, "AI FORENSIC ANALYSIS")

        # Format LLM response with proper line breaks
        from textwrap import wrap
        analysis_lines = []
        for paragraph in llm_response.split('\n'):
            analysis_lines.extend(wrap(paragraph, width=90))

        text_object = c.beginText(40, img_y-70)
        text_object.setFont("Helvetica", 10)
        text_object.setLeading(14)

        # Show first 10 lines (adjust based on space)
        for line in analysis_lines[:10]:
            text_object.textLine(line)

        if len(analysis_lines) > 10:
            text_object.textLine("[Full analysis available in digital report]")

        c.drawText(text_object)

        # === Technical Details ===
        c.setFillColorRGB(0, 0.3, 0.6)
        c.setFont("Helvetica-Bold", 12)
        c.drawString(40, img_y-180, "TECHNICAL SPECIFICATIONS")

        c.setFillColorRGB(0, 0, 0)
        c.setFont("Helvetica", 10)
        tech_details = [
            "Analysis Model: Mask2Former-Swin (ADE20K Fine-tuned)",
            # f"Detection Threshold: {segmentation_mask.max():.2f} confidence",
            f"Processing Date: {datetime.now().strftime('%Y-%m-%d')}",
            "Report Version: 1.1"
        ]
        text_object = c.beginText(40, img_y-200)
        text_object.setFont("Helvetica", 10)
        text_object.setLeading(14)
        for line in tech_details:
            text_object.textLine(line)
        c.drawText(text_object)

        # === Footer ===
        c.setFillColorRGB(0, 0.2, 0.4)
        c.rect(0, 40, width, 40, fill=1, stroke=0)
        c.setFillColorRGB(1, 1, 1)
        c.setFont("Helvetica", 8)
        c.drawCentredString(width/2, 65, "This report was generated by AI forensic tools and should be verified by human experts")
        c.drawCentredString(width/2, 55, "Sukkur IBA University | Digital Forensics Lab | © 2024 Deepfake Research Project")

        c.save()
        buffer.seek(0)

        # Cleanup
        os.remove("temp_input.png")
        os.remove("temp_mask.png")

        return send_file(
            buffer,
            mimetype='application/pdf',
            as_attachment=True,
            download_name=f"forensic_report_{datetime.now().strftime('%Y%m%d_%H%M')}.pdf"
        )

    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # 7860 is the port Hugging Face Spaces expects the app to listen on
    app.run(host='0.0.0.0', port=7860, debug=False)
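
A minimal client sketch for the two routes above, assuming the Space is reachable at http://localhost:7860 and using the placeholder filename suspect.jpg:

import requests

BASE = "http://localhost:7860"  # assumption: host/port from app.run() above

# /predict returns JSON: base64-encoded PNGs plus the VLM explanation
with open("suspect.jpg", "rb") as f:
    r = requests.post(f"{BASE}/predict", files={"image": f})
r.raise_for_status()
result = r.json()
print(result["message"])  # brief Gemini explanation of the detected manipulation
# result["original_image"], result["mask"], result["visualization"] hold base64 PNG data

# /download-report streams back the generated PDF
with open("suspect.jpg", "rb") as f:
    r = requests.post(f"{BASE}/download-report", files={"image": f})
r.raise_for_status()
with open("forensic_report.pdf", "wb") as out:
    out.write(r.content)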