Spaces:
Sleeping
Sleeping
Update saliency_gradio.py
Browse files- saliency_gradio.py +26 -24
saliency_gradio.py
CHANGED
@@ -4,6 +4,7 @@ import tensorflow as tf
|
|
4 |
from huggingface_hub import snapshot_download, from_pretrained_keras
|
5 |
import gradio as gr
|
6 |
|
|
|
7 |
model = from_pretrained_keras("alexanderkroner/MSI-Net")
|
8 |
hf_dir = snapshot_download(repo_id="alexanderkroner/MSI-Net")
|
9 |
|
@@ -17,13 +18,11 @@ def get_target_shape(original_shape):
|
|
17 |
best_mode = min(square_mode, landscape_mode, portrait_mode)
|
18 |
|
19 |
if best_mode == square_mode:
|
20 |
-
|
21 |
elif best_mode == landscape_mode:
|
22 |
-
|
23 |
else:
|
24 |
-
|
25 |
-
|
26 |
-
return target_shape
|
27 |
|
28 |
def preprocess_input(input_image, target_shape):
|
29 |
input_tensor = tf.expand_dims(input_image, axis=0)
|
@@ -37,7 +36,6 @@ def preprocess_input(input_image, target_shape):
|
|
37 |
|
38 |
vertical_padding_1 = vertical_padding // 2
|
39 |
vertical_padding_2 = vertical_padding - vertical_padding_1
|
40 |
-
|
41 |
horizontal_padding_1 = horizontal_padding // 2
|
42 |
horizontal_padding_2 = horizontal_padding - horizontal_padding_1
|
43 |
|
@@ -66,39 +64,43 @@ def postprocess_output(output_tensor, vertical_padding, horizontal_padding, orig
|
|
66 |
]
|
67 |
|
68 |
output_tensor = tf.image.resize(output_tensor, original_shape)
|
|
|
69 |
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
def predict_saliency(image):
|
74 |
-
input_image = np.array(image, dtype=np.float32)
|
75 |
original_shape = input_image.shape[:2]
|
76 |
target_shape = get_target_shape(original_shape)
|
|
|
77 |
input_tensor, vertical_padding, horizontal_padding = preprocess_input(input_image, target_shape)
|
78 |
output_tensor = model(input_tensor)["output"]
|
79 |
saliency_gray = postprocess_output(output_tensor, vertical_padding, horizontal_padding, original_shape)
|
80 |
-
|
81 |
-
# Compute the sum of grayscale values
|
82 |
total_saliency = np.sum(saliency_gray)
|
83 |
|
84 |
-
|
85 |
-
saliency_map_rgb = plt.cm.inferno(saliency_gray)[..., :3]
|
86 |
-
|
87 |
-
# Blend with original image
|
88 |
alpha = 0.9
|
89 |
-
blended_image = alpha *
|
90 |
|
91 |
return blended_image, f"Total grayscale saliency: {total_saliency:.2f}"
|
92 |
|
|
|
|
|
|
|
|
|
|
|
93 |
iface = gr.Interface(
|
94 |
-
fn=
|
95 |
-
inputs=
|
|
|
|
|
|
|
96 |
outputs=[
|
97 |
-
gr.Image(type="numpy", label="Saliency Map"),
|
98 |
-
gr.Textbox(label="Grayscale
|
|
|
|
|
99 |
],
|
100 |
-
title="MSI-Net Saliency
|
101 |
-
description="Upload
|
102 |
)
|
103 |
|
104 |
iface.launch(share=True)
|
|
|
4 |
from huggingface_hub import snapshot_download, from_pretrained_keras
|
5 |
import gradio as gr
|
6 |
|
7 |
+
# Load the model
|
8 |
model = from_pretrained_keras("alexanderkroner/MSI-Net")
|
9 |
hf_dir = snapshot_download(repo_id="alexanderkroner/MSI-Net")
|
10 |
|
|
|
18 |
best_mode = min(square_mode, landscape_mode, portrait_mode)
|
19 |
|
20 |
if best_mode == square_mode:
|
21 |
+
return (320, 320)
|
22 |
elif best_mode == landscape_mode:
|
23 |
+
return (240, 320)
|
24 |
else:
|
25 |
+
return (320, 240)
|
|
|
|
|
26 |
|
27 |
def preprocess_input(input_image, target_shape):
|
28 |
input_tensor = tf.expand_dims(input_image, axis=0)
|
|
|
36 |
|
37 |
vertical_padding_1 = vertical_padding // 2
|
38 |
vertical_padding_2 = vertical_padding - vertical_padding_1
|
|
|
39 |
horizontal_padding_1 = horizontal_padding // 2
|
40 |
horizontal_padding_2 = horizontal_padding - horizontal_padding_1
|
41 |
|
|
|
64 |
]
|
65 |
|
66 |
output_tensor = tf.image.resize(output_tensor, original_shape)
|
67 |
+
return output_tensor.numpy().squeeze() # Return grayscale map
|
68 |
|
69 |
+
def process_image(input_image):
    """Compute a saliency overlay and total saliency score for one image.

    Returns a tuple of (blended RGB float image, summary string with the
    sum of all grayscale saliency values).
    """
    image_array = np.array(input_image, dtype=np.float32)
    original_shape = image_array.shape[:2]
    target_shape = get_target_shape(original_shape)

    # Run the model on the padded/resized tensor, then undo the padding.
    input_tensor, pad_vertical, pad_horizontal = preprocess_input(image_array, target_shape)
    raw_output = model(input_tensor)["output"]
    saliency_gray = postprocess_output(raw_output, pad_vertical, pad_horizontal, original_shape)

    # Scalar score: sum of the grayscale saliency map.
    total_saliency = np.sum(saliency_gray)

    # Colorize via the inferno colormap, dropping the alpha channel.
    saliency_rgb = plt.cm.inferno(saliency_gray)[..., :3]

    # Blend the heatmap over the 0-1 normalized source image.
    alpha = 0.9
    overlay = alpha * saliency_rgb + (1 - alpha) * image_array / 255

    return overlay, f"Total grayscale saliency: {total_saliency:.2f}"
|
84 |
|
85 |
+
def predict_two_images(image1, image2):
    """Run saliency prediction on both images; return the four results flat.

    Output order matches the Gradio outputs list: map1, score1, map2, score2.
    """
    results = []
    for image in (image1, image2):
        overlay, score_text = process_image(image)
        results.append(overlay)
        results.append(score_text)
    return results[0], results[1], results[2], results[3]
|
89 |
+
|
90 |
# Gradio UI: two image inputs, and a (saliency map, score) output pair per image.
input_components = [
    gr.Image(type="pil", label="Input Image 1"),
    gr.Image(type="pil", label="Input Image 2"),
]
output_components = [
    gr.Image(type="numpy", label="Saliency Map 1"),
    gr.Textbox(label="Grayscale Sum 1"),
    gr.Image(type="numpy", label="Saliency Map 2"),
    gr.Textbox(label="Grayscale Sum 2"),
]

iface = gr.Interface(
    fn=predict_two_images,
    inputs=input_components,
    outputs=output_components,
    title="MSI-Net Saliency Maps for Two Images",
    description="Upload two images to compare their saliency maps and total saliency values.",
)

iface.launch(share=True)
|