Spaces:
Sleeping
Sleeping
Update saliency_gradio.py
Browse files- saliency_gradio.py +19 -22
saliency_gradio.py
CHANGED
@@ -1,19 +1,12 @@
|
|
1 |
-
# prompt: save the gradio app into a file
|
2 |
-
|
3 |
import matplotlib.pyplot as plt
|
4 |
import numpy as np
|
5 |
import tensorflow as tf
|
6 |
from huggingface_hub import snapshot_download, from_pretrained_keras
|
7 |
import gradio as gr
|
8 |
|
9 |
-
# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy)
|
10 |
-
# See https://github.com/keras-team/tf-keras for more details.
|
11 |
-
|
12 |
model = from_pretrained_keras("alexanderkroner/MSI-Net")
|
13 |
-
|
14 |
hf_dir = snapshot_download(repo_id="alexanderkroner/MSI-Net")
|
15 |
|
16 |
-
|
17 |
def get_target_shape(original_shape):
|
18 |
original_aspect_ratio = original_shape[0] / original_shape[1]
|
19 |
|
@@ -32,7 +25,6 @@ def get_target_shape(original_shape):
|
|
32 |
|
33 |
return target_shape
|
34 |
|
35 |
-
|
36 |
def preprocess_input(input_image, target_shape):
|
37 |
input_tensor = tf.expand_dims(input_image, axis=0)
|
38 |
|
@@ -65,10 +57,7 @@ def preprocess_input(input_image, target_shape):
|
|
65 |
[horizontal_padding_1, horizontal_padding_2],
|
66 |
)
|
67 |
|
68 |
-
|
69 |
-
def postprocess_output(
|
70 |
-
output_tensor, vertical_padding, horizontal_padding, original_shape
|
71 |
-
):
|
72 |
output_tensor = output_tensor[
|
73 |
:,
|
74 |
vertical_padding[0] : output_tensor.shape[1] - vertical_padding[1],
|
@@ -79,10 +68,7 @@ def postprocess_output(
|
|
79 |
output_tensor = tf.image.resize(output_tensor, original_shape)
|
80 |
|
81 |
output_array = output_tensor.numpy().squeeze()
|
82 |
-
output_array
|
83 |
-
|
84 |
-
return output_array
|
85 |
-
|
86 |
|
87 |
def predict_saliency(image):
|
88 |
input_image = np.array(image, dtype=np.float32)
|
@@ -90,18 +76,29 @@ def predict_saliency(image):
|
|
90 |
target_shape = get_target_shape(original_shape)
|
91 |
input_tensor, vertical_padding, horizontal_padding = preprocess_input(input_image, target_shape)
|
92 |
output_tensor = model(input_tensor)["output"]
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
alpha = 0.9
|
95 |
-
blended_image = alpha *
|
96 |
-
return blended_image
|
97 |
|
|
|
98 |
|
99 |
iface = gr.Interface(
|
100 |
fn=predict_saliency,
|
101 |
inputs=gr.Image(type="pil"),
|
102 |
-
outputs=
|
|
|
|
|
|
|
103 |
title="MSI-Net Saliency Map",
|
104 |
-
description="Upload an image to generate its saliency map.",
|
105 |
)
|
106 |
|
107 |
-
iface.launch(share=True)
|
|
|
|
|
|
|
1 |
import matplotlib.pyplot as plt
|
2 |
import numpy as np
|
3 |
import tensorflow as tf
|
4 |
from huggingface_hub import snapshot_download, from_pretrained_keras
|
5 |
import gradio as gr
|
6 |
|
|
|
|
|
|
|
7 |
# Pull the pretrained MSI-Net saliency model from the Hugging Face Hub,
# and snapshot the repo locally (hf_dir points at the downloaded files).
model = from_pretrained_keras("alexanderkroner/MSI-Net")
hf_dir = snapshot_download(repo_id="alexanderkroner/MSI-Net")
|
9 |
|
|
|
10 |
def get_target_shape(original_shape):
|
11 |
original_aspect_ratio = original_shape[0] / original_shape[1]
|
12 |
|
|
|
25 |
|
26 |
return target_shape
|
27 |
|
|
|
28 |
def preprocess_input(input_image, target_shape):
|
29 |
input_tensor = tf.expand_dims(input_image, axis=0)
|
30 |
|
|
|
57 |
[horizontal_padding_1, horizontal_padding_2],
|
58 |
)
|
59 |
|
60 |
+
def postprocess_output(output_tensor, vertical_padding, horizontal_padding, original_shape):
|
|
|
|
|
|
|
61 |
output_tensor = output_tensor[
|
62 |
:,
|
63 |
vertical_padding[0] : output_tensor.shape[1] - vertical_padding[1],
|
|
|
68 |
output_tensor = tf.image.resize(output_tensor, original_shape)
|
69 |
|
70 |
output_array = output_tensor.numpy().squeeze()
|
71 |
+
return output_array # Keep as grayscale
|
|
|
|
|
|
|
72 |
|
73 |
def predict_saliency(image):
|
74 |
input_image = np.array(image, dtype=np.float32)
|
|
|
76 |
target_shape = get_target_shape(original_shape)
|
77 |
input_tensor, vertical_padding, horizontal_padding = preprocess_input(input_image, target_shape)
|
78 |
output_tensor = model(input_tensor)["output"]
|
79 |
+
saliency_gray = postprocess_output(output_tensor, vertical_padding, horizontal_padding, original_shape)
|
80 |
+
|
81 |
+
# Compute the sum of grayscale values
|
82 |
+
total_saliency = np.sum(saliency_gray)
|
83 |
+
|
84 |
+
# Convert to colormap for visualization
|
85 |
+
saliency_map_rgb = plt.cm.inferno(saliency_gray)[..., :3]
|
86 |
+
|
87 |
+
# Blend with original image
|
88 |
alpha = 0.9
|
89 |
+
blended_image = alpha * saliency_map_rgb + (1 - alpha) * input_image / 255
|
|
|
90 |
|
91 |
+
return blended_image, f"Total grayscale saliency: {total_saliency:.2f}"
|
92 |
|
93 |
# Gradio front end: one PIL image in; a blended saliency overlay plus a
# text summary of the total grayscale saliency out.
_saliency_outputs = [
    gr.Image(type="numpy", label="Saliency Map"),
    gr.Textbox(label="Grayscale Pixel Sum"),
]

iface = gr.Interface(
    fn=predict_saliency,
    inputs=gr.Image(type="pil"),
    outputs=_saliency_outputs,
    title="MSI-Net Saliency Map",
    description="Upload an image to generate its saliency map and view the total intensity.",
)

iface.launch(share=True)
|