import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from huggingface_hub import from_pretrained_keras, snapshot_download
import gradio as gr

# Load the pretrained MSI-Net saliency model from the Hugging Face Hub.
model = from_pretrained_keras("alexanderkroner/MSI-Net")
# Local snapshot of the model repo (kept for access to bundled assets).
hf_dir = snapshot_download(repo_id="alexanderkroner/MSI-Net")


def get_target_shape(original_shape):
    # Pick the model input shape (square, landscape, or portrait)
    # whose aspect ratio is closest to the original image's.
    original_aspect_ratio = original_shape[0] / original_shape[1]

    square_mode = abs(original_aspect_ratio - 1.0)
    landscape_mode = abs(original_aspect_ratio - 240 / 320)
    portrait_mode = abs(original_aspect_ratio - 320 / 240)

    best_mode = min(square_mode, landscape_mode, portrait_mode)

    if best_mode == square_mode:
        target_shape = (320, 320)
    elif best_mode == landscape_mode:
        target_shape = (240, 320)
    else:
        target_shape = (320, 240)

    return target_shape


def preprocess_input(input_image, target_shape):
    # Resize to fit the target shape while preserving aspect ratio,
    # then zero-pad symmetrically to reach the exact target size.
    input_tensor = tf.expand_dims(input_image, axis=0)
    input_tensor = tf.image.resize(
        input_tensor, target_shape, preserve_aspect_ratio=True
    )

    vertical_padding = target_shape[0] - input_tensor.shape[1]
    horizontal_padding = target_shape[1] - input_tensor.shape[2]

    vertical_padding_1 = vertical_padding // 2
    vertical_padding_2 = vertical_padding - vertical_padding_1

    horizontal_padding_1 = horizontal_padding // 2
    horizontal_padding_2 = horizontal_padding - horizontal_padding_1

    input_tensor = tf.pad(
        input_tensor,
        [
            [0, 0],
            [vertical_padding_1, vertical_padding_2],
            [horizontal_padding_1, horizontal_padding_2],
            [0, 0],
        ],
    )

    return (
        input_tensor,
        [vertical_padding_1, vertical_padding_2],
        [horizontal_padding_1, horizontal_padding_2],
    )


def postprocess_output(
    output_tensor, vertical_padding, horizontal_padding, original_shape
):
    # Crop away the padding added during preprocessing, then resize
    # the saliency map back to the original image resolution.
    output_tensor = output_tensor[
        :,
        vertical_padding[0] : output_tensor.shape[1] - vertical_padding[1],
        horizontal_padding[0] : output_tensor.shape[2] - horizontal_padding[1],
        :,
    ]

    output_tensor = tf.image.resize(output_tensor, original_shape)

    output_array = output_tensor.numpy().squeeze()
    return output_array  # Keep as grayscale


def predict_saliency(image):
    # Convert to a 3-channel RGB array so the blending below matches
    # shapes even for grayscale or RGBA uploads.
    input_image = np.array(image.convert("RGB"), dtype=np.float32)
    original_shape = input_image.shape[:2]
    target_shape = get_target_shape(original_shape)

    input_tensor, vertical_padding, horizontal_padding = preprocess_input(
        input_image, target_shape
    )
    output_tensor = model(input_tensor)["output"]
    saliency_gray = postprocess_output(
        output_tensor, vertical_padding, horizontal_padding, original_shape
    )

    # Compute the sum of grayscale values
    total_saliency = np.sum(saliency_gray)

    # Convert to colormap for visualization
    saliency_map_rgb = plt.cm.inferno(saliency_gray)[..., :3]

    # Blend with original image
    alpha = 0.9
    blended_image = alpha * saliency_map_rgb + (1 - alpha) * input_image / 255
    blended_image = np.clip(blended_image, 0.0, 1.0)  # keep values in [0, 1] for display

    return blended_image, f"Total grayscale saliency: {total_saliency:.2f}"


iface = gr.Interface(
    fn=predict_saliency,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="numpy", label="Saliency Map"),
        gr.Textbox(label="Grayscale Pixel Sum"),
    ],
    title="MSI-Net Saliency Map",
    description="Upload an image to generate its saliency map and view the total intensity.",
)

iface.launch(share=True)
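
# Usage notes (assumptions, since the script pins no versions):
# - Assumed dependencies: tensorflow, huggingface_hub, gradio, matplotlib, numpy, pillow.
# - share=True creates a temporary public Gradio link; omit it to serve on localhost only.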