from pathlib import Path

import gradio as gr
import numpy as np
import requests
import SimpleITK as sitk  # noqa: N813
import spaces
import torch
from huggingface_hub import hf_hub_download
from monai.transforms import Compose, ScaleIntensityd, SpatialPadd
from tqdm import tqdm

from cinema import ConvUNetR
from cinema.examples.inference.segmentation_lax_4c import (
    plot_segmentations as plot_segmentations_lax,
    plot_volume_changes as plot_volume_changes_lax,
    post_process as post_process_lax_segmentation,
)
from cinema.examples.inference.segmentation_sax import (
    plot_segmentations as plot_segmentations_sax,
    plot_volume_changes as plot_volume_changes_sax,
)
# cache directory
cache_dir = Path("/tmp/.cinema")
cache_dir.mkdir(parents=True, exist_ok=True)
# set device and dtype
dtype, device = torch.float32, torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda")
if torch.cuda.is_bf16_supported():
dtype = torch.bfloat16
# Gradio theme shared by all tabs
theme = gr.themes.Ocean(
primary_hue="red",
secondary_hue="purple",
)
@spaces.GPU
def segmentation_sax_inference(
    images: np.ndarray,
view: str,
transform: Compose,
model: ConvUNetR,
progress=gr.Progress(),
) -> np.ndarray:
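    """Run frame-by-frame SAX segmentation.

    Args:
        images: image array of shape (x, y, z, t).
        view: view name, here "sax".
        transform: preprocessing transform (intensity scaling and padding).
        model: finetuned ConvUNetR segmentation model.
        progress: Gradio progress tracker.

    Returns:
        Segmentation labels of shape (x, y, z, t).
    """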
model.to(device)
n_slices, n_frames = images.shape[-2:]
labels_list = []
    for t in tqdm(range(n_frames), total=n_frames):
progress((t + 1) / n_frames, desc=f"Processing frame {t + 1} / {n_frames}...")
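        # add a channel dimension and select frame t; the transform scales and pads to (192, 192, 16)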
batch = transform({view: torch.from_numpy(images[None, ..., t])})
batch = {
k: v[None, ...].to(device=device, dtype=torch.float32)
for k, v in batch.items()
}
with (
torch.no_grad(),
torch.autocast("cuda", dtype=dtype, enabled=torch.cuda.is_available()),
):
logits = model(batch)[view]
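        # take the most likely class per voxel and drop the padded slices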
labels_list.append(torch.argmax(logits, dim=1)[0, ..., :n_slices])
labels = torch.stack(labels_list, dim=-1).detach().cpu().numpy()
return labels
def segmentation_sax(trained_dataset, seed, image_id, t_step, progress=gr.Progress()):
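    """Download an ACDC image and a finetuned model, run SAX segmentation, and plot the results."""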
# Fixed parameters
view = "sax"
split = "train" if image_id <= 100 else "test"
trained_dataset = {
"ACDC": "acdc",
"M&MS": "mnms",
"M&MS2": "mnms2",
}[str(trained_dataset)]
    # Download the example image and the finetuned model
    progress(0, desc="Downloading image...")
image_path = hf_hub_download(
repo_id="mathpluscode/ACDC",
repo_type="dataset",
filename=f"{split}/patient{image_id:03d}/patient{image_id:03d}_sax_t.nii.gz",
cache_dir=cache_dir,
)
model = ConvUNetR.from_finetuned(
repo_id="mathpluscode/CineMA",
model_filename=f"finetuned/segmentation/{trained_dataset}_{view}/{trained_dataset}_{view}_{seed}.safetensors",
config_filename=f"finetuned/segmentation/{trained_dataset}_{view}/config.yaml",
cache_dir=cache_dir,
)
    # Inference
    progress(0, desc="Running inference...")
transform = Compose(
[
ScaleIntensityd(keys=view),
SpatialPadd(keys=view, spatial_size=(192, 192, 16), method="end"),
]
)
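    # reverse SimpleITK's (t, z, y, x) axis order to (x, y, z, t) and keep every t_step-th frame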
images = np.transpose(sitk.GetArrayFromImage(sitk.ReadImage(image_path)))
images = images[..., ::t_step]
labels = segmentation_sax_inference(images, view, transform, model, progress)
progress(1, desc="Plotting results...")
fig1 = plot_segmentations_sax(images, labels, t_step)
fig2 = plot_volume_changes_sax(labels, t_step)
return fig1, fig2
def segmentation_sax_tab():
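    """Build the Gradio tab for SAX segmentation."""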
with gr.Blocks() as sax_interface:
gr.Markdown(
"""
This page demonstrates the segmentation of cardiac structures in the Short-Axis (SAX) view.
            Please adjust the settings in the panels on the right and click the button to run inference.
"""
)
with gr.Row():
with gr.Column(scale=4):
gr.Markdown("""
## Description
### Data
                The available data is from ACDC. All images have been resampled to 1 mm × 1 mm × 10 mm and centre-cropped to 192 mm × 192 mm for each SAX slice.
                Images 1–100 are from the training set, and images 101–150 are from the test set.
                ### Model
                The available models are finetuned on different datasets ([ACDC](https://www.creatis.insa-lyon.fr/Challenge/acdc/), [M&Ms](https://www.ub.edu/mnms/), and [M&Ms2](https://www.ub.edu/mnms-2/)). For each dataset, there are three models finetuned with different random seeds: 0, 1, and 2.
                ### Visualization
                The left figure shows the segmentation of the ventricles and myocardium at every selected time step across all SAX slices.
                The right figure plots the ventricle and myocardium volumes across the inferred time frames.
""")
with gr.Column(scale=3):
gr.Markdown("## Data Settings")
image_id = gr.Slider(
minimum=1,
maximum=150,
step=1,
label="Choose an ACDC image, ID is between 1 and 150",
value=150,
)
t_step = gr.Slider(
minimum=1,
maximum=10,
step=1,
label="Choose the gap between time frames",
value=2,
)
with gr.Column(scale=3):
gr.Markdown("## Model Setting")
trained_dataset = gr.Dropdown(
choices=["ACDC", "M&MS", "M&MS2"],
label="Choose which dataset the segmentation model was finetuned on",
value="ACDC",
)
seed = gr.Slider(
minimum=0,
maximum=2,
step=1,
label="Choose which seed the finetuning used",
value=0,
)
run_button = gr.Button("Run SAX segmentation inference", variant="primary")
with gr.Row():
with gr.Column():
gr.Markdown("## Ventricle and Myocardium Segmentation")
segmentation_plot = gr.Plot(show_label=False)
with gr.Column():
gr.Markdown("## Ejection Fraction Prediction")
volume_plot = gr.Plot(show_label=False)
run_button.click(
fn=segmentation_sax,
inputs=[trained_dataset, seed, image_id, t_step],
outputs=[segmentation_plot, volume_plot],
)
return sax_interface
@spaces.GPU
def segmentation_lax_inference(
    images: np.ndarray,
view: str,
transform: Compose,
model: ConvUNetR,
progress=gr.Progress(),
) -> np.ndarray:
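    """Run frame-by-frame LAX 4-chamber segmentation.

    Args:
        images: image array of shape (x, y, 1, t).
        view: view name, here "lax_4c".
        transform: preprocessing transform (intensity scaling).
        model: finetuned ConvUNetR segmentation model.
        progress: Gradio progress tracker.

    Returns:
        Segmentation labels of shape (x, y, t).
    """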
model.to(device)
n_frames = images.shape[-1]
labels_list = []
for t in tqdm(range(n_frames), total=n_frames):
progress((t + 1) / n_frames, desc=f"Processing frame {t + 1} / {n_frames}...")
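        # add a channel dimension and select the single slice at frame t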
batch = transform({view: torch.from_numpy(images[None, ..., 0, t])})
batch = {
k: v[None, ...].to(device=device, dtype=dtype) for k, v in batch.items()
}
with (
torch.no_grad(),
torch.autocast("cuda", dtype=dtype, enabled=torch.cuda.is_available()),
):
logits = model(batch)[view] # (1, 4, x, y)
labels = torch.argmax(logits, dim=1)[0].detach().cpu().numpy() # (x, y)
        # the model sometimes hallucinates an extra right ventricle and myocardium,
        # so keep only the connected component closest to the left ventricle
        labels = post_process_lax_segmentation(labels)
labels_list.append(labels)
labels = np.stack(labels_list, axis=-1) # (x, y, t)
return labels
def segmentation_lax(seed, image_id, progress=gr.Progress()):
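    """Download an example LAX 4-chamber image and a finetuned model, run segmentation, and plot the results."""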
# Fixed parameters
trained_dataset = "mnms2"
view = "lax_4c"
    # Download the example image and the finetuned model
    progress(0, desc="Downloading image...")
image_url = f"https://raw.githubusercontent.com/mathpluscode/CineMA/main/cinema/examples/data/ukb/{image_id}/{image_id}_{view}.nii.gz"
image_path = cache_dir / f"{image_id}_{view}.nii.gz"
    response = requests.get(image_url, timeout=60)
    response.raise_for_status()  # fail early if the download did not succeed
    with open(image_path, "wb") as f:
        f.write(response.content)
model = ConvUNetR.from_finetuned(
repo_id="mathpluscode/CineMA",
model_filename=f"finetuned/segmentation/{trained_dataset}_{view}/{trained_dataset}_{view}_{seed}.safetensors",
config_filename=f"finetuned/segmentation/{trained_dataset}_{view}/config.yaml",
cache_dir=cache_dir,
)
    # Inference
    progress(0, desc="Running inference...")
transform = ScaleIntensityd(keys=view)
images = np.transpose(sitk.GetArrayFromImage(sitk.ReadImage(image_path)))
labels = segmentation_lax_inference(images, view, transform, model, progress)
progress(1, desc="Plotting results...")
fig1 = plot_segmentations_lax(images, labels)
fig2 = plot_volume_changes_lax(labels)
return fig1, fig2
def segmentation_lax_tab():
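    """Build the Gradio tab for LAX 4-chamber segmentation."""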
with gr.Blocks() as lax_interface:
gr.Markdown(
"""
This page demonstrates the segmentation of cardiac structures in the Long-Axis (LAX) view.
            Please adjust the settings in the panels on the right and click the button to run inference.
"""
)
with gr.Row():
with gr.Column(scale=4):
gr.Markdown("""
## Description
### Data
                There are four example images. All images have been resampled to 1 mm × 1 mm and centre-cropped.
                ### Model
                The available models are finetuned on [M&Ms2](https://www.ub.edu/mnms-2/). There are three models finetuned with different random seeds: 0, 1, and 2.
                ### Visualization
                The left figure shows the segmentation of the ventricles and myocardium across all time frames.
                The right figure plots the ventricle and myocardium volumes across the inferred time frames.
""")
with gr.Column(scale=3):
gr.Markdown("## Data Settings")
image_id = gr.Slider(
minimum=1,
maximum=4,
step=1,
label="Choose an image, ID is between 1 and 4",
value=4,
)
with gr.Column(scale=3):
gr.Markdown("## Model Setting")
seed = gr.Slider(
minimum=0,
maximum=2,
step=1,
label="Choose which seed the finetuning used",
value=0,
)
run_button = gr.Button("Run LAX segmentation inference", variant="primary")
with gr.Row():
with gr.Column():
gr.Markdown("## Ventricle and Myocardium Segmentation")
segmentation_plot = gr.Plot(show_label=False)
with gr.Column():
gr.Markdown("## Ejection Fraction Prediction")
volume_plot = gr.Plot(show_label=False)
run_button.click(
fn=segmentation_lax,
inputs=[seed, image_id],
outputs=[segmentation_plot, volume_plot],
)
return lax_interface
with gr.Blocks(
theme=theme, title="CineMA: A Foundation Model for Cine Cardiac MRI"
) as demo:
gr.Markdown(
"""
# CineMA: A Foundation Model for Cine Cardiac MRI πŸŽ₯πŸ«€
        This demo showcases the capabilities of CineMA across multiple tasks.
        For more details, check out our [GitHub](https://github.com/mathpluscode/CineMA).
"""
)
with gr.Tabs() as tabs:
with gr.TabItem("Segmentation in SAX View"):
segmentation_sax_tab()
with gr.TabItem("Segmentation in LAX View"):
segmentation_lax_tab()
demo.launch()