import numpy as np
import gradio as gr
from huggingface_hub import hf_hub_download
import SimpleITK as sitk  # noqa: N813
import torch
from monai.transforms import Compose, ScaleIntensityd, SpatialPadd
from cinema import ConvUNetR
from pathlib import Path
from cinema.examples.inference.segmentation_sax import (
    plot_segmentations,
    plot_volume_changes,
)
import spaces

# cache directories
cache_dir = Path("/tmp/.cinema")
cache_dir.mkdir(parents=True, exist_ok=True)


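# request a GPU for this function when running on ZeroGPU Spaces hardware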
@spaces.GPU
def inference(
    images: np.ndarray,
    view: str,
    transform: Compose,
    model: ConvUNetR,
    progress=gr.Progress(),
) -> np.ndarray:
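    """Segment the ventricles and myocardium frame by frame.

    The input array is expected in (x, y, z, t) order; the returned labels
    share that layout, with padded slices cropped away.
    """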
    # set device and dtype
    dtype, device = torch.float32, torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda")
        if torch.cuda.is_bf16_supported():
            dtype = torch.bfloat16

    # inference
    model.to(device)
    n_slices, n_frames = images.shape[-2:]
    labels_list = []
    for t in range(n_frames):
        progress((t + 1) / n_frames, desc=f"Processing frame {t + 1} / {n_frames}...")
        batch = transform({view: torch.from_numpy(images[None, ..., t])})
        batch = {
            k: v[None, ...].to(device=device, dtype=torch.float32)
            for k, v in batch.items()
        }
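        # run under autocast for mixed precision; it is enabled only when CUDA is available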
        with (
            torch.no_grad(),
            torch.autocast("cuda", dtype=dtype, enabled=torch.cuda.is_available()),
        ):
            logits = model(batch)[view]
        labels_list.append(torch.argmax(logits, dim=1)[0, ..., :n_slices])
    labels = torch.stack(labels_list, dim=-1).detach().cpu().numpy()
    return labels


def run_inference(trained_dataset, seed, image_id, t_step, progress=gr.Progress()):
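    """Download the selected image and finetuned model, then run segmentation.

    Returns two figures: the segmentation overlay and the ventricle and
    myocardium volume curves over time.
    """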
    # Fixed parameters
    view = "sax"
    # sliders may deliver floats; cast to int before formatting paths and filenames
    image_id, seed, t_step = int(image_id), int(seed), int(t_step)
    split = "train" if image_id <= 100 else "test"
    trained_dataset = {
        "ACDC": "acdc",
        "M&MS": "mnms",
        "M&MS2": "mnms2",
    }[str(trained_dataset)]

    # Download and load model
    progress(0, desc="Downloading model and data...")
    image_path = hf_hub_download(
        repo_id="mathpluscode/ACDC",
        repo_type="dataset",
        filename=f"{split}/patient{image_id:03d}/patient{image_id:03d}_sax_t.nii.gz",
        cache_dir=cache_dir,
    )

    model = ConvUNetR.from_finetuned(
        repo_id="mathpluscode/CineMA",
        model_filename=f"finetuned/segmentation/{trained_dataset}_{view}/{trained_dataset}_{view}_{seed}.safetensors",
        config_filename=f"finetuned/segmentation/{trained_dataset}_{view}/config.yaml",
        cache_dir=cache_dir,
    )

    # Load and process data
    transform = Compose(
        [
            ScaleIntensityd(keys=view),
            SpatialPadd(keys=view, spatial_size=(192, 192, 16), method="end"),
        ]
    )

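    # SimpleITK returns the 4D image as (t, z, y, x); transposing gives (x, y, z, t),
    # then only every t_step-th time frame is kept to reduce inference time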
    images = np.transpose(sitk.GetArrayFromImage(sitk.ReadImage(image_path)))
    images = images[..., ::t_step]
    labels = inference(images, view, transform, model, progress)

    progress(1, desc="Plotting results...")
    fig1 = plot_segmentations(images, labels, t_step)
    fig2 = plot_volume_changes(labels, t_step)

    return fig1, fig2


# Create the Gradio interface
theme = gr.themes.Ocean(
    primary_hue="red",
    secondary_hue="purple",
)
with gr.Blocks(
    theme=theme, title="CineMA: A Foundation Model for Cine Cardiac MRI"
) as demo:
    gr.Markdown(
        """
    # CineMA: A Foundation Model for Cine Cardiac MRI 🎥🫀

    Below is an example of ejection fraction prediction. For more examples, check out our [GitHub](https://github.com/mathpluscode/CineMA).
    """
    )

    with gr.Row():
        with gr.Column(scale=4):
            gr.Markdown("## Description")
            gr.Markdown("""
            Please adjust the settings in the panels on the right, then click the button to run inference.

            ### Data

            The available data is from ACDC. All images have been resampled to 1 mm × 1 mm × 10 mm and centre-cropped to 192 mm × 192 mm for each SAX slice.
            Images 1-100 are from the training set, and images 101-150 are from the test set.

            ### Model

            The available models are finetuned on different datasets ([ACDC](https://www.creatis.insa-lyon.fr/Challenge/acdc/), [M&Ms](https://www.ub.edu/mnms/), and [M&Ms2](https://www.ub.edu/mnms-2/)). For each dataset, there are three models finetuned with different seeds (0, 1, and 2). The default model is finetuned on the ACDC dataset with seed 0.

            ### Visualization

            The left panel shows the segmentation of the ventricles and myocardium at every selected time step across all SAX slices.
            The right panel plots the ventricle and myocardium volumes across the inferred time frames.
            """)
        with gr.Column(scale=3):
            gr.Markdown("## Data Settings")
            image_id = gr.Slider(
                minimum=1,
                maximum=150,
                step=1,
                label="Choose an ACDC image, ID is between 1 and 150",
                value=150,
            )
            t_step = gr.Slider(
                minimum=1,
                maximum=10,
                step=1,
                label="Choose the gap between time frames",
                value=2,
            )
        with gr.Column(scale=3):
            gr.Markdown("## Model Setting")
            trained_dataset = gr.Dropdown(
                choices=["ACDC", "M&MS", "M&MS2"],
                label="Choose which dataset the segmentation model was finetuned on",
                value="ACDC",
            )
            seed = gr.Slider(
                minimum=0,
                maximum=2,
                step=1,
                label="Choose which seed the finetuning used",
                value=0,
            )
    run_button = gr.Button("Run segmentation inference", variant="primary")

    with gr.Row():
        segmentation_plot = gr.Plot(label="Ventricle and Myocardium Segmentation")
        volume_plot = gr.Plot(label="Ejection Fraction Prediction")

    run_button.click(
        fn=run_inference,
        inputs=[trained_dataset, seed, image_id, t_step],
        outputs=[segmentation_plot, volume_plot],
    )

demo.launch()