mathpluscode committed
Commit c3b3ff1 · 1 Parent(s): 5826e7f

Update layout

Files changed (3)
  1. .gitignore +6 -0
  2. app.py +135 -117
  3. requirements.txt +1 -1
.gitignore CHANGED
@@ -2,3 +2,9 @@
  .DS_Store
  node_modules/
  src/cache/
+ .locks/
+ ukb/
+ *.gif
+ *.png
+ models--mathpluscode--CineMA/
+ datasets--mathpluscode--ACDC/
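The added patterns match the directory names the Hugging Face Hub cache creates when model and dataset snapshots are downloaded into the working directory (`models--<org>--<name>`, `datasets--<org>--<name>`, and a `.locks/` folder), plus the GIF/PNG plots the demo writes. A minimal sketch of how such folders can appear locally; the calls below are an illustrative assumption, not the download code actually used in app.py:

```python
# Hypothetical illustration: snapshot_download with a local cache_dir creates
# models--mathpluscode--CineMA/ and datasets--mathpluscode--ACDC/ (plus .locks/),
# which the new .gitignore entries keep out of version control.
from huggingface_hub import snapshot_download

snapshot_download(repo_id="mathpluscode/CineMA", cache_dir=".")
snapshot_download(repo_id="mathpluscode/ACDC", repo_type="dataset", cache_dir=".")
```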
app.py CHANGED
@@ -10,6 +10,7 @@ import torch
  from cinema import CineMA, ConvUNetR, ConvViT, heatmap_soft_argmax
  from cinema.examples.cine_cmr import plot_cmr_views
  from cinema.examples.inference.landmark_heatmap import (
+ plot_heatmap_and_landmarks,
  plot_landmarks,
  plot_lv,
  )
@@ -81,8 +82,8 @@ def cmr_tab():
  minimum=1,
  maximum=4,
  step=1,
- label="Choose an image, ID is between 1 and 4",
- value=1,
+ label="Choose an image",
+ value=2,
  )
  # Placeholder for slice slider, will update dynamically
  slice_idx = gr.Slider(
@@ -90,7 +91,7 @@
  maximum=8,
  step=1,
  label="SAX slice to visualize",
- value=0,
+ value=1,
  )

  def get_num_slices(image_id):
@@ -99,7 +100,7 @@

  def update_slice_slider(image_id):
  num_slices = get_num_slices(image_id)
- return gr.update(maximum=num_slices - 1, value=0, visible=True)
+ return gr.update(maximum=num_slices - 1, value=1, visible=True)

  def fn(image_id, slice_idx):
  lax_2c_image = load_nifti_from_github(
@@ -125,7 +126,7 @@

  # When image changes, update the slice slider and plot
  gr.on(
- fn=lambda image_id: [update_slice_slider(image_id), fn(image_id, 0)],
+ fn=lambda image_id: [update_slice_slider(image_id), fn(image_id, 1)],
  inputs=[image_id],
  outputs=[slice_idx, cmr_plot],
  )
@@ -238,9 +239,11 @@ def mae_tab():
  with gr.Blocks() as mae_interface:
  gr.Markdown(
  """
- This page demonstrates the masking and reconstruction process of the masked autoencoder. The model was trained with a mask ratio of 0.75 over 74,000 studies.
+ This page demonstrates the masking and reconstruction process. The model was trained with a mask ratio of 0.75. Click the button below to launch the inference. ⬇️
  """
  )
+ run_button = gr.Button("Launch reconstruction", variant="primary")
+
  with gr.Row():
  with gr.Column(scale=5):
  gr.Markdown("## Reconstruction")
@@ -249,14 +252,14 @@
  type="filepath",
  label="Masked Autoencoder Reconstruction",
  )
- with gr.Column(scale=3):
+ with gr.Column(scale=5):
  gr.Markdown("## Data Settings")
  image_id = gr.Slider(
  minimum=1,
  maximum=4,
  step=1,
- label="Choose an image, ID is between 1 and 4",
- value=1,
+ label="Choose an image",
+ value=2,
  )
  mask_ratio = gr.Slider(
  minimum=0.05,
@@ -265,7 +268,6 @@
  label="Mask ratio",
  value=0.75,
  )
- run_button = gr.Button("Run masked autoencoder", variant="primary")

  run_button.click(
  fn=mae,
@@ -376,9 +378,10 @@ def segmentation_sax_tab():
  with gr.Blocks() as sax_interface:
  gr.Markdown(
  """
- This page demonstrates the segmentation of cardiac structures in the short-axis (SAX) view.
+ This page demonstrates the segmentation of cardiac structures in the short-axis (SAX) view. Click the button below to launch the inference. ⬇️
  """
  )
+ run_button = gr.Button("Launch segmentation inference", variant="primary")

  with gr.Row():
  with gr.Column(scale=4):
@@ -391,43 +394,47 @@ def segmentation_sax_tab():
  ### Model

  The available models are finetuned on different datasets ([ACDC](https://www.creatis.insa-lyon.fr/Challenge/acdc/), [M&Ms](https://www.ub.edu/mnms/), and [M&Ms2](https://www.ub.edu/mnms-2/)). For each dataset, there are three models finetuned with seeds: 0, 1, 2.
-
- ### Visualisation
-
- The left figure shows the segmentation of ventricles and myocardium at every n time step across all SAX slices.
- The right figure shows the volumes across all time frames and estimates the ejection fraction (EF) for the left ventricle (LV) and right ventricle (RV).
  """)
- with gr.Column(scale=3):
- gr.Markdown("## Data Settings")
- image_id = gr.Slider(
- minimum=101,
- maximum=150,
- step=1,
- label="Choose an image, ID is between 101 and 150",
- value=101,
- )
- t_step = gr.Slider(
- minimum=1,
- maximum=10,
- step=1,
- label="Choose the gap between time frames",
- value=3,
- )
- with gr.Column(scale=3):
- gr.Markdown("## Model Settings")
- trained_dataset = gr.Dropdown(
- choices=["ACDC", "M&MS", "M&MS2"],
- label="Choose which dataset the model was finetuned on",
- value="ACDC",
- )
- seed = gr.Slider(
- minimum=0,
- maximum=2,
- step=1,
- label="Choose which seed the finetuning used",
- value=0,
- )
- run_button = gr.Button("Run SAX segmentation inference", variant="primary")
+ with gr.Column(scale=6):
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.Markdown("## Data Settings")
+ image_id = gr.Slider(
+ minimum=101,
+ maximum=150,
+ step=1,
+ label="Choose an image",
+ value=150,
+ )
+ t_step = gr.Slider(
+ minimum=1,
+ maximum=10,
+ step=1,
+ label="Choose the gap between time frames",
+ value=3,
+ )
+ with gr.Column(scale=1):
+ gr.Markdown("## Model Settings")
+ trained_dataset = gr.Dropdown(
+ choices=["ACDC", "M&MS", "M&MS2"],
+ label="Choose which dataset the model was finetuned on",
+ value="ACDC",
+ )
+ seed = gr.Slider(
+ minimum=0,
+ maximum=2,
+ step=1,
+ label="Choose which seed the finetuning used",
+ value=1,
+ )
+
+ # Visualisation description block
+ gr.Markdown("""
+ ## Visualisation
+
+ The left figure shows the segmentation at every n time step across all SAX slices.
+ The right figure shows the volumes across time frames and estimates the ejection fraction (EF) for the left ventricle (LV) and right ventricle (RV).
+ """)

  with gr.Row():
  with gr.Column():
@@ -535,9 +542,10 @@ def segmentation_lax_tab():
  with gr.Blocks() as lax_interface:
  gr.Markdown(
  """
- This page demonstrates the segmentation of cardiac structures in the long-axis (LAX) four-chamber (4C) view.
+ This page demonstrates the segmentation of cardiac structures in the long-axis (LAX) four-chamber (4C) view. Click the button below to launch the inference. ⬇️
  """
  )
+ run_button = gr.Button("Launch segmentation inference", variant="primary")

  with gr.Row():
  with gr.Column(scale=4):
@@ -550,31 +558,35 @@
  ### Model

  The available models are finetuned on [M&Ms2](https://www.ub.edu/mnms-2/). There are three models finetuned with seeds: 0, 1, 2.
-
- ### Visualisation
-
- The left figure shows the segmentation of ventricles and myocardium across all time frames.
- The right figure shows the volumes across all time frames and estimates the ejection fraction (EF).
  """)
- with gr.Column(scale=3):
- gr.Markdown("## Data Settings")
- image_id = gr.Slider(
- minimum=1,
- maximum=4,
- step=1,
- label="Choose an image, ID is between 1 and 4",
- value=1,
- )
- with gr.Column(scale=3):
- gr.Markdown("## Model Settings")
- seed = gr.Slider(
- minimum=0,
- maximum=2,
- step=1,
- label="Choose which seed the finetuning used",
- value=0,
- )
- run_button = gr.Button("Run LAX 4C segmentation inference", variant="primary")
+ with gr.Column(scale=6):
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.Markdown("## Data Settings")
+ image_id = gr.Slider(
+ minimum=1,
+ maximum=4,
+ step=1,
+ label="Choose an image",
+ value=2,
+ )
+ with gr.Column(scale=1):
+ gr.Markdown("## Model Settings")
+ seed = gr.Slider(
+ minimum=0,
+ maximum=2,
+ step=1,
+ label="Choose which seed the finetuning used",
+ value=1,
+ )
+
+ # Visualisation description block
+ gr.Markdown("""
+ ## Visualisation
+
+ The left figure shows the segmentation across time frames.
+ The right figure shows the volumes across time frames and estimates the ejection fraction (EF).
+ """)

  with gr.Row():
  with gr.Column():
@@ -707,17 +719,18 @@ def landmark(image_id, view, method, seed, progress=gr.Progress()):
  )

  if method == "heatmap":
- _, coords = landmark_heatmap_inference(images, view, transform, model, progress)
+ probs, coords = landmark_heatmap_inference(
+ images, view, transform, model, progress
+ )
+ progress(1, desc="Inference finished. Plotting ...")
+ plot_heatmap_and_landmarks(images, probs, coords, landmark_path)
  elif method == "coordinate":
  coords = landmark_coordinate_inference(images, view, transform, model, progress)
+ progress(1, desc="Inference finished. Plotting ...")
+ plot_landmarks(images, coords, landmark_path)
  else:
  raise ValueError(f"Invalid method: {method}")

- progress(1, desc="Inference finished. Plotting ...")
-
- # Plot landmarks in GIF
- plot_landmarks(images, coords, landmark_path)
-
  # Plot LV change in PNG
  plot_lv(coords, lv_path)

@@ -728,9 +741,12 @@ def landmark_tab():
  with gr.Blocks() as landmark_interface:
  gr.Markdown(
  """
- This page demonstrates landmark localisation in the long-axis (LAX) two-chamber (2C) and four-chamber (4C) views.
+ This page demonstrates landmark localisation in the long-axis (LAX) two-chamber (2C) and four-chamber (4C) views. Click the button below to launch the inference. ⬇️
  """
  )
+ run_button = gr.Button(
+ "Launch landmark localisation inference", variant="primary"
+ )

  with gr.Row():
  with gr.Column(scale=4):
@@ -749,43 +765,45 @@
  - **Coordinate**: predicts landmark coordinates directly

  For each type, there are three models finetuned with seeds: 0, 1, 2.
-
- ### Visualisation
-
- The left figure shows the landmark positions across all time frames.
- The right figure shows the length of the left ventricle across all time frames and the estimates of two metrics:
- - Mitral annular plane systolic excursion (MAPSE)
- - Global longitudinal shortening (GLS)
  """)
- with gr.Column(scale=3):
- gr.Markdown("## Data Settings")
- image_id = gr.Slider(
- minimum=1,
- maximum=4,
- step=1,
- label="Choose an image, ID is between 1 and 4",
- value=1,
- )
- view = gr.Dropdown(
- choices=["LAX 2C", "LAX 4C"],
- label="Choose which view to localise the landmarks",
- value="LAX 2C",
- )
- with gr.Column(scale=3):
- gr.Markdown("## Model Settings")
- method = gr.Dropdown(
- choices=["Heatmap", "Coordinate"],
- label="Choose which method to use",
- value="Heatmap",
- )
- seed = gr.Slider(
- minimum=0,
- maximum=2,
- step=1,
- label="Choose which seed the finetuning used",
- value=0,
- )
- run_button = gr.Button("Run landmark localisation inference", variant="primary")
+ with gr.Column(scale=6):
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.Markdown("## Data Settings")
+ image_id = gr.Slider(
+ minimum=1,
+ maximum=4,
+ step=1,
+ label="Choose an image",
+ value=2,
+ )
+ view = gr.Dropdown(
+ choices=["LAX 2C", "LAX 4C"],
+ label="Choose which view to localise the landmarks",
+ value="LAX 2C",
+ )
+ with gr.Column(scale=1):
+ gr.Markdown("## Model Settings")
+ method = gr.Dropdown(
+ choices=["Heatmap", "Coordinate"],
+ label="Choose which method to use",
+ value="Heatmap",
+ )
+ seed = gr.Slider(
+ minimum=0,
+ maximum=2,
+ step=1,
+ label="Choose which seed the finetuning used",
+ value=1,
+ )
+
+ # Visualisation description block
+ gr.Markdown("""
+ ## Visualisation
+
+ The left figure shows the landmark positions across time frames.
+ The right figure shows the length of the left ventricle across time frames and estimates mitral annular plane systolic excursion (MAPSE) and global longitudinal shortening (GLS).
+ """)

  with gr.Row():
  with gr.Column():
@@ -816,9 +834,9 @@ with gr.Blocks(
  """
  # CineMA: A Foundation Model for Cine Cardiac MRI 🎥🫀

- 🚀 The following demos showcase the capabilities of CineMA in multiple tasks.<br>
+ 🚀 The following demonstrations showcase the capabilities of CineMA in multiple tasks. Click the button to launch the inference.<br>
  ⏱️ The examples may take 10-60 seconds, if not cached, to download data and model, perform inference, and render plots.<br>
- 🔗 For more details, check out our [GitHub](https://github.com/mathpluscode/CineMA).
+ 🔗 For more details, check out our [GitHub repository](https://github.com/mathpluscode/CineMA).
  """
  )
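Taken together, the app.py changes repeat one pattern per tab: the intro Markdown ends with a call to action, the primary run button is declared immediately after it, the data/model settings move into columns inside a nested row, and sliders are resized dynamically with gr.update. A minimal, self-contained sketch of that pattern, with a made-up slice-count table and a placeholder run() instead of the real CineMA inference:

```python
# A minimal sketch of the layout pattern the new app.py uses: a primary button
# directly under the intro text, settings split into columns, and a slice
# slider whose range is updated via gr.update when the image changes.
# NUM_SLICES and run() are placeholders, not the real CineMA logic.
import gradio as gr

NUM_SLICES = {1: 9, 2: 11, 3: 10, 4: 8}  # hypothetical slice counts per image ID


def update_slice_slider(image_id):
    # Clamp the slice slider to the slices available for the chosen image.
    return gr.update(maximum=NUM_SLICES[int(image_id)] - 1, value=1, visible=True)


def run(image_id, slice_idx, seed):
    return f"Would run inference on image {int(image_id)}, slice {int(slice_idx)}, seed {int(seed)}."


with gr.Blocks() as demo:
    gr.Markdown("Click the button below to launch the inference.")
    run_button = gr.Button("Launch inference", variant="primary")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Data Settings")
            image_id = gr.Slider(minimum=1, maximum=4, step=1, label="Choose an image", value=2)
            slice_idx = gr.Slider(minimum=0, maximum=8, step=1, label="SAX slice to visualize", value=1)
        with gr.Column(scale=1):
            gr.Markdown("## Model Settings")
            seed = gr.Slider(minimum=0, maximum=2, step=1, label="Choose which seed the finetuning used", value=1)
    output = gr.Textbox(label="Status")

    # Resize the slice slider when the image changes; run the placeholder on click.
    image_id.change(fn=update_slice_slider, inputs=[image_id], outputs=[slice_idx])
    run_button.click(fn=run, inputs=[image_id, slice_idx, seed], outputs=[output])

if __name__ == "__main__":
    demo.launch()
```

Declaring the button above the settings is safe because the event wiring (run_button.click) happens only after all inputs exist, which is why the commit can delete the old button definitions without touching the existing run_button.click calls.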
requirements.txt CHANGED
@@ -17,6 +17,6 @@ scikit-learn==1.6.1
  scipy==1.15.2
  spaces==0.36.0
  timm==1.0.15
- git+https://github.com/mathpluscode/CineMA@0d1afe864d4b4c348a993b8b2b790adf8581bc03#egg=cinema
+ git+https://github.com/mathpluscode/CineMA@dd9c19cfe5f09c26dbf29373f92ced2f9a0648b7#egg=cinema
  --extra-index-url https://download.pytorch.org/whl/cu113
  torch==2.5.1