Update app.py
app.py
CHANGED
@@ -32,7 +32,7 @@ try:
     cell_attribute_model.load_state_dict(custom_weights)
     cell_attribute_model.eval().to(device)

-    model = DetectMultiBackend('Attridet_weight/
+    model = DetectMultiBackend('Attridet_weight/last_300e_100x.pt')
 except Exception as e:
     print(f"Error loading model: {e}")

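For context, `DetectMultiBackend` is the YOLOv5 family's unified checkpoint loader. Below is a minimal sketch of the loading pattern this hunk uses; the `models.common` import assumes a standard YOLOv5-style repo layout, and everything other than the checkpoint path and the error handling from the hunk itself is illustrative:

```python
import torch
from models.common import DetectMultiBackend  # assumes YOLOv5-style repo layout

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

try:
    # Checkpoint path taken from this hunk; device placement is illustrative.
    model = DetectMultiBackend('Attridet_weight/last_300e_100x.pt', device=device)
except Exception as e:
    # Mirrors the app's behavior: log the failure and keep the process alive.
    print(f"Error loading model: {e}")
```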
@@ -53,10 +53,20 @@ abstract = """
 """

 footer = r"""
-🦠
-
-
-
+## 🦠 Developed by
+***Intelligent Machines Lab***, Information Technology University of Punjab
+<a href="https://im.itu.edu.pk/" target="_blank">🌐 website</a>
+
+## 🧪 Demo Paper
+Our demo paper is available at: A Large-scale Multi Domain Leukemia Dataset for the White Blood Cells Detection with Morphological Attributes for Explainability
+<a href="https://arxiv.org/abs/2405.10803" target="_blank">📄 arXiv:2405.10803</a>
+
+## 🦠 Github Repository
+We would be grateful if you consider starring our
+<a href="https://github.com/intelligentMachines-ITU/Blood-Cancer-Dataset-Lukemia-Attri-MICCAI-2024" target="_blank">⭐ Blood Cancer Dataset Repository</a>
+## 📧 **Contact**
+If you have any questions, please feel free to contact Abdul Rehman <b>(phdcs23002@itu.edu.pk)</b>.
+## 📝 Citation
 ```bibtex
 @inproceedings{rehman2024large,
 title={A large-scale multi domain leukemia dataset for the white blood cells detection with morphological attributes for explainability},
@@ -68,16 +78,23 @@ We would be grateful if you consider starring our <a href="Website">https://gith
 }


-📧 **Contact**
-If you have any questions, please feel free to contact Abdul Rehman <b>(phdcs23002@itu.edu.pk)</b>.
 """

 css = """
 h1#title {
-text-align:
+    text-align: right;
 }
 """
-
+cloze_samples = [
+    ["sample/18_33_1000_ALL.png"],
+    ["sample/8_18_1000_ALL.png"],
+    ["sample/15_20_1000_AML.png"],
+    ["sample/21_32_1000_CLL.png"],
+    ["sample/28_24_1000_CML.png"],
+    ["sample/31_23_1000_CML.png"],
+    ["sample/31_34_1000_CML.png"],
+    ["sample/23_40_1000_APML.png"],
+]


@@ -181,7 +198,7 @@ def capture_image(pil_img):
     pred_Cytoplasmic_Vacuoles_array = []

     for i in range(len(pred[0])):
-        if pred[0][i].numel() > 0:  # Check if the tensor is not empty
+        # if pred[0][i].numel() > 0:  # Check if the tensor is not empty

         pred_tensor = pred[0][i][0:4]

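One thing worth flagging on this hunk: the disabled `numel()` guard is only safe to drop because the loop bounds already come from `len(pred[0])`, so every row the loop indexes exists. A small sketch of the row layout being assumed; the tensor contents are illustrative, following the usual YOLO `[x1, y1, x2, y2, conf, cls]` row format:

```python
import torch

# One fake detection row in YOLO's [x1, y1, x2, y2, conf, cls] layout.
pred = [torch.tensor([[10.0, 20.0, 110.0, 140.0, 0.91, 3.0]])]

for i in range(len(pred[0])):
    # Row i exists by construction of the loop bounds, which is why the
    # per-row numel() check could be commented out without breaking anything.
    pred_tensor = pred[0][i][0:4]   # box corners
    conf = float(pred[0][i][4])     # confidence score
    cls = int(pred[0][i][5])        # class index into `names`
    print(pred_tensor.tolist(), conf, cls)
```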
@@ -272,7 +289,7 @@ def capture_image(pil_img):

     return new_data

-    names = ["
+    names = ["Unidentified", "Myeloblast", "Lymphoblast", "Neutrophil", "Atypical lymphocyte",
              "Promonocyte", "Monoblast", "Lymphocyte", "Myelocyte", "Abnormal promyelocyte",
              "Monocyte", "Metamyelocyte", "Eosinophil", "Basophil"]

@@ -337,12 +354,13 @@ def capture_image(pil_img):
             continue

         # Draw the bounding box
-        cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0,
+        cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)

         # Display prediction with confidence
-        label = f"{prediction} ({confidence:.2f})"
+        # label = f"{prediction} ({confidence:.2f})"
+        label = f"{prediction}"
         cv2.putText(img, label, (x_min, max(0, y_min - 10)),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

     return img  # Return the annotated image
     # df_predictions.to_csv("predictions.csv", index=False)  # Save if needed
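For readers skimming the color changes: OpenCV uses BGR channel order, so `(0, 255, 0)` is a green box and `(0, 0, 255)` a red label. A self-contained sketch of the annotation step as it stands after this hunk; the image, box, and class name are placeholders:

```python
import cv2
import numpy as np

img = np.zeros((240, 320, 3), dtype=np.uint8)   # placeholder frame
x_min, y_min, x_max, y_max = 40, 60, 160, 200   # placeholder box
prediction = "Myeloblast"

# Green rectangle, red text, both in BGR order as in the hunk.
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
label = f"{prediction}"  # confidence display is dropped by this commit
cv2.putText(img, label, (x_min, max(0, y_min - 10)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
```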
@@ -389,7 +407,7 @@ def inference_fn_select(image_input):
     return None, f"Error in inference: {e}"

 def set_cloze_samples(example: list) -> dict:
-    return gr.
+    return gr.update(value=example[0]), 'Cloze Test'

 with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
     gr.Markdown(header)
@@ -400,18 +418,27 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
         with gr.Column(scale=0.5, min_width=500):
             image_input = gr.Image(type="pil", interactive=True, label="Upload an image 📷", height=250)
         with gr.Column(scale=0.5, min_width=500):
-            task_button = gr.Radio(label="Task", interactive=True,
-                                   choices=['Detect
+            task_button = gr.Radio(label="Contextual Task type", interactive=True,
+                                   choices=['Detect'],
                                    value='Detect')
     with gr.Row():
         submit_button = gr.Button(value="🚀 Run", interactive=True, variant="primary")
         clear_button = gr.Button(value="🔄 Clear", interactive=True)

     with gr.Row():
-        with gr.Column(scale=0.5, min_width=
+        with gr.Column(scale=0.5, min_width=500):
             image_output = gr.Image(type='pil', interactive=False, label="Detection output")
         with gr.Column(scale=0.5, min_width=500):
             chat_output = gr.Textbox(label="Text output")
+    # with gr.Row():
+    #     with gr.Column(scale=0.5, min_width=500):
+    with gr.Row():
+        cloze_examples = gr.Dataset(
+            label='WBC Detection with Morphology Examples',
+            components=[image_input],
+            samples=cloze_samples,
+        )
+

     submit_button.click(
         inference_fn_select,
@@ -432,8 +459,13 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
         [image_output, chat_output],
         queue=False,
     )
+    cloze_examples.click(
+        fn=set_cloze_samples,
+        inputs=[cloze_examples],
+        outputs=[image_input, chat_output],
+    )

     gr.Markdown(footer)

 demo.queue()  # Enable request queuing
-demo.launch(share=False)
+demo.launch(share=False)
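Taken together, the new `cloze_samples` list, the `gr.Dataset`, and `set_cloze_samples` form one feature: clicking a gallery tile loads that sample image into the upload box. A stripped-down sketch of the wiring; component names follow the diff, the rest is a minimal stand-in app, and exact `gr.Dataset` click semantics can vary across Gradio versions:

```python
import gradio as gr

cloze_samples = [["sample/18_33_1000_ALL.png"]]  # paths as in the diff

def set_cloze_samples(example: list):
    # A clicked Dataset row arrives as a list of component values;
    # example[0] is the image path, pushed into the upload component.
    return gr.update(value=example[0]), 'Cloze Test'

with gr.Blocks() as demo:
    image_input = gr.Image(type="pil", label="Upload an image")
    chat_output = gr.Textbox(label="Text output")
    cloze_examples = gr.Dataset(components=[image_input], samples=cloze_samples)
    cloze_examples.click(
        fn=set_cloze_samples,
        inputs=[cloze_examples],
        outputs=[image_input, chat_output],
    )

demo.launch()
```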