Commit: 3b7d632
Parent(s): 171905d

update requirements
Files changed:
- app.py +0 -1
- requirements.txt +0 -8
- utils/misc.py +0 -50
app.py
CHANGED

@@ -11,7 +11,6 @@ from pytorch_grad_cam.utils.image import show_cam_on_image
 from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
 
 from models.custom_resnet import Model
-
 from utils import get_device
 
 DEVICE = get_device()
requirements.txt
CHANGED

@@ -1,15 +1,7 @@
 torch
 torchvision
-torchinfo
-tqdm
-matplotlib
-albumentations
 numpy
-opencv-python
-torch-lr-finder
 grad-cam
-pytorch-lightning
-torchmetrics
 pandas
 gradio
 Pillow
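The trimmed list keeps only what the Gradio inference demo needs. As a quick sanity check, not part of this commit, the snippet below verifies that every remaining distribution resolves to an importable module; the distribution-to-module mapping (e.g. grad-cam installing as pytorch_grad_cam, Pillow as PIL) is an assumption consistent with the imports visible in this repo.

# Hypothetical post-change sanity check (not part of this commit):
# confirm every dependency left in requirements.txt is importable.
import importlib

# Distribution name -> top-level module name (assumed mapping).
PACKAGES = {
    "torch": "torch",
    "torchvision": "torchvision",
    "numpy": "numpy",
    "grad-cam": "pytorch_grad_cam",
    "pandas": "pandas",
    "gradio": "gradio",
    "Pillow": "PIL",
}

missing = []
for dist, module in PACKAGES.items():
    try:
        importlib.import_module(module)
    except ImportError:
        missing.append(dist)

print("missing:", missing or "none")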
utils/misc.py
CHANGED

@@ -1,9 +1,4 @@
 import torch
-import torchinfo
-from matplotlib import pyplot as plt
-from pytorch_grad_cam import GradCAM
-from pytorch_grad_cam.utils.image import show_cam_on_image
-from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
 
 SEED = 42
 DEVICE = None
@@ -22,48 +17,3 @@ def get_device():
         DEVICE = "cpu"
     print("Device Selected:", DEVICE)
     return DEVICE
-
-
-def set_seed(seed=SEED):
-    torch.manual_seed(seed)
-    if get_device() == 'cuda':
-        torch.cuda.manual_seed(seed)
-
-
-def plot_examples(images, labels, figsize=None, n=20):
-    _ = plt.figure(figsize=figsize)
-
-    for i in range(n):
-        plt.subplot(4, n//4, i + 1)
-        plt.tight_layout()
-        image = images[i]
-        plt.imshow(image, cmap='gray')
-        label = labels[i]
-        plt.title(str(label))
-        plt.xticks([])
-        plt.yticks([])
-
-
-def get_incorrect_preds(prediction, labels):
-    prediction = prediction.argmax(dim=1)
-    indices = prediction.ne(labels).nonzero().reshape(-1).tolist()
-    return indices, prediction[indices].tolist(), labels[indices].tolist()
-
-
-def get_cam_visualisation(model, dataset, input_tensor, label, target_layer, use_cuda=False):
-    grad_cam = GradCAM(model=model, target_layers=[target_layer], use_cuda=use_cuda)
-
-    targets = [ClassifierOutputTarget(label)]
-
-    grayscale_cam = grad_cam(input_tensor=input_tensor.unsqueeze(0), targets=targets)
-    # In this example grayscale_cam has only one image in the batch:
-    grayscale_cam = grayscale_cam[0, :]
-
-    output = show_cam_on_image(dataset.show_transform(input_tensor).cpu().numpy(), grayscale_cam,
-                               use_rgb=True)
-    return output
-
-
-def model_summary(model, input_size=None):
-    return torchinfo.summary(model, input_size=input_size, depth=5,
-                             col_names=["input_size", "output_size", "num_params", "params_percent"])
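With get_cam_visualisation removed from utils/misc.py while app.py still imports show_cam_on_image and ClassifierOutputTarget, the same overlay can be produced inline in the app. A minimal sketch of that flow follows, based on the pytorch_grad_cam calls in the removed helper; cam_overlay and rgb_image are illustrative names, not code from this repository, and the de-normalization step (dataset.show_transform in the old helper) is left to the caller.

# Illustrative sketch (assumption, not the author's app.py code) of the
# Grad-CAM flow previously provided by the removed get_cam_visualisation helper.
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget


def cam_overlay(model, input_tensor, label, target_layer, rgb_image):
    """Overlay a Grad-CAM heatmap for `label` on `rgb_image` (HxWx3 floats in [0, 1])."""
    grad_cam = GradCAM(model=model, target_layers=[target_layer])
    targets = [ClassifierOutputTarget(label)]
    # The CAM call works on batches: add a batch dim, then take the single map back out.
    grayscale_cam = grad_cam(input_tensor=input_tensor.unsqueeze(0), targets=targets)[0, :]
    return show_cam_on_image(rgb_image, grayscale_cam, use_rgb=True)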