import torch
import pandas as pd
import numpy as np
import gradio as gr
from PIL import Image
from torch.nn import functional as F
from collections import OrderedDict
from torchvision import transforms
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from custom_resnet import Model
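# Pick the best available accelerator: CUDA GPU, Apple MPS, or CPU.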
def get_device():
    if torch.cuda.is_available():
        device = "cuda"
    elif torch.backends.mps.is_available():
        device = "mps"
    else:
        device = "cpu"
    print("Device Selected:", device)
    return device
DEVICE = get_device()
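# CIFAR-10 class index to label mapping.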
classes = {0: 'airplane',
           1: 'automobile',
           2: 'bird',
           3: 'cat',
           4: 'deer',
           5: 'dog',
           6: 'frog',
           7: 'horse',
           8: 'ship',
           9: 'truck'}
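# Misclassified test-set records: map numeric labels to class names and shuffle the display order.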
missed_df = pd.read_csv('S12_incorrect.csv')
missed_df['ground_truths'] = missed_df['ground_truths'].map(classes)
missed_df['predicted_vals'] = missed_df['predicted_vals'].map(classes)
missed_df = missed_df.sample(frac=1)
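# Instantiate the network and load the trained checkpoint onto the selected device.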
model = Model()
model.load_state_dict(torch.load('S12_model.pth', map_location=DEVICE), strict=False)
model.eval()
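# Preprocessing applied to every input image before inference.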
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25])
])
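# Inverse of the normalisation above; maps a normalised tensor back to the [0, 1] range.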
inv_transform = transforms.Normalize(mean=[-2, -2, -2], std=[4, 4, 4])
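# One GradCAM extractor per selectable layer of model.network; the UI labels them -4 to -1.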
grad_cams = [GradCAM(model=model, target_layers=[model.network[i]], use_cuda=(DEVICE == 'cuda')) for i in range(4)]
def get_gradcam_image(input_tensor, label, target_layer):
    # Compute the GradCAM heatmap for the given class label at the chosen layer.
    grad_cam = grad_cams[target_layer]
    targets = [ClassifierOutputTarget(label)]
    grayscale_cam = grad_cam(input_tensor=input_tensor, targets=targets)
    grayscale_cam = grayscale_cam[0, :]
    return grayscale_cam
def image_classifier(input_image, top_classes=3, show_cam=True, target_layers=[2, 3], transparency=0.7):
    # Preprocess the image, run the model, and convert logits to class probabilities.
    input_ = transform(input_image).unsqueeze(0)
    output = model(input_)
    output = F.softmax(output.flatten(), dim=-1)
    confidences = [(classes[i], float(output[i])) for i in range(10)]
    confidences.sort(key=lambda x: x[1], reverse=True)
    confidences = OrderedDict(confidences[:top_classes])
    label = torch.argmax(output).item()
    outputs = list()
    if show_cam:
        # Overlay a GradCAM heatmap for each requested layer on the input image.
        for layer in target_layers:
            grayscale_cam = get_gradcam_image(input_, label, layer)
            output_image = show_cam_on_image(input_image / 255, grayscale_cam, use_rgb=True, image_weight=transparency)
            outputs.append((output_image, f"Layer {layer - 4}"))
    else:
        outputs.append((input_image, "Input"))
    return outputs, confidences
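# Tab 1: classify an uploaded image and optionally overlay GradCAM heatmaps for the chosen layers.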
demo1 = gr.Interface(
    fn=image_classifier,
    inputs=[
        gr.Image(shape=(32, 32), label="Input Image", value='examples/cat.jpg'),
        gr.Slider(1, 10, value=3, step=1, label="Top Classes",
                  info="How many top classes do you want to view?"),
        gr.Checkbox(label="Enable GradCAM", value=True, info="Do you want to see Class Activation Maps?"),
        gr.CheckboxGroup(["-4", "-3", "-2", "-1"], value=["-2", "-1"], label="Network Layers", type='index',
                         info="Which layer CAMs do you want to visualize?"),
        gr.Slider(0, 1, value=0.7, label="Transparency", step=0.1,
                  info="Set Transparency of CAMs")
    ],
    outputs=[gr.Gallery(label="Output Images", columns=2, rows=2), gr.Label(label='Top Classes')],
    examples=[[f'examples/{k}.jpg'] for k in classes.values()]
)
def show_incorrect(num_examples=20, show_cam=True, target_layer=-2, transparency=0.7):
    # Walk through the shuffled misclassified samples and collect up to num_examples that the
    # current model still gets wrong, each captioned with "ground truth / prediction".
    result = list()
    for index, row in missed_df.iterrows():
        image = np.asarray(Image.open(f'missed_examples/{index}.jpg'))
        output_images, confidences = image_classifier(image, top_classes=1, show_cam=show_cam,
                                                      target_layers=[4 + target_layer], transparency=transparency)
        truth = row['ground_truths']
        predicted = list(confidences)[0]
        if truth != predicted:
            result.append((output_images[0][0], f"{row['ground_truths']} / {predicted}"))
            if len(result) >= num_examples:
                break
    return result
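# Tab 2: browse GradCAM overlays of misclassified test images.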
demo2 = gr.Interface(
    fn=show_incorrect,
    inputs=[
        gr.Number(value=20, minimum=1, maximum=100, label="No. of Examples", precision=0,
                  info="How many misclassified examples do you want to view? (1 - 100)"),
        gr.Checkbox(label="Enable GradCAM", value=True, info="Do you want to see Class Activation Maps?"),
        gr.Slider(-4, -1, value=-2, step=1, label="Network Layer", info="Which layer CAM do you want to visualize?"),
        gr.Slider(0, 1, value=0.7, label="Transparency", step=0.1, info="Set Transparency of CAMs"),
    ],
    outputs=[gr.Gallery(label="Misclassified Images (Truth / Predicted)", columns=5)]
)
demo = gr.TabbedInterface([demo1, demo2], ["Examples", "Misclassified Examples"])
demo.launch()