Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ import torch.nn.utils.prune as prune
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-
+model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-128", torch_dtype=torch.float16)
 model.eval()
 
 # Apply global unstructured pruning
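Note: torch_dtype=torch.float16 materializes the checkpoint weights in half precision at load time. A minimal sketch of the consequence (the model id is taken from this diff; the assertion is illustrative only):

import torch
from transformers import DPTForDepthEstimation

# Weights come back as fp16, so a later model.half() is a no-op for them,
# and every input tensor must also be fp16 to avoid dtype mismatches on CUDA.
model = DPTForDepthEstimation.from_pretrained(
    "Intel/dpt-swinv2-tiny-128", torch_dtype=torch.float16
)
assert next(model.parameters()).dtype == torch.float16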
@@ -27,20 +27,21 @@ model = torch.quantization.quantize_dynamic(
     model, {torch.nn.Linear, torch.nn.Conv2d}, dtype=torch.qint8
 )
 
-model = model.to(device)
+model = model.half().to(device)
 
-processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-
+processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-128")
 
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
+color_map = torch.from_numpy(color_map).to(device)
 
-input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.
+input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float16, device=device)
 
 def preprocess_image(image):
-    image = torch.from_numpy(image).to(device)
+    image = torch.from_numpy(image).to(device, dtype=torch.float16)
     image = torch.nn.functional.interpolate(image.permute(2, 0, 1).unsqueeze(0), size=(128, 128), mode='bilinear', align_corners=False)
     return (image.squeeze(0) / 255.0)
 
-static_input = torch.zeros((1, 3, 128, 128), device=device, dtype=torch.float16)
+static_input = torch.zeros((1, 3, 128, 128), device=device, dtype=torch.float16)
 g = torch.cuda.CUDAGraph()
 with torch.cuda.graph(g):
     static_output = model(static_input)
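Note: the capture above records the very first forward pass. The pattern in the PyTorch CUDA Graphs documentation warms the model up on a side stream first, so one-time allocations and lazy initialization are not baked into the graph. A hedged sketch using the same names as the diff (the warm-up count of 3 is arbitrary):

# Warm up on a side stream before capture, per the torch.cuda.CUDAGraph docs.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s), torch.no_grad():
    for _ in range(3):
        model(static_input)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g), torch.no_grad():
    static_output = model(static_input)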
@@ -52,13 +53,12 @@ def process_frame(image):
     preprocessed = preprocess_image(image)
     static_input.copy_(preprocessed)
     g.replay()
-    depth_map = static_output.predicted_depth.squeeze()
+    depth_map = static_output.predicted_depth.squeeze()
     depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
-    depth_map = (depth_map * 255).
-    depth_map_colored =
-
-
-
+    depth_map = (depth_map * 255).to(torch.uint8)
+    depth_map_colored = color_map[depth_map]
+    return depth_map_colored.cpu().numpy()
+
 interface = gr.Interface(
     fn=process_frame,
     inputs=gr.Image(sources="webcam", streaming=True),
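Note: the colorization is a lookup-table gather: applying COLORMAP_INFERNO once to the values 0..255 gives a 256-entry BGR table, and indexing it with a quantized depth map colors every pixel at once. A self-contained sketch (the random depth map is a stand-in; the .long() cast matters because indexing with a uint8 tensor is deprecated in PyTorch and is treated as a boolean mask):

import cv2
import numpy as np
import torch

# Build the LUT: (256, 1, 3) BGR, then drop the singleton column -> (256, 3).
lut = cv2.applyColorMap(np.arange(256, dtype=np.uint8).reshape(256, 1), cv2.COLORMAP_INFERNO)
lut = torch.from_numpy(lut).squeeze(1)
depth_u8 = torch.randint(0, 256, (128, 128), dtype=torch.uint8)  # stand-in depth map
colored = lut[depth_u8.long()]  # (128, 128, 3), one color per pixel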
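Note: one library behavior worth keeping in mind for the unchanged quantization block above (standard PyTorch behavior, not something this commit changes): torch.quantization.quantize_dynamic swaps in dynamic quantized kernels only for a small set of module types, notably nn.Linear and the recurrent layers; nn.Conv2d entries in the set are skipped silently, and the converted modules run on CPU kernels. A quick way to inspect what was actually converted:

import torch
import torch.nn as nn

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(8 * 6 * 6, 10))
q = torch.quantization.quantize_dynamic(m, {nn.Linear, nn.Conv2d}, dtype=torch.qint8)
print(q)  # the Linear is replaced by a dynamic quantized variant; the Conv2d is left as-is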