EgoHackZero committed
Commit 7d74b9b · 1 Parent(s): 5af96f6
solve problem with input secont try
app.py CHANGED
@@ -5,19 +5,21 @@ from PIL import Image
 import cv2
 
 # Load the model
-
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small").to(device)
 midas.eval()
 
-midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
+midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms").to(device)
 transform = midas_transforms.small_transform
 
+
 def predict_depth(image):
     img = np.array(image)
     img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    input_tensor = transform(img_rgb)
+    input_tensor = transform(img_rgb).to(device)
 
     with torch.no_grad():
-        prediction = midas(input_tensor
+        prediction = midas(input_tensor)
         prediction = torch.nn.functional.interpolate(
             prediction.unsqueeze(1),
             size=img_rgb.shape[:2],
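
The commit pins the model and its input tensor to a single torch device, so a model loaded onto CUDA no longer receives CPU tensors. Below is a minimal sketch of how the updated part of app.py might read in full. It is a guess based only on the hunk above, not the actual file: the mode and align_corners arguments to interpolate, the final .cpu().numpy() conversion, and the docstring are assumptions, and the transforms object is loaded without .to(device) in this sketch because it is only used for CPU-side preprocessing before the tensor is moved.

import cv2
import numpy as np
import torch
from PIL import Image

# Pick one device and keep the model and every input tensor on it
# (the pattern this commit introduces).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# MiDaS_small from torch.hub; eval() disables dropout/batch-norm updates.
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small").to(device)
midas.eval()

# The hub "transforms" entry bundles the preprocessing pipelines;
# small_transform matches the MiDaS_small model.
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
transform = midas_transforms.small_transform

def predict_depth(image):
    """Return a depth map with the same height and width as the input image."""
    img = np.array(image)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # small_transform returns a batched (1, 3, H, W) tensor; move it to the
    # same device as the model before the forward pass.
    input_tensor = transform(img_rgb).to(device)

    with torch.no_grad():
        prediction = midas(input_tensor)
        # Upsample the low-resolution prediction back to the input resolution.
        prediction = torch.nn.functional.interpolate(
            prediction.unsqueeze(1),
            size=img_rgb.shape[:2],
            mode="bicubic",
            align_corners=False,
        ).squeeze()

    return prediction.cpu().numpy()

The design point the diff illustrates is that both the module returned by torch.hub.load and every tensor fed into it must be moved with .to(device); otherwise a CUDA-resident model raises a device-mismatch error on CPU inputs.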