chongzhou committed on
Commit
4d4080b
·
1 Parent(s): defc201

fix inference_state["device"]

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -228,7 +228,8 @@ def preprocess_video_in(
228
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
229
  if torch.cuda.is_available():
230
  predictor.to("cuda")
231
- inference_state["device"] = "cuda"
 
232
  if torch.cuda.get_device_properties(0).major >= 8:
233
  torch.backends.cuda.matmul.allow_tf32 = True
234
  torch.backends.cudnn.allow_tf32 = True
@@ -260,7 +261,8 @@ def segment_with_points(
260
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
261
  if torch.cuda.is_available():
262
  predictor.to("cuda")
263
- inference_state["device"] = "cuda"
 
264
  if torch.cuda.get_device_properties(0).major >= 8:
265
  torch.backends.cuda.matmul.allow_tf32 = True
266
  torch.backends.cudnn.allow_tf32 = True
@@ -351,7 +353,8 @@ def propagate_to_all(
351
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
352
  if torch.cuda.is_available():
353
  predictor.to("cuda")
354
- inference_state["device"] = "cuda"
 
355
  if torch.cuda.get_device_properties(0).major >= 8:
356
  torch.backends.cuda.matmul.allow_tf32 = True
357
  torch.backends.cudnn.allow_tf32 = True
 
228
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
229
  if torch.cuda.is_available():
230
  predictor.to("cuda")
231
+ if inference_state:
232
+ inference_state["device"] = "cuda"
233
  if torch.cuda.get_device_properties(0).major >= 8:
234
  torch.backends.cuda.matmul.allow_tf32 = True
235
  torch.backends.cudnn.allow_tf32 = True
 
261
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
262
  if torch.cuda.is_available():
263
  predictor.to("cuda")
264
+ if inference_state:
265
+ inference_state["device"] = "cuda"
266
  if torch.cuda.get_device_properties(0).major >= 8:
267
  torch.backends.cuda.matmul.allow_tf32 = True
268
  torch.backends.cudnn.allow_tf32 = True
 
353
  predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
354
  if torch.cuda.is_available():
355
  predictor.to("cuda")
356
+ if inference_state:
357
+ inference_state["device"] = "cuda"
358
  if torch.cuda.get_device_properties(0).major >= 8:
359
  torch.backends.cuda.matmul.allow_tf32 = True
360
  torch.backends.cudnn.allow_tf32 = True