roman-bachmann committed
Commit d4afa02 · verified · Parent: 26aa516

Update app.py

Files changed (1)
app.py +3 -2
app.py CHANGED
@@ -25,10 +25,11 @@ if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
     # Detect if bf16 is enabled or not
     enable_bf16 = detect_bf16_support()
+    print(f'Device: {device}, GPU type: {gpu_type}')
+    print('BF16 enabled:', enable_bf16)
 else:
     device, power_device, enable_bf16 = "cpu", "CPU", False
-print(f'Device: {device}, GPU type: {gpu_type}')
-print('BF16 enabled:', enable_bf16)
+    print('Running on CPU')


 # The flag below controls whether to allow TF32 on matmul. This flag defaults to False in PyTorch 1.12 and later.
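The two removed print calls sat after the if/else and referenced gpu_type, which the else branch shown here never defines (it only sets device, power_device, and enable_bf16), so they would presumably fail on CPU; the patch moves them into the CUDA branch and prints a plain message on the CPU path instead.

detect_bf16_support() itself is not shown in this diff. A minimal, hypothetical sketch of such a helper, assuming PyTorch's built-in torch.cuda.is_bf16_supported() check (only the function name comes from the diff; the body is an assumption):

    import torch

    def detect_bf16_support() -> bool:
        # Hypothetical sketch; the real detect_bf16_support() in app.py is
        # not shown in this diff. torch.cuda.is_bf16_supported() reports
        # whether the current CUDA device can run bfloat16 kernels.
        return torch.cuda.is_available() and torch.cuda.is_bf16_supported()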