Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -21,8 +21,6 @@ def preload_models(model_choices):
         torch_dtype=torch.bfloat16,
         trust_remote_code=True,
         token=os.environ.get("token"),
-        device_map="cpu",
-        low_cpu_mem_usage=True
     )
 
     # Load tokenizer
@@ -46,7 +44,7 @@ def get_model_pipeline(model_name):
         raise ValueError(f"Model {model_name} not found in preloaded models")
 
     # Move model to GPU
-    model = LOADED_MODELS[model_name]
+    model = LOADED_MODELS[model_name]
     tokenizer = LOADED_TOKENIZERS[model_name]
 
     # Create pipeline with the GPU model
@@ -55,6 +53,7 @@ def get_model_pipeline(model_name):
         model=model,
         tokenizer=tokenizer,
         torch_dtype=torch.bfloat16,
+        device="cuda"
     )
 
     return pipe, model
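
Read together, the hunks drop the explicit CPU placement (device_map="cpu", low_cpu_mem_usage=True) from preload_models and instead pass device="cuda" when the pipeline is built in get_model_pipeline. Below is a minimal sketch of how the two functions might look after this commit; the imports, the LOADED_MODELS / LOADED_TOKENIZERS caches, the AutoModelForCausalLM class, and the "text-generation" task are assumptions filled in around the lines visible in the diff, not taken from it.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Assumed module-level caches (not shown in the diff).
LOADED_MODELS = {}
LOADED_TOKENIZERS = {}


def preload_models(model_choices):
    for model_name in model_choices:
        # Load each model at startup; after this commit there is no explicit
        # device_map="cpu" / low_cpu_mem_usage=True, so default placement is used.
        LOADED_MODELS[model_name] = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            token=os.environ.get("token"),
        )
        # Load tokenizer
        LOADED_TOKENIZERS[model_name] = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
            token=os.environ.get("token"),
        )


def get_model_pipeline(model_name):
    if model_name not in LOADED_MODELS:
        raise ValueError(f"Model {model_name} not found in preloaded models")

    # Move model to GPU
    model = LOADED_MODELS[model_name]
    tokenizer = LOADED_TOKENIZERS[model_name]

    # Create pipeline with the GPU model; device="cuda" (added in this commit)
    # has the pipeline place the model on the GPU.
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        device="cuda",
    )

    return pipe, model

On a Space "Running on Zero", get_model_pipeline would typically be called from a handler decorated with @spaces.GPU, so the CUDA device only needs to be attached while a request is being served.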