oliver-aizip committed on
Commit
14933d0
·
1 Parent(s): 481fe8b

remove all explicit device assigns

Browse files
Files changed (1) hide show
  1. utils/models.py +19 -19
utils/models.py CHANGED
@@ -17,25 +17,25 @@ from .prompts import format_rag_prompt
17
  from .shared import generation_interrupt
18
 
19
  models = {
20
- "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
21
- "Qwen2.5-3b-Instruct": "qwen/qwen2.5-3b-instruct",
22
- "Llama-3.2-1b-Instruct": "meta-llama/llama-3.2-1b-instruct",
23
- "Llama-3.2-3b-Instruct": "meta-llama/llama-3.2-3b-instruct",
24
- "Gemma-3-1b-it": "google/gemma-3-1b-it",
25
- "Gemma-3-4b-it": "google/gemma-3-4b-it",
26
- "Gemma-2-2b-it": "google/gemma-2-2b-it",
27
- "Phi-4-mini-instruct": "microsoft/phi-4-mini-instruct",
28
- "Cogito-v1-preview-llama-3b": "deepcogito/cogito-v1-preview-llama-3b",
29
- "IBM Granite-3.3-2b-instruct": "ibm-granite/granite-3.3-2b-instruct",
30
- # "Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
31
  # #"MiniCPM3-RAG-LoRA": "openbmb/MiniCPM3-RAG-LoRA",
32
  "Qwen3-0.6b": "qwen/qwen3-0.6b",
33
- "Qwen3-1.7b": "qwen/qwen3-1.7b",
34
- "Qwen3-4b": "qwen/qwen3-4b",
35
- "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
36
- "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
37
- "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
38
- "icecream-3b": "aizip-dev/icecream-3b",
39
  }
40
 
41
  tokenizer_cache = {}
@@ -159,7 +159,7 @@ def run_inference(model_name, context, question):
159
  if "bitnet" in model_name.lower():
160
  bitnet_model = BitNetForCausalLM.from_pretrained(
161
  model_name,
162
- device_map="auto",
163
  torch_dtype=torch.bfloat16,
164
  #trust_remote_code=True,
165
  )
@@ -167,7 +167,7 @@ def run_inference(model_name, context, question):
167
  "text-generation",
168
  model=bitnet_model,
169
  tokenizer=tokenizer,
170
- device_map="auto",
171
  #trust_remote_code=True,
172
  torch_dtype=torch.bfloat16,
173
  model_kwargs={
 
17
  from .shared import generation_interrupt
18
 
19
  models = {
20
+ # "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
21
+ # "Qwen2.5-3b-Instruct": "qwen/qwen2.5-3b-instruct",
22
+ # "Llama-3.2-1b-Instruct": "meta-llama/llama-3.2-1b-instruct",
23
+ # "Llama-3.2-3b-Instruct": "meta-llama/llama-3.2-3b-instruct",
24
+ # "Gemma-3-1b-it": "google/gemma-3-1b-it",
25
+ # "Gemma-3-4b-it": "google/gemma-3-4b-it",
26
+ # "Gemma-2-2b-it": "google/gemma-2-2b-it",
27
+ # "Phi-4-mini-instruct": "microsoft/phi-4-mini-instruct",
28
+ # "Cogito-v1-preview-llama-3b": "deepcogito/cogito-v1-preview-llama-3b",
29
+ # "IBM Granite-3.3-2b-instruct": "ibm-granite/granite-3.3-2b-instruct",
30
+ "Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
31
  # #"MiniCPM3-RAG-LoRA": "openbmb/MiniCPM3-RAG-LoRA",
32
  "Qwen3-0.6b": "qwen/qwen3-0.6b",
33
+ # "Qwen3-1.7b": "qwen/qwen3-1.7b",
34
+ # "Qwen3-4b": "qwen/qwen3-4b",
35
+ # "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
36
+ # "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
37
+ # "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
38
+ # "icecream-3b": "aizip-dev/icecream-3b",
39
  }
40
 
41
  tokenizer_cache = {}
 
159
  if "bitnet" in model_name.lower():
160
  bitnet_model = BitNetForCausalLM.from_pretrained(
161
  model_name,
162
+ #device_map="auto",
163
  torch_dtype=torch.bfloat16,
164
  #trust_remote_code=True,
165
  )
 
167
  "text-generation",
168
  model=bitnet_model,
169
  tokenizer=tokenizer,
170
+ #device_map="auto",
171
  #trust_remote_code=True,
172
  torch_dtype=torch.bfloat16,
173
  model_kwargs={