oliver-aizip committed
Commit 0226e6c · 1 Parent(s): 345c0d1

try bitnet with transformers 4.52

Files changed (2):
  1. requirements.txt +1 -1
  2. utils/models.py +17 -18
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 
-transformers>=4.51.0
+transformers>=4.52.0
 pandas>=2.2.3
 accelerate>=1.6.0
 numpy==1.26.4
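
The version floor moves from 4.51.0 to 4.52.0, presumably so the Space can load the BitNet architecture enabled in utils/models.py below. A minimal sanity check one could run before starting the Space (a sketch, not part of this commit; only the version strings come from the diff above):

    # Sketch only: confirm the installed transformers meets the new floor
    # before the Space tries to load microsoft/bitnet-b1.58-2B-4T.
    import transformers
    from packaging.version import Version  # packaging is already a transformers dependency

    assert Version(transformers.__version__) >= Version("4.52.0"), (
        f"transformers {transformers.__version__} predates the >=4.52.0 "
        "requirement introduced by this commit"
    )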
utils/models.py CHANGED
@@ -17,25 +17,25 @@ from .prompts import format_rag_prompt
 from .shared import generation_interrupt
 
 models = {
-    "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
-    "Qwen2.5-3b-Instruct": "qwen/qwen2.5-3b-instruct",
-    "Llama-3.2-1b-Instruct": "meta-llama/llama-3.2-1b-instruct",
-    "Llama-3.2-3b-Instruct": "meta-llama/llama-3.2-3b-instruct",
-    "Gemma-3-1b-it": "google/gemma-3-1b-it",
-    "Gemma-3-4b-it": "google/gemma-3-4b-it",
-    "Gemma-2-2b-it": "google/gemma-2-2b-it",
-    "Phi-4-mini-instruct": "microsoft/phi-4-mini-instruct",
-    "Cogito-v1-preview-llama-3b": "deepcogito/cogito-v1-preview-llama-3b",
-    "IBM Granite-3.3-2b-instruct": "ibm-granite/granite-3.3-2b-instruct",
-    # #"Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
+    # "Qwen2.5-1.5b-Instruct": "qwen/qwen2.5-1.5b-instruct",
+    # "Qwen2.5-3b-Instruct": "qwen/qwen2.5-3b-instruct",
+    # "Llama-3.2-1b-Instruct": "meta-llama/llama-3.2-1b-instruct",
+    # "Llama-3.2-3b-Instruct": "meta-llama/llama-3.2-3b-instruct",
+    # "Gemma-3-1b-it": "google/gemma-3-1b-it",
+    # "Gemma-3-4b-it": "google/gemma-3-4b-it",
+    # "Gemma-2-2b-it": "google/gemma-2-2b-it",
+    # "Phi-4-mini-instruct": "microsoft/phi-4-mini-instruct",
+    # "Cogito-v1-preview-llama-3b": "deepcogito/cogito-v1-preview-llama-3b",
+    # "IBM Granite-3.3-2b-instruct": "ibm-granite/granite-3.3-2b-instruct",
+    "Bitnet-b1.58-2B4T": "microsoft/bitnet-b1.58-2B-4T",
     # #"MiniCPM3-RAG-LoRA": "openbmb/MiniCPM3-RAG-LoRA",
     "Qwen3-0.6b": "qwen/qwen3-0.6b",
-    "Qwen3-1.7b": "qwen/qwen3-1.7b",
-    "Qwen3-4b": "qwen/qwen3-4b",
-    "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
-    "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
-    "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
-    "icecream-3b": "aizip-dev/icecream-3b",
+    # "Qwen3-1.7b": "qwen/qwen3-1.7b",
+    # "Qwen3-4b": "qwen/qwen3-4b",
+    # "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    # "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
+    # "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
+    # "icecream-3b": "aizip-dev/icecream-3b",
 }
 
 tokenizer_cache = {}
@@ -128,7 +128,6 @@ def run_inference(model_name, context, question):
         tokenizer_kwargs["enable_thinking"] = False
 
     try:
-        print("REACHED HERE BEFORE tokenizer")
         if model_name in tokenizer_cache:
             tokenizer = tokenizer_cache[model_name]
         else:
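
For reference, a minimal sketch of exercising the entry this commit enables. Only the model id comes from the `models` dict above; the standard Auto* loading path, the bfloat16 dtype, and the prompt are assumptions for illustration, not this repo's inference code:

    # Sketch: load the BitNet checkpoint the Space now points at.
    # Assumes transformers >= 4.52 (per requirements.txt) ships the bitnet
    # architecture; bfloat16 is an assumption, not repo config.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "microsoft/bitnet-b1.58-2B-4T"  # from the models dict above
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

    messages = [{"role": "user", "content": "Say hello."}]  # illustrative prompt
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    output = model.generate(inputs, max_new_tokens=32)
    print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))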