danielhanchen committed
Commit e291270 · verified · 1 Parent(s): d42d17c

Add files using upload-large-folder tool
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+    {%- if messages[0]['content'] is string -%}
+        {%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+    {%- else -%}
+        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+    {%- endif -%}
+    {%- set loop_messages = messages[1:] -%}
+{%- else -%}
+    {%- set first_user_prefix = "" -%}
+    {%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+        {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+    {%- endif -%}
+    {%- if (message['role'] == 'assistant') -%}
+        {%- set role = "model" -%}
+    {%- else -%}
+        {%- set role = message['role'] -%}
+    {%- endif -%}
+    {{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+    {%- if message['content'] is string -%}
+        {{ message['content'] | trim }}
+    {%- elif message['content'] is iterable -%}
+        {%- for item in message['content'] -%}
+            {%- if item['type'] == 'image' -%}
+                {{ '<start_of_image>' }}
+            {%- elif item['type'] == 'text' -%}
+                {{ item['text'] | trim }}
+            {%- endif -%}
+        {%- endfor -%}
+    {%- else -%}
+        {{ raise_exception("Invalid content type") }}
+    {%- endif -%}
+    {{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    {{'<start_of_turn>model
+'}}
+{%- endif -%}
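The new file is the Gemma-style chat template: a system message is folded into the first user turn, roles must strictly alternate, 'assistant' is mapped to 'model', and image content items emit <start_of_image>. A minimal sketch of rendering it via transformers (the repo ID is an assumption):

```python
# Minimal sketch of rendering the template above through transformers'
# chat-template API. The repo ID is hypothetical; substitute the real one.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-4b-it")  # assumed ID

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# add_generation_prompt=True appends '<start_of_turn>model\n' so the model
# begins its reply; the system text becomes the prefix of the first user turn.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```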
config.json CHANGED
@@ -25,65 +25,65 @@
       "multi_modal_projector",
       "merger",
       "modality_projection",
-      "language_model.model.layers.1.mlp",
-      "language_model.model.layers.4.mlp",
-      "language_model.model.layers.2.mlp",
       "language_model.model.layers.5.mlp",
+      "language_model.model.layers.6.mlp",
+      "vision_tower.vision_model.encoder.layers.23.self_attn",
+      "language_model.model.layers.4.mlp",
+      "language_model.model.layers.1.mlp",
+      "vision_tower.vision_model.encoder.layers.24.self_attn",
       "vision_tower.vision_model.encoder.layers.22.self_attn",
-      "language_model.model.layers.3.mlp",
-      "vision_tower.vision_model.encoder.layers.26.self_attn",
-      "language_model.model.layers.0.mlp",
+      "vision_tower.vision_model.encoder.layers.21.self_attn",
       "vision_tower.vision_model.encoder.layers.19.self_attn",
       "vision_tower.vision_model.encoder.layers.25.self_attn",
-      "vision_tower.vision_model.encoder.layers.23.self_attn",
+      "vision_tower.vision_model.encoder.layers.26.self_attn",
+      "language_model.model.layers.3.mlp",
       "vision_tower.vision_model.encoder.layers.25.mlp",
+      "vision_tower.vision_model.encoder.layers.19.mlp",
       "vision_tower.vision_model.encoder.layers.20.self_attn",
-      "vision_tower.vision_model.encoder.layers.24.self_attn",
-      "language_model.model.layers.2.self_attn",
-      "vision_tower.vision_model.encoder.layers.21.self_attn",
+      "vision_tower.vision_model.encoder.layers.17.mlp",
+      "vision_tower.vision_model.encoder.layers.22.mlp",
       "vision_tower.vision_model.encoder.layers.18.self_attn",
-      "vision_tower.vision_model.encoder.layers.19.mlp",
+      "language_model.model.layers.0.mlp",
       "vision_tower.vision_model.encoder.layers.20.mlp",
       "vision_tower.vision_model.encoder.layers.21.mlp",
+      "language_model.model.layers.2.self_attn",
       "vision_tower.vision_model.encoder.layers.15.mlp",
-      "vision_tower.vision_model.encoder.layers.22.mlp",
-      "vision_tower.vision_model.encoder.layers.23.mlp",
-      "vision_tower.vision_model.encoder.layers.17.mlp",
-      "vision_tower.vision_model.encoder.layers.18.mlp",
+      "vision_tower.vision_model.encoder.layers.17.self_attn",
       "vision_tower.vision_model.encoder.layers.24.mlp",
-      "vision_tower.vision_model.encoder.layers.16.self_attn",
       "vision_tower.vision_model.encoder.layers.16.mlp",
-      "vision_tower.vision_model.encoder.layers.17.self_attn",
+      "vision_tower.vision_model.encoder.layers.16.self_attn",
+      "vision_tower.vision_model.encoder.layers.18.mlp",
       "vision_tower.vision_model.encoder.layers.13.mlp",
+      "vision_tower.vision_model.encoder.layers.23.mlp",
+      "vision_tower.vision_model.encoder.layers.10.mlp",
       "vision_tower.vision_model.encoder.layers.14.mlp",
       "vision_tower.vision_model.encoder.layers.15.self_attn",
-      "vision_tower.vision_model.encoder.layers.12.mlp",
+      "vision_tower.vision_model.encoder.layers.11.self_attn",
       "vision_tower.vision_model.encoder.layers.14.self_attn",
-      "vision_tower.vision_model.encoder.layers.9.mlp",
+      "vision_tower.vision_model.encoder.layers.7.mlp",
       "vision_tower.vision_model.encoder.layers.5.mlp",
-      "vision_tower.vision_model.encoder.layers.7.mlp",
-      "vision_tower.vision_model.encoder.layers.9.self_attn",
-      "vision_tower.vision_model.encoder.layers.8.mlp",
-      "vision_tower.vision_model.encoder.layers.10.mlp",
+      "vision_tower.vision_model.encoder.layers.9.mlp",
+      "vision_tower.vision_model.encoder.layers.11.mlp",
+      "vision_tower.vision_model.encoder.layers.12.mlp",
       "vision_tower.vision_model.encoder.layers.10.self_attn",
       "vision_tower.vision_model.encoder.layers.13.self_attn",
-      "vision_tower.vision_model.encoder.layers.11.mlp",
-      "vision_tower.vision_model.encoder.layers.11.self_attn",
+      "vision_tower.vision_model.encoder.layers.8.mlp",
       "vision_tower.vision_model.encoder.layers.12.self_attn",
-      "vision_tower.vision_model.encoder.layers.3.mlp",
       "vision_tower.vision_model.encoder.layers.8.self_attn",
-      "vision_tower.vision_model.encoder.layers.4.self_attn",
-      "vision_tower.vision_model.encoder.layers.6.mlp",
       "vision_tower.vision_model.encoder.layers.4.mlp",
+      "vision_tower.vision_model.encoder.layers.4.self_attn",
+      "vision_tower.vision_model.encoder.layers.9.self_attn",
       "vision_tower.vision_model.encoder.layers.2.mlp",
-      "vision_tower.vision_model.encoder.layers.6.self_attn",
       "vision_tower.vision_model.encoder.layers.7.self_attn",
-      "vision_tower.vision_model.encoder.layers.1.self_attn",
+      "vision_tower.vision_model.encoder.layers.3.mlp",
+      "vision_tower.vision_model.encoder.layers.6.mlp",
       "vision_tower.vision_model.encoder.layers.1.mlp",
-      "vision_tower.vision_model.encoder.layers.3.self_attn",
+      "vision_tower.vision_model.encoder.layers.6.self_attn",
       "vision_tower.vision_model.encoder.layers.5.self_attn",
-      "vision_tower.vision_model.encoder.layers.0.self_attn",
+      "vision_tower.vision_model.encoder.layers.1.self_attn",
+      "vision_tower.vision_model.encoder.layers.3.self_attn",
       "vision_tower.vision_model.encoder.layers.0.mlp",
+      "vision_tower.vision_model.encoder.layers.0.self_attn",
       "vision_tower.vision_model.encoder.layers.2.self_attn",
       "vision_tower.vision_model.encoder.layers.26.mlp"
     ],
@@ -123,7 +123,7 @@
     "vocab_size": 262208
   },
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.0",
+  "transformers_version": "4.52.0.dev0",
   "unsloth_fixed": true,
   "vision_config": {
     "attention_dropout": 0.0,
generation_config.json CHANGED
@@ -9,5 +9,5 @@
   "pad_token_id": 0,
   "top_k": 64,
   "top_p": 0.95,
-  "transformers_version": "4.51.0"
+  "transformers_version": "4.52.0.dev0"
 }
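Beyond the version bump, this file carries the sampling defaults (top_k=64, top_p=0.95). A sketch, with an assumed repo ID, of loading and overriding them:

```python
# Sketch: read the shipped generation defaults; the repo ID is hypothetical.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("unsloth/gemma-3-4b-it")
print(gen_config.top_k, gen_config.top_p, gen_config.pad_token_id)  # 64 0.95 0

# Override per call without editing the file:
gen_config.top_p = 0.9
# model.generate(**inputs, generation_config=gen_config)
```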
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:50fee60341951d70ff28e0480f908480927a1b11e69738457bfe17da34f548c4
-size 4562294331
+oid sha256:853a95036ed1022c4ac7743c00e906a63f2e21c3c0cf285bda2371ecf07e996f
+size 4562294332
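Only the Git LFS pointer changes: a new blob one byte larger than before. A sketch, assuming a completed local download, for checking the file against the pointer's oid and size:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib
import os

path = "model.safetensors"  # assumed local path
expected_oid = "853a95036ed1022c4ac7743c00e906a63f2e21c3c0cf285bda2371ecf07e996f"
expected_size = 4562294332

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```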
tokenizer_config.json CHANGED
@@ -51325,7 +51325,6 @@
   },
   "boi_token": "<start_of_image>",
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
   "eos_token": "<end_of_turn>",