lazybug committed
Commit 20f93fb · verified · 1 parent: 2c9c8fc

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. LICENSE +201 -0
  2. README.md +117 -0
  3. UPDATE.md +105 -0
  4. checkpoints/llava-v1.5-7b-pretrain/.gitattributes +35 -0
  5. checkpoints/llava-v1.5-7b-pretrain/README.md +41 -0
  6. checkpoints/llava-v1.5-7b-pretrain/config.json +38 -0
  7. checkpoints/llava-v1.5-7b-pretrain/mm_projector.bin +3 -0
  8. checkpoints/llava-v1.5-7b-pretrain/trainer_state.json +0 -0
  9. hf_models/clip-vit-large-patch14-336/.gitattributes +28 -0
  10. hf_models/clip-vit-large-patch14-336/README.md +50 -0
  11. hf_models/clip-vit-large-patch14-336/config.json +179 -0
  12. hf_models/clip-vit-large-patch14-336/merges.txt +0 -0
  13. hf_models/clip-vit-large-patch14-336/preprocessor_config.json +19 -0
  14. hf_models/clip-vit-large-patch14-336/special_tokens_map.json +1 -0
  15. hf_models/clip-vit-large-patch14-336/tokenizer.json +0 -0
  16. hf_models/clip-vit-large-patch14-336/tokenizer_config.json +1 -0
  17. hf_models/clip-vit-large-patch14-336/vocab.json +0 -0
  18. hf_models/vicuna-7b-v1.5/.gitattributes +35 -0
  19. hf_models/vicuna-7b-v1.5/README.md +48 -0
  20. hf_models/vicuna-7b-v1.5/config.json +26 -0
  21. hf_models/vicuna-7b-v1.5/generation_config.json +8 -0
  22. hf_models/vicuna-7b-v1.5/pytorch_model.bin.index.json +330 -0
  23. hf_models/vicuna-7b-v1.5/special_tokens_map.json +24 -0
  24. hf_models/vicuna-7b-v1.5/tokenizer.model +3 -0
  25. hf_models/vicuna-7b-v1.5/tokenizer_config.json +35 -0
  26. llava/__init__.py +1 -0
  27. llava/constants.py +13 -0
  28. llava/conversation.py +449 -0
  29. llava/mm_utils.py +247 -0
  30. llava/model/__init__.py +4 -0
  31. llava/model/__pycache__/__init__.cpython-310.pyc +0 -0
  32. llava/model/__pycache__/builder.cpython-310.pyc +0 -0
  33. llava/model/__pycache__/llava_arch.cpython-310.pyc +0 -0
  34. llava/model/apply_delta.py +48 -0
  35. llava/model/builder.py +168 -0
  36. llava/model/consolidate.py +29 -0
  37. llava/model/language_model/__pycache__/llava_llama_1stg.cpython-310.pyc +0 -0
  38. llava/model/language_model/cache_py/modeling_attn_mask_utils.py +501 -0
  39. llava/model/language_model/llava_llama_1stg.py +633 -0
  40. llava/model/llava_arch.py +375 -0
  41. llava/model/make_delta.py +52 -0
  42. llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc +0 -0
  43. llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-310.pyc +0 -0
  44. llava/model/multimodal_encoder/builder.py +11 -0
  45. llava/model/multimodal_encoder/clip_encoder.py +88 -0
  46. llava/model/multimodal_projector/__pycache__/builder.cpython-310.pyc +0 -0
  47. llava/model/multimodal_projector/builder.py +51 -0
  48. llava/model/utils.py +20 -0
  49. llava/serve/__init__.py +0 -0
  50. llava/serve/cli.py +128 -0
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,117 @@
+ # VoCo-LLaMA: Towards Vision Compression with Large Language Models
+
+ [Xubing Ye](https://yxxxb.github.io/), [Yukang Gan](https://scholar.google.com/citations?user=8rltp9AAAAAJ&hl=zh-CN), [Xiaoke Huang](https://xk-huang.github.io/), [Yixiao Ge](https://geyixiao.com/), [Yansong Tang](https://andytang15.github.io)
+
+ <p align="left">
+ <a href='https://arxiv.org/abs/2406.12275v2'>
+ <img src='https://img.shields.io/badge/Arxiv-2406.12275-A42C25?style=flat&logo=arXiv&logoColor=A42C25'></a>
+ <a href='https://arxiv.org/pdf/2406.12275v2'>
+ <img src='https://img.shields.io/badge/Paper-PDF-purple?style=flat&logo=arXiv&logoColor=yellow'></a>
+ <a href='https://yxxxb.github.io/VoCo-LLaMA-page/'>
+ <img src='https://img.shields.io/badge/Project-Page-%23df5b46?style=flat&logo=Google%20chrome&logoColor=%23df5b46'></a>
+ </p>
+
+ ## TL;DR
+
+ We propose VoCo-LLaMA, the first approach to compress vision tokens using LLMs. By fully exploiting the way LLMs understand vision tokens, our method can compress hundreds of vision tokens into a single VoCo token while minimizing visual information loss.
+
+ VoCo-LLaMA also demonstrates video understanding when trained continually on time-series sequences of compressed video-frame tokens.
+
+ VoCo-LLaMA presents a promising way to unlock the full potential of VLMs' context window.
+
+ ![image](https://i.imgur.com/wznshA6.jpeg)
+
+ ## News
+
+ - [x] **[2024/06/17]** Upload paper and release vision compression code.
+
+ ## Preparation
+
+ ### Install
+
+ 1. Clone this repository and navigate to the VoCo-LLaMA folder
+
+ ```bash
+ git clone https://github.com/Yxxxb/VoCo-LLaMA.git
+ cd VoCo-LLaMA
+ ```
+
+ 2. Install the package
+
+ ```Shell
+ conda create -n voco_llama python=3.10 -y
+ conda activate voco_llama
+ pip install --upgrade pip # enable PEP 660 support
+ pip install -e .
+ ```
+
+ 3. Install additional packages for training
+
+ ```
+ pip install -e ".[train]"
+ pip install flash-attn --no-build-isolation
+ cp VoCo-LLaMA/llava/model/language_model/cache_py/modeling_attn_mask_utils.py /data/miniconda3/envs/voco_llama/lib/python3.10/site-packages/transformers/modeling_attn_mask_utils.py
+ ```
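Editor's note: the destination path in the `cp` command above is specific to one machine (`/data/miniconda3/...`). If your environment lives elsewhere, a minimal sketch for locating the installed copy of `modeling_attn_mask_utils.py` inside the active `transformers` package (assuming a transformers release that ships this module):

```python
# Print the path that the `cp` above should overwrite in *this* environment.
import os
import transformers

print(os.path.join(os.path.dirname(transformers.__file__), "modeling_attn_mask_utils.py"))
```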
+
+ ### Data and Pre-trained weights
+
+ VoCo-LLaMA training requires only visual instruction fine-tuning. Please download the aligned LLaVA checkpoints ([base LLM and projection layers](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)), the annotations of the LLaVA instruction-tuning data [llava_v1_5_mix665k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_v1_5_mix665k.json), and the images from the constituent datasets:
+
+ - COCO: [train2017](http://images.cocodataset.org/zips/train2017.zip)
+ - GQA: [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip)
+ - OCR-VQA: [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing); we save all files as `.jpg`
+ - TextVQA: [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip)
+ - VisualGenome: [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip)
+
+ After downloading all of them, organize the data as follows in `./playground/data`:
+
+ ```
+ ├── coco
+ │   └── train2017
+ ├── gqa
+ │   └── images
+ ├── ocr_vqa
+ │   └── images
+ ├── textvqa
+ │   └── train_images
+ └── vg
+     ├── VG_100K
+     └── VG_100K_2
+ ```
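Editor's note: a quick sanity check that this layout is in place before launching training (a sketch; the directory names are exactly those listed above):

```python
# Check that the expected ./playground/data layout exists.
from pathlib import Path

root = Path("./playground/data")
expected = [
    "coco/train2017",
    "gqa/images",
    "ocr_vqa/images",
    "textvqa/train_images",
    "vg/VG_100K",
    "vg/VG_100K_2",
]
missing = [d for d in expected if not (root / d).is_dir()]
print("missing:", missing or "none")
```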
+
+ ## Train
+
+ VoCo-LLaMA is trained on 8 A100 GPUs with 40GB memory. To train on fewer GPUs, you can reduce the `per_device_train_batch_size` and increase the `gradient_accumulation_steps` accordingly. Always keep the global batch size the same: `per_device_train_batch_size` x `gradient_accumulation_steps` x `num_gpus`.
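Editor's note: as a worked example using numbers that appear in this repository, the zero3.json in UPDATE.md uses a micro batch of 1 with 16 accumulation steps, which on 8 GPUs gives the global batch size of 128; halving the GPU count while doubling the accumulation steps keeps it unchanged.

```python
# Global batch size stays fixed when trading GPUs for accumulation steps.
def global_batch(per_device_train_batch_size, gradient_accumulation_steps, num_gpus):
    return per_device_train_batch_size * gradient_accumulation_steps * num_gpus

print(global_batch(1, 16, 8))   # 128, the train_batch_size in UPDATE.md's zero3.json
print(global_batch(1, 32, 4))   # still 128 on half the GPUs
```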
+
+ Train VoCo-LLaMA with vision instruction tuning by running the following command:
+
+ ```
+ bash scripts/finetune.sh
+ ```
+
+ ## Evaluation
+
+ For evaluations of visual understanding, we follow the relevant settings in LLaVA. Please refer to the official LLaVA [repository](https://github.com/haotian-liu/LLaVA/blob/main/docs/Evaluation.md) for details of data setup and testing.
+
+ ## Citation
+
+ If you find this work useful, please consider citing our paper:
+
+ ```bibtex
+ @article{ye2024voco,
+   author={Ye, Xubing and Gan, Yukang and Huang, Xiaoke and Ge, Yixiao and Shan, Ying and Tang, Yansong},
+   title={{VoCo-LLaMA: Towards Vision Compression with Large Language Models}},
+   journal={arXiv preprint arXiv:2406.12275},
+   year={2024},
+ }
+ ```
+
+ ## Acknowledgement
+
+ - [LLaVA](https://github.com/haotian-liu/LLaVA): the codebase we built upon.
+ - [Vicuna](https://github.com/lm-sys/FastChat): our base model Vicuna-7B, with its amazing language capabilities!
+
UPDATE.md ADDED
@@ -0,0 +1,105 @@
+ # Problems encountered during reproduction
+ 1. The peft version is too high
+ ```
+ pip install peft==0.6.0
+ ```
+
+ 2. zero3.json must contain a `"train_batch_size"` field
+
+ 3. The CUDA version does not match the DeepSpeed build
+ ```
+ Find torch and deepspeed packages that match your CUDA version
+ ```
+
+ 4. The zero3.json given by DeepSeek used the CPU-offloaded optimizer; set the offload devices to "none"
+ ```
+ "offload_optimizer": {
+     "device": "none",
+     "pin_memory": true
+ },
+ "offload_param": {
+     "device": "none",
+     "pin_memory": true
+ },
+ ```
+
+ 5. Error: "no_sync context manager is incompatible with gradient partitioning logic of ZeRO stage 3"
+ ```
+ # Sometimes Baidu is more useful than an AI assistant
+ pip install deepspeed==0.15.4
+ ```
+
+ 6. zero3.json
+ ```
+ {
+     "bf16": {
+         "enabled": true
+     },
+     "zero_optimization": {
+         "stage": 3,
+         "offload_optimizer": {
+             "device": "none",
+             "pin_memory": true
+         },
+         "offload_param": {
+             "device": "none",
+             "pin_memory": true
+         },
+         "overlap_comm": true,
+         "contiguous_gradients": true,
+         "sub_group_size": 1e9,
+         "stage3_max_live_parameters": 1e9,
+         "stage3_max_reuse_distance": 1e9
+     },
+     "gradient_accumulation_steps": 16,
+     "train_micro_batch_size_per_gpu": 1,
+     "train_batch_size": 128,
+     "gradient_clipping": "auto",
+     "steps_per_print": 10,
+     "wall_clock_breakdown": false
+ }
+ ```
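Editor's note: DeepSpeed requires `train_batch_size` = `train_micro_batch_size_per_gpu` × `gradient_accumulation_steps` × world size, which is what issue 2 above runs into. A small sanity check (a sketch; the file path "zero3.json" and the world size of 8 GPUs are assumptions taken from the README):

```python
# Verify the DeepSpeed batch-size identity in zero3.json.
import json

cfg = json.load(open("zero3.json"))
world_size = 8  # number of GPUs used for training
expected = cfg["train_micro_batch_size_per_gpu"] * cfg["gradient_accumulation_steps"] * world_size
assert cfg["train_batch_size"] == expected, "train_batch_size must equal micro_batch * grad_accum * world_size"
print("zero3.json batch sizes are consistent:", cfg["train_batch_size"])
```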
+
+ 7. How to download all of the OCR-VQA images
+ ```
+ https://github.com/haotian-liu/LLaVA/issues/1618
+ ```
+
+ 8. An error is raised when saving the model. In the generation_config.json of lmsys/vicuna-7b-v1.5,
+ delete the following two lines, since evaluation uses greedy search
+ ```
+ "temperature": 0.9,
+ "top_p": 0.6,
+ ```
+
+ # Pitfalls when reproducing the evaluation
+
+ 1. The checkpoint's directory name must contain "llava"
+ 2. LlamaModel's forward function does not handle the case where the input is a single token (during inference, from the second forward pass onward, only one token is fed in). To support the single-token case, make the following changes
+ ```
+ # Oddly enough, the author did remember that voco_loc_back needs to be shifted by +1
+
+ https://github.com/Yxxxb/VoCo-LLaMA/blob/385e7974a866cf73f1cabc8c29cb7a2180fd4dfd/llava/model/language_model/llava_llama_1stg.py#L271
+
+ Change it to
+
+ # Overall idea: build the mask for the entire sequence on every forward pass, regardless of whether a KV cache is present
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+     attention_mask,
+     (batch_size, seq_length + past_key_values_length),  # was (batch_size, seq_length); now both cases take the same code path
+     inputs_embeds,  # only .dtype and isinstance() are used, so passing this is fine
+     0,  # was past_key_values_length
+ )
+ # ------------------------------------------
+ # https://github.com/Yxxxb/VoCo-LLaMA/blob/385e7974a866cf73f1cabc8c29cb7a2180fd4dfd/llava/model/language_model/llava_llama_1stg.py#L305
+
+ Above that line, add
+
+ # after the attention_mask has been processed
+ attention_mask = attention_mask[:, :, -seq_length:, :]
+ ```
+
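Editor's note: a standalone illustration of the shape logic behind this workaround (a sketch in plain PyTorch, not the VoCo-LLaMA code). With a KV cache, only the last `seq_length` query positions are actually computed, so after building a causal mask over the full past-plus-current sequence, keeping just those query rows gives a mask of the right shape.

```python
import torch

past_len, seq_length = 5, 1                      # decoding step: one new token, 5 cached
total_len = past_len + seq_length

# Causal mask over the full (past + current) sequence: 0 = attend, -inf = masked.
full = torch.full((total_len, total_len), float("-inf")).triu(1)
full = full[None, None]                          # broadcastable (batch, head, q, k) shape

# Keep only the query rows for the tokens being computed this step.
mask = full[:, :, -seq_length:, :]
print(mask.shape)                                # torch.Size([1, 1, 1, 6])
```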
checkpoints/llava-v1.5-7b-pretrain/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
checkpoints/llava-v1.5-7b-pretrain/README.md ADDED
@@ -0,0 +1,41 @@
1
+ ---
2
+ inference: false
3
+ ---
4
+
5
+ <br>
6
+ <br>
7
+
8
+ # LLaVA Model Card
9
+
10
+ This is a pretrained checkpoint, you can use it to instruct tune your multimodal models.
11
+
12
+ Check out the instructions [here](https://github.com/haotian-liu/LLaVA/blob/main/README.md#visual-instruction-tuning)
13
+
14
+ ## Model details
15
+
16
+ **Model type:**
17
+ LLaVA is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data.
18
+ It is an auto-regressive language model, based on the transformer architecture.
19
+
20
+ **Model date:**
21
+ LLaVA-v1.5-MLP2x-336px-Pretrain-Vicuna-7B-v1.5 was trained in September 2023.
22
+
23
+ **Paper or resources for more information:**
24
+ https://llava-vl.github.io/
25
+
26
+ ## License
27
+ Llama 2 is licensed under the LLAMA 2 Community License,
28
+ Copyright (c) Meta Platforms, Inc. All Rights Reserved.
29
+
30
+ **Where to send questions or comments about the model:**
31
+ https://github.com/haotian-liu/LLaVA/issues
32
+
33
+ ## Intended use
34
+ **Primary intended uses:**
35
+ The primary use of LLaVA is research on large multimodal models and chatbots.
36
+
37
+ **Primary intended users:**
38
+ The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence.
39
+
40
+ ## Training dataset
41
+ - 558K filtered image-text pairs from LAION/CC/SBU, captioned by BLIP.
checkpoints/llava-v1.5-7b-pretrain/config.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "_name_or_path": "./checkpoints/vicuna-7b-v1-5",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 4096,
10
+ "image_aspect_ratio": "square",
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 11008,
13
+ "max_position_embeddings": 4096,
14
+ "mm_hidden_size": 1024,
15
+ "mm_patch_merge_type": "flat",
16
+ "mm_projector_type": "mlp2x_gelu",
17
+ "mm_use_im_patch_token": false,
18
+ "mm_use_im_start_end": false,
19
+ "mm_vision_select_feature": "patch",
20
+ "mm_vision_select_layer": -2,
21
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
22
+ "model_type": "llava",
23
+ "num_attention_heads": 32,
24
+ "num_hidden_layers": 32,
25
+ "num_key_value_heads": 32,
26
+ "pad_token_id": 0,
27
+ "pretraining_tp": 1,
28
+ "rms_norm_eps": 1e-05,
29
+ "rope_scaling": null,
30
+ "tie_word_embeddings": false,
31
+ "torch_dtype": "float16",
32
+ "transformers_version": "4.31.0",
33
+ "tune_mm_mlp_adapter": true,
34
+ "tune_mm_vision_resampler": false,
35
+ "use_cache": true,
36
+ "use_mm_proj": true,
37
+ "vocab_size": 32000
38
+ }
checkpoints/llava-v1.5-7b-pretrain/mm_projector.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a8d5a8fc6030bd16d8ee3df3b20a21c79190a3b0932a798d05f52a1ffdfc215
3
+ size 41961085
checkpoints/llava-v1.5-7b-pretrain/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
hf_models/clip-vit-large-patch14-336/.gitattributes ADDED
@@ -0,0 +1,28 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.wasm filter=lfs diff=lfs merge=lfs -text
25
+ *.xz filter=lfs diff=lfs merge=lfs -text
26
+ *.zip filter=lfs diff=lfs merge=lfs -text
27
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
28
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
hf_models/clip-vit-large-patch14-336/README.md ADDED
@@ -0,0 +1,50 @@
1
+ ---
2
+ tags:
3
+ - generated_from_keras_callback
4
+ widget:
5
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
6
+ candidate_labels: playing music, playing sports
7
+ example_title: Cat & Dog
8
+ model-index:
9
+ - name: clip-vit-large-patch14-336
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
14
+ probably proofread and complete it, then remove this comment. -->
15
+
16
+ # clip-vit-large-patch14-336
17
+
18
+ This model was trained from scratch on an unknown dataset.
19
+ It achieves the following results on the evaluation set:
20
+
21
+
22
+ ## Model description
23
+
24
+ More information needed
25
+
26
+ ## Intended uses & limitations
27
+
28
+ More information needed
29
+
30
+ ## Training and evaluation data
31
+
32
+ More information needed
33
+
34
+ ## Training procedure
35
+
36
+ ### Training hyperparameters
37
+
38
+ The following hyperparameters were used during training:
39
+ - optimizer: None
40
+ - training_precision: float32
41
+
42
+ ### Training results
43
+
44
+
45
+
46
+ ### Framework versions
47
+
48
+ - Transformers 4.21.3
49
+ - TensorFlow 2.8.2
50
+ - Tokenizers 0.12.1
hf_models/clip-vit-large-patch14-336/config.json ADDED
@@ -0,0 +1,179 @@
1
+ {
2
+ "_name_or_path": "openai/clip-vit-large-patch14-336",
3
+ "architectures": [
4
+ "CLIPModel"
5
+ ],
6
+ "initializer_factor": 1.0,
7
+ "logit_scale_init_value": 2.6592,
8
+ "model_type": "clip",
9
+ "projection_dim": 768,
10
+ "text_config": {
11
+ "_name_or_path": "",
12
+ "add_cross_attention": false,
13
+ "architectures": null,
14
+ "attention_dropout": 0.0,
15
+ "bad_words_ids": null,
16
+ "bos_token_id": 0,
17
+ "chunk_size_feed_forward": 0,
18
+ "cross_attention_hidden_size": null,
19
+ "decoder_start_token_id": null,
20
+ "diversity_penalty": 0.0,
21
+ "do_sample": false,
22
+ "dropout": 0.0,
23
+ "early_stopping": false,
24
+ "encoder_no_repeat_ngram_size": 0,
25
+ "eos_token_id": 2,
26
+ "exponential_decay_length_penalty": null,
27
+ "finetuning_task": null,
28
+ "forced_bos_token_id": null,
29
+ "forced_eos_token_id": null,
30
+ "hidden_act": "quick_gelu",
31
+ "hidden_size": 768,
32
+ "id2label": {
33
+ "0": "LABEL_0",
34
+ "1": "LABEL_1"
35
+ },
36
+ "initializer_factor": 1.0,
37
+ "initializer_range": 0.02,
38
+ "intermediate_size": 3072,
39
+ "is_decoder": false,
40
+ "is_encoder_decoder": false,
41
+ "label2id": {
42
+ "LABEL_0": 0,
43
+ "LABEL_1": 1
44
+ },
45
+ "layer_norm_eps": 1e-05,
46
+ "length_penalty": 1.0,
47
+ "max_length": 20,
48
+ "max_position_embeddings": 77,
49
+ "min_length": 0,
50
+ "model_type": "clip_text_model",
51
+ "no_repeat_ngram_size": 0,
52
+ "num_attention_heads": 12,
53
+ "num_beam_groups": 1,
54
+ "num_beams": 1,
55
+ "num_hidden_layers": 12,
56
+ "num_return_sequences": 1,
57
+ "output_attentions": false,
58
+ "output_hidden_states": false,
59
+ "output_scores": false,
60
+ "pad_token_id": 1,
61
+ "prefix": null,
62
+ "problem_type": null,
63
+ "projection_dim": 768,
64
+ "pruned_heads": {},
65
+ "remove_invalid_values": false,
66
+ "repetition_penalty": 1.0,
67
+ "return_dict": true,
68
+ "return_dict_in_generate": false,
69
+ "sep_token_id": null,
70
+ "task_specific_params": null,
71
+ "temperature": 1.0,
72
+ "tf_legacy_loss": false,
73
+ "tie_encoder_decoder": false,
74
+ "tie_word_embeddings": true,
75
+ "tokenizer_class": null,
76
+ "top_k": 50,
77
+ "top_p": 1.0,
78
+ "torch_dtype": null,
79
+ "torchscript": false,
80
+ "transformers_version": "4.21.3",
81
+ "typical_p": 1.0,
82
+ "use_bfloat16": false,
83
+ "vocab_size": 49408
84
+ },
85
+ "text_config_dict": {
86
+ "hidden_size": 768,
87
+ "intermediate_size": 3072,
88
+ "num_attention_heads": 12,
89
+ "num_hidden_layers": 12,
90
+ "projection_dim": 768
91
+ },
92
+ "torch_dtype": "float32",
93
+ "transformers_version": null,
94
+ "vision_config": {
95
+ "_name_or_path": "",
96
+ "add_cross_attention": false,
97
+ "architectures": null,
98
+ "attention_dropout": 0.0,
99
+ "bad_words_ids": null,
100
+ "bos_token_id": null,
101
+ "chunk_size_feed_forward": 0,
102
+ "cross_attention_hidden_size": null,
103
+ "decoder_start_token_id": null,
104
+ "diversity_penalty": 0.0,
105
+ "do_sample": false,
106
+ "dropout": 0.0,
107
+ "early_stopping": false,
108
+ "encoder_no_repeat_ngram_size": 0,
109
+ "eos_token_id": null,
110
+ "exponential_decay_length_penalty": null,
111
+ "finetuning_task": null,
112
+ "forced_bos_token_id": null,
113
+ "forced_eos_token_id": null,
114
+ "hidden_act": "quick_gelu",
115
+ "hidden_size": 1024,
116
+ "id2label": {
117
+ "0": "LABEL_0",
118
+ "1": "LABEL_1"
119
+ },
120
+ "image_size": 336,
121
+ "initializer_factor": 1.0,
122
+ "initializer_range": 0.02,
123
+ "intermediate_size": 4096,
124
+ "is_decoder": false,
125
+ "is_encoder_decoder": false,
126
+ "label2id": {
127
+ "LABEL_0": 0,
128
+ "LABEL_1": 1
129
+ },
130
+ "layer_norm_eps": 1e-05,
131
+ "length_penalty": 1.0,
132
+ "max_length": 20,
133
+ "min_length": 0,
134
+ "model_type": "clip_vision_model",
135
+ "no_repeat_ngram_size": 0,
136
+ "num_attention_heads": 16,
137
+ "num_beam_groups": 1,
138
+ "num_beams": 1,
139
+ "num_channels": 3,
140
+ "num_hidden_layers": 24,
141
+ "num_return_sequences": 1,
142
+ "output_attentions": false,
143
+ "output_hidden_states": false,
144
+ "output_scores": false,
145
+ "pad_token_id": null,
146
+ "patch_size": 14,
147
+ "prefix": null,
148
+ "problem_type": null,
149
+ "projection_dim": 768,
150
+ "pruned_heads": {},
151
+ "remove_invalid_values": false,
152
+ "repetition_penalty": 1.0,
153
+ "return_dict": true,
154
+ "return_dict_in_generate": false,
155
+ "sep_token_id": null,
156
+ "task_specific_params": null,
157
+ "temperature": 1.0,
158
+ "tf_legacy_loss": false,
159
+ "tie_encoder_decoder": false,
160
+ "tie_word_embeddings": true,
161
+ "tokenizer_class": null,
162
+ "top_k": 50,
163
+ "top_p": 1.0,
164
+ "torch_dtype": null,
165
+ "torchscript": false,
166
+ "transformers_version": "4.21.3",
167
+ "typical_p": 1.0,
168
+ "use_bfloat16": false
169
+ },
170
+ "vision_config_dict": {
171
+ "hidden_size": 1024,
172
+ "image_size": 336,
173
+ "intermediate_size": 4096,
174
+ "num_attention_heads": 16,
175
+ "num_hidden_layers": 24,
176
+ "patch_size": 14,
177
+ "projection_dim": 768
178
+ }
179
+ }
hf_models/clip-vit-large-patch14-336/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
hf_models/clip-vit-large-patch14-336/preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "crop_size": 336,
3
+ "do_center_crop": true,
4
+ "do_normalize": true,
5
+ "do_resize": true,
6
+ "feature_extractor_type": "CLIPFeatureExtractor",
7
+ "image_mean": [
8
+ 0.48145466,
9
+ 0.4578275,
10
+ 0.40821073
11
+ ],
12
+ "image_std": [
13
+ 0.26862954,
14
+ 0.26130258,
15
+ 0.27577711
16
+ ],
17
+ "resample": 3,
18
+ "size": 336
19
+ }
hf_models/clip-vit-large-patch14-336/special_tokens_map.json ADDED
@@ -0,0 +1 @@
1
+ {"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
hf_models/clip-vit-large-patch14-336/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
hf_models/clip-vit-large-patch14-336/tokenizer_config.json ADDED
@@ -0,0 +1 @@
1
+ {"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "errors": "replace", "do_lower_case": true, "name_or_path": "openai/clip-vit-base-patch32", "model_max_length": 77, "special_tokens_map_file": "/home/suraj/.cache/huggingface/transformers/18a566598f286c9139f88160c99f84eec492a26bd22738fa9cb44d5b7e0a5c76.cce1206abbad28826f000510f22f354e53e66a97f7c23745a7dfe27609cc07f5", "tokenizer_class": "CLIPTokenizer"}
hf_models/clip-vit-large-patch14-336/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
hf_models/vicuna-7b-v1.5/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
hf_models/vicuna-7b-v1.5/README.md ADDED
@@ -0,0 +1,48 @@
1
+ ---
2
+ inference: false
3
+ license: llama2
4
+ ---
5
+
6
+ # Vicuna Model Card
7
+
8
+ ## Model Details
9
+
10
+ Vicuna is a chat assistant trained by fine-tuning Llama 2 on user-shared conversations collected from ShareGPT.
11
+
12
+ - **Developed by:** [LMSYS](https://lmsys.org/)
13
+ - **Model type:** An auto-regressive language model based on the transformer architecture
14
+ - **License:** Llama 2 Community License Agreement
15
+ - **Finetuned from model:** [Llama 2](https://arxiv.org/abs/2307.09288)
16
+
17
+ ### Model Sources
18
+
19
+ - **Repository:** https://github.com/lm-sys/FastChat
20
+ - **Blog:** https://lmsys.org/blog/2023-03-30-vicuna/
21
+ - **Paper:** https://arxiv.org/abs/2306.05685
22
+ - **Demo:** https://chat.lmsys.org/
23
+
24
+ ## Uses
25
+
26
+ The primary use of Vicuna is research on large language models and chatbots.
27
+ The primary intended users of the model are researchers and hobbyists in natural language processing, machine learning, and artificial intelligence.
28
+
29
+ ## How to Get Started with the Model
30
+
31
+ - Command line interface: https://github.com/lm-sys/FastChat#vicuna-weights
32
+ - APIs (OpenAI API, Huggingface API): https://github.com/lm-sys/FastChat/tree/main#api
33
+
34
+ ## Training Details
35
+
36
+ Vicuna v1.5 is fine-tuned from Llama 2 with supervised instruction fine-tuning.
37
+ The training data is around 125K conversations collected from ShareGPT.com.
38
+ See more details in the "Training Details of Vicuna Models" section in the appendix of this [paper](https://arxiv.org/pdf/2306.05685.pdf).
39
+
40
+ ## Evaluation
41
+
42
+ ![Evaluation Results](https://github.com/lm-sys/lm-sys.github.io/blob/main/public/images/webdata/vicuna_v1.5_eval.png?raw=true)
43
+
44
+ Vicuna is evaluated with standard benchmarks, human preference, and LLM-as-a-judge. See more details in this [paper](https://arxiv.org/pdf/2306.05685.pdf) and [leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard).
45
+
46
+ ## Difference between different versions of Vicuna
47
+
48
+ See [vicuna_weights_version.md](https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md)
hf_models/vicuna-7b-v1.5/config.json ADDED
@@ -0,0 +1,26 @@
1
+ {
2
+ "_name_or_path": "vicuna-7b-v1.5",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 4096,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 11008,
12
+ "max_position_embeddings": 4096,
13
+ "model_type": "llama",
14
+ "num_attention_heads": 32,
15
+ "num_hidden_layers": 32,
16
+ "num_key_value_heads": 32,
17
+ "pad_token_id": 0,
18
+ "pretraining_tp": 1,
19
+ "rms_norm_eps": 1e-05,
20
+ "rope_scaling": null,
21
+ "tie_word_embeddings": false,
22
+ "torch_dtype": "float16",
23
+ "transformers_version": "4.31.0",
24
+ "use_cache": true,
25
+ "vocab_size": 32000
26
+ }
hf_models/vicuna-7b-v1.5/generation_config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "bos_token_id": 1,
3
+ "eos_token_id": 2,
4
+ "max_length": 4096,
5
+ "pad_token_id": 0,
6
+ "do_sample": false,
7
+ "transformers_version": "4.31.0"
8
+ }
hf_models/vicuna-7b-v1.5/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,330 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 13476839424
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
7
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
8
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
9
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
10
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
11
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
12
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
13
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
14
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
15
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
16
+ "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
17
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
18
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
19
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
20
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
21
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
22
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
23
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
24
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
25
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
26
+ "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
27
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
28
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
29
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
30
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
31
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
32
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
33
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
34
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
35
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
36
+ "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
37
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
38
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
39
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
40
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
41
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
42
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
43
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
44
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
45
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
46
+ "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
47
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
48
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
49
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
50
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
51
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
52
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
53
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
54
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
55
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
56
+ "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
57
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
58
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
59
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
60
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
61
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
62
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
63
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
64
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
65
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
66
+ "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
67
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
68
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
69
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
70
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
71
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
72
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
73
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
74
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
75
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
76
+ "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
77
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
78
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
79
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
80
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
81
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
82
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
83
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
84
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
85
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
86
+ "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
87
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
88
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
89
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
90
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
91
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
92
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
93
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
94
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
95
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
96
+ "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
97
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
98
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
99
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
100
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
101
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
102
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
103
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
104
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
105
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
106
+ "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
107
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
108
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
109
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
110
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
111
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
112
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
113
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
114
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
115
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
116
+ "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
117
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
118
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
119
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
120
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
121
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
122
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
123
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
124
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
125
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
126
+ "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
127
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
128
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
129
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
130
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
131
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
132
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
133
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
134
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
135
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
136
+ "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
137
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
138
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
139
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
140
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
141
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
142
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
143
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
144
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
145
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
146
+ "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
147
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
148
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
149
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
150
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
151
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
152
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
153
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
154
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
155
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
156
+ "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
157
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
158
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
159
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
160
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
161
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
162
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
163
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
164
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
165
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
166
+ "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
167
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
168
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
169
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
170
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
171
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
172
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
173
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
174
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
175
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
176
+ "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
177
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
178
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
179
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
180
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
181
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
182
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
183
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
184
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
185
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
186
+ "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
187
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
188
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
189
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
190
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
191
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
192
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
193
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
194
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
195
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
196
+ "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
197
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
198
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
199
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
200
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
201
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
202
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
203
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
204
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
205
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
206
+ "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
207
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
208
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
209
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
210
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
211
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
212
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
213
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
214
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
215
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
216
+ "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
217
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
218
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
219
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
220
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
221
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
222
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
223
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
224
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
225
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
226
+ "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
227
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
228
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
229
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
230
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
231
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
232
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
233
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
234
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
235
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
236
+ "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
237
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
238
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
239
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
240
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
241
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
242
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
243
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
244
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
245
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
246
+ "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
247
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
248
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
249
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
250
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
251
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
252
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
253
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
254
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
255
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
256
+ "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
257
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
258
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
259
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
260
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
261
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
262
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
263
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
264
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
265
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
266
+ "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
267
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
268
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
269
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
270
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
271
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
272
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
273
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
274
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
275
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
276
+ "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
277
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
278
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
279
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
280
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
281
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
282
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
283
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
284
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
285
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
286
+ "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
287
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
288
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
289
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
290
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
291
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
292
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
293
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
294
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
295
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
296
+ "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
297
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
298
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
299
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
300
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
301
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
302
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
303
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
304
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
305
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
306
+ "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
307
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
308
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
309
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
310
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
311
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
312
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
313
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
314
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
315
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
316
+ "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
317
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
318
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
319
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
320
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
321
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
322
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
323
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
324
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
325
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
326
+ "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
327
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
328
+ "model.norm.weight": "pytorch_model-00002-of-00002.bin"
329
+ }
330
+ }
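
The closing braces above end the "weight_map" object of pytorch_model.bin.index.json, the standard Hugging Face index that maps each parameter name to the shard file that stores it (for example, layers 24-31 and the final norm sit in the second shard). A minimal sketch of reading the index by hand, assuming the directory layout of this upload; the variable names are illustrative:

    import json, os
    import torch

    model_dir = "hf_models/vicuna-7b-v1.5"
    with open(os.path.join(model_dir, "pytorch_model.bin.index.json")) as f:
        index = json.load(f)

    weight_map = index["weight_map"]              # parameter name -> shard file name
    shard_file = weight_map["model.norm.weight"]  # "pytorch_model-00002-of-00002.bin"
    shard = torch.load(os.path.join(model_dir, shard_file), map_location="cpu")
    print(shard["model.norm.weight"].shape)       # the tensor stored in that shard
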
hf_models/vicuna-7b-v1.5/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
hf_models/vicuna-7b-v1.5/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
hf_models/vicuna-7b-v1.5/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<s>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "clean_up_tokenization_spaces": false,
13
+ "eos_token": {
14
+ "__type": "AddedToken",
15
+ "content": "</s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "legacy": false,
22
+ "model_max_length": 4096,
23
+ "pad_token": null,
24
+ "padding_side": "right",
25
+ "sp_model_kwargs": {},
26
+ "tokenizer_class": "LlamaTokenizer",
27
+ "unk_token": {
28
+ "__type": "AddedToken",
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ }
35
+ }
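
Together with tokenizer.model and special_tokens_map.json above, this config is all transformers needs to instantiate the Vicuna tokenizer: a slow LlamaTokenizer with add_bos_token=true and a 4096-token context. A minimal sketch, assuming transformers and sentencepiece are installed:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("hf_models/vicuna-7b-v1.5", use_fast=False)
    print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
    print(tok.model_max_length)                         # 4096, from tokenizer_config.json
    ids = tok("Hello").input_ids                         # starts with the <s> id (add_bos_token=true)
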
llava/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .model import LlavaLlamaForCausalLM
llava/constants.py ADDED
@@ -0,0 +1,13 @@
1
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
2
+ WORKER_HEART_BEAT_INTERVAL = 15
3
+
4
+ LOGDIR = "."
5
+
6
+ # Model Constants
7
+ IGNORE_INDEX = -100
8
+ IMAGE_TOKEN_INDEX = -200
9
+ DEFAULT_IMAGE_TOKEN = "<image>"
10
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
11
+ DEFAULT_IM_START_TOKEN = "<im_start>"
12
+ DEFAULT_IM_END_TOKEN = "<im_end>"
13
+ IMAGE_PLACEHOLDER = "<image-placeholder>"
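
These constants define the placeholder scheme used throughout the rest of the code: prompts carry a literal "<image>" marker, tokenizer_image_token (in llava/mm_utils.py below) swaps it for the out-of-vocabulary id -200 so the model knows where to splice visual features, and -100 is the label value that PyTorch's CrossEntropyLoss ignores. A small illustrative sketch (the question text is made up):

    from llava.constants import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX, IGNORE_INDEX

    prompt = DEFAULT_IMAGE_TOKEN + "\nWhat is shown in this image?"
    # IMAGE_TOKEN_INDEX (-200) later marks the image slot in the token ids;
    # IGNORE_INDEX (-100) masks prompt tokens out of the training loss.
    print(prompt)
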
llava/conversation.py ADDED
@@ -0,0 +1,449 @@
1
+ import dataclasses
2
+ from enum import auto, Enum
3
+ from typing import List, Tuple
4
+ import base64
5
+ from io import BytesIO
6
+ from PIL import Image
7
+
8
+
9
+ class SeparatorStyle(Enum):
10
+ """Different separator style."""
11
+ SINGLE = auto()
12
+ TWO = auto()
13
+ MPT = auto()
14
+ PLAIN = auto()
15
+ LLAMA_2 = auto()
16
+
17
+
18
+ @dataclasses.dataclass
19
+ class Conversation:
20
+ """A class that keeps all conversation history."""
21
+ system: str
22
+ roles: List[str]
23
+ messages: List[List[str]]
24
+ offset: int
25
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
26
+ sep: str = "###"
27
+ sep2: str = None
28
+ version: str = "Unknown"
29
+
30
+ skip_next: bool = False
31
+
32
+ def get_prompt(self):
33
+ messages = self.messages
34
+ if len(messages) > 0 and type(messages[0][1]) is tuple:
35
+ messages = self.messages.copy()
36
+ init_role, init_msg = messages[0].copy()
37
+ init_msg = init_msg[0].replace("<image>", "").strip()
38
+ if 'mmtag' in self.version:
39
+ messages[0] = (init_role, init_msg)
40
+ messages.insert(0, (self.roles[0], "<Image><image></Image>"))
41
+ messages.insert(1, (self.roles[1], "Received."))
42
+ else:
43
+ messages[0] = (init_role, "<image>\n" + init_msg)
44
+
45
+ if self.sep_style == SeparatorStyle.SINGLE:
46
+ ret = self.system + self.sep
47
+ for role, message in messages:
48
+ if message:
49
+ if type(message) is tuple:
50
+ message, _, _ = message
51
+ ret += role + ": " + message + self.sep
52
+ else:
53
+ ret += role + ":"
54
+ elif self.sep_style == SeparatorStyle.TWO:
55
+ seps = [self.sep, self.sep2]
56
+ ret = self.system + seps[0]
57
+ for i, (role, message) in enumerate(messages):
58
+ if message:
59
+ if type(message) is tuple:
60
+ message, _, _ = message
61
+ ret += role + ": " + message + seps[i % 2]
62
+ else:
63
+ ret += role + ":"
64
+ elif self.sep_style == SeparatorStyle.MPT:
65
+ ret = self.system + self.sep
66
+ for role, message in messages:
67
+ if message:
68
+ if type(message) is tuple:
69
+ message, _, _ = message
70
+ ret += role + message + self.sep
71
+ else:
72
+ ret += role
73
+ elif self.sep_style == SeparatorStyle.LLAMA_2:
74
+ wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
75
+ wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
76
+ ret = ""
77
+
78
+ for i, (role, message) in enumerate(messages):
79
+ if i == 0:
80
+ assert message, "first message should not be none"
81
+ assert role == self.roles[0], "first message should come from user"
82
+ if message:
83
+ if type(message) is tuple:
84
+ message, _, _ = message
85
+ if i == 0: message = wrap_sys(self.system) + message
86
+ if i % 2 == 0:
87
+ message = wrap_inst(message)
88
+ ret += self.sep + message
89
+ else:
90
+ ret += " " + message + " " + self.sep2
91
+ else:
92
+ ret += ""
93
+ ret = ret.lstrip(self.sep)
94
+ elif self.sep_style == SeparatorStyle.PLAIN:
95
+ seps = [self.sep, self.sep2]
96
+ ret = self.system
97
+ for i, (role, message) in enumerate(messages):
98
+ if message:
99
+ if type(message) is tuple:
100
+ message, _, _ = message
101
+ ret += message + seps[i % 2]
102
+ else:
103
+ ret += ""
104
+ else:
105
+ raise ValueError(f"Invalid style: {self.sep_style}")
106
+
107
+ return ret
108
+
109
+ def append_message(self, role, message):
110
+ self.messages.append([role, message])
111
+
112
+ def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=1344, min_len=672):
113
+ if image_process_mode == "Pad":
114
+ def expand2square(pil_img, background_color=(122, 116, 104)):
115
+ width, height = pil_img.size
116
+ if width == height:
117
+ return pil_img
118
+ elif width > height:
119
+ result = Image.new(pil_img.mode, (width, width), background_color)
120
+ result.paste(pil_img, (0, (width - height) // 2))
121
+ return result
122
+ else:
123
+ result = Image.new(pil_img.mode, (height, height), background_color)
124
+ result.paste(pil_img, ((height - width) // 2, 0))
125
+ return result
126
+ image = expand2square(image)
127
+ elif image_process_mode in ["Default", "Crop"]:
128
+ pass
129
+ elif image_process_mode == "Resize":
130
+ image = image.resize((336, 336))
131
+ else:
132
+ raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
133
+ if max(image.size) > max_len:
134
+ max_hw, min_hw = max(image.size), min(image.size)
135
+ aspect_ratio = max_hw / min_hw
136
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
137
+ longest_edge = int(shortest_edge * aspect_ratio)
138
+ W, H = image.size
139
+ if H > W:
140
+ H, W = longest_edge, shortest_edge
141
+ else:
142
+ H, W = shortest_edge, longest_edge
143
+ image = image.resize((W, H))
144
+ if return_pil:
145
+ return image
146
+ else:
147
+ buffered = BytesIO()
148
+ image.save(buffered, format=image_format)
149
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
150
+ return img_b64_str
151
+
152
+ def get_images(self, return_pil=False):
153
+ images = []
154
+ for i, (role, msg) in enumerate(self.messages[self.offset:]):
155
+ if i % 2 == 0:
156
+ if type(msg) is tuple:
157
+ msg, image, image_process_mode = msg
158
+ image = self.process_image(image, image_process_mode, return_pil=return_pil)
159
+ images.append(image)
160
+ return images
161
+
162
+ def to_gradio_chatbot(self):
163
+ ret = []
164
+ for i, (role, msg) in enumerate(self.messages[self.offset:]):
165
+ if i % 2 == 0:
166
+ if type(msg) is tuple:
167
+ msg, image, image_process_mode = msg
168
+ img_b64_str = self.process_image(
169
+ image, "Default", return_pil=False,
170
+ image_format='JPEG')
171
+ img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
172
+ msg = img_str + msg.replace('<image>', '').strip()
173
+ ret.append([msg, None])
174
+ else:
175
+ ret.append([msg, None])
176
+ else:
177
+ ret[-1][-1] = msg
178
+ return ret
179
+
180
+ def copy(self):
181
+ return Conversation(
182
+ system=self.system,
183
+ roles=self.roles,
184
+ messages=[[x, y] for x, y in self.messages],
185
+ offset=self.offset,
186
+ sep_style=self.sep_style,
187
+ sep=self.sep,
188
+ sep2=self.sep2,
189
+ version=self.version)
190
+
191
+ def dict(self):
192
+ if len(self.get_images()) > 0:
193
+ return {
194
+ "system": self.system,
195
+ "roles": self.roles,
196
+ "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
197
+ "offset": self.offset,
198
+ "sep": self.sep,
199
+ "sep2": self.sep2,
200
+ }
201
+ return {
202
+ "system": self.system,
203
+ "roles": self.roles,
204
+ "messages": self.messages,
205
+ "offset": self.offset,
206
+ "sep": self.sep,
207
+ "sep2": self.sep2,
208
+ }
209
+
210
+
211
+ conv_vicuna_v0 = Conversation(
212
+ system="A chat between a curious human and an artificial intelligence assistant. "
213
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
214
+ roles=("Human", "Assistant"),
215
+ messages=(
216
+ ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
217
+ ("Assistant",
218
+ "Renewable energy sources are those that can be replenished naturally in a relatively "
219
+ "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
220
+ "Non-renewable energy sources, on the other hand, are finite and will eventually be "
221
+ "depleted, such as coal, oil, and natural gas. Here are some key differences between "
222
+ "renewable and non-renewable energy sources:\n"
223
+ "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
224
+ "energy sources are finite and will eventually run out.\n"
225
+ "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
226
+ "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
227
+ "and other negative effects.\n"
228
+ "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
229
+ "have lower operational costs than non-renewable sources.\n"
230
+ "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
231
+ "locations than non-renewable sources.\n"
232
+ "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
233
+ "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
234
+ "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
235
+ "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
236
+ ),
237
+ offset=2,
238
+ sep_style=SeparatorStyle.SINGLE,
239
+ sep="###",
240
+ )
241
+
242
+ conv_vicuna_v1 = Conversation(
243
+ system="A chat between a curious user and an artificial intelligence assistant. "
244
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
245
+ roles=("USER", "ASSISTANT"),
246
+ version="v1",
247
+ messages=(),
248
+ offset=0,
249
+ sep_style=SeparatorStyle.TWO,
250
+ sep=" ",
251
+ sep2="</s>",
252
+ )
253
+
254
+ voco_conv_vicuna_v1 = Conversation(
255
+ system="A chat between a curious user and an artificial intelligence assistant of the image. "
256
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
257
+ roles=("USER", "ASSISTANT"),
258
+ version="v1",
259
+ messages=(),
260
+ offset=0,
261
+ sep_style=SeparatorStyle.TWO,
262
+ sep=" ",
263
+ sep2="</s>",
264
+ )
265
+
266
+ voco_stg2_vid1_conv_vicuna_v1 = Conversation(
267
+ system="A chat between a curious user and an artificial intelligence assistant of the video. "
268
+ "The assistant carefully watch the video and pay attention to the cause and sequence of events, the detail and movement of objects, and the action and pose of persons. Based on his observations, give the answer that best addresses the question.\n",
269
+ roles=("USER", "ASSISTANT"),
270
+ version="v1",
271
+ messages=(),
272
+ offset=0,
273
+ sep_style=SeparatorStyle.TWO,
274
+ sep=" ",
275
+ sep2="</s>",
276
+ )
277
+
278
+ voco_stg2_vid2_conv_vicuna_v1 = Conversation(
279
+ system="A chat between a curious user and an artificial intelligence assistant of the video. "
280
+ "The assistant gives helpful, detailed, and polite answers to the user's questions. "
281
+ "The assistant carefully watch the video and pay attention to the cause and sequence of events, the detail and movement of objects, and the action and pose of persons. Based on the observations, give the answer that best addresses the question.",
282
+ roles=("USER", "ASSISTANT"),
283
+ version="v1",
284
+ messages=(),
285
+ offset=0,
286
+ sep_style=SeparatorStyle.TWO,
287
+ sep=" ",
288
+ sep2="</s>",
289
+ )
290
+
291
+ voco_stg2_conv_vicuna_v1 = Conversation(
292
+ system="A chat between a curious user and an artificial intelligence assistant of the video. "
293
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
294
+ roles=("USER", "ASSISTANT"),
295
+ version="v1",
296
+ messages=(),
297
+ offset=0,
298
+ sep_style=SeparatorStyle.TWO,
299
+ sep=" ",
300
+ sep2="</s>",
301
+ )
302
+
303
+ conv_llama_2 = Conversation(
304
+ system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
305
+
306
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
307
+ roles=("USER", "ASSISTANT"),
308
+ version="llama_v2",
309
+ messages=(),
310
+ offset=0,
311
+ sep_style=SeparatorStyle.LLAMA_2,
312
+ sep="<s>",
313
+ sep2="</s>",
314
+ )
315
+
316
+ conv_llava_llama_2 = Conversation(
317
+ system="You are a helpful language and vision assistant. "
318
+ "You are able to understand the visual content that the user provides, "
319
+ "and assist the user with a variety of tasks using natural language.",
320
+ roles=("USER", "ASSISTANT"),
321
+ version="llama_v2",
322
+ messages=(),
323
+ offset=0,
324
+ sep_style=SeparatorStyle.LLAMA_2,
325
+ sep="<s>",
326
+ sep2="</s>",
327
+ )
328
+
329
+ conv_mpt = Conversation(
330
+ system="""<|im_start|>system
331
+ A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
332
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
333
+ version="mpt",
334
+ messages=(),
335
+ offset=0,
336
+ sep_style=SeparatorStyle.MPT,
337
+ sep="<|im_end|>",
338
+ )
339
+
340
+ conv_llava_plain = Conversation(
341
+ system="",
342
+ roles=("", ""),
343
+ messages=(
344
+ ),
345
+ offset=0,
346
+ sep_style=SeparatorStyle.PLAIN,
347
+ sep="\n",
348
+ )
349
+
350
+ conv_llava_v0 = Conversation(
351
+ system="A chat between a curious human and an artificial intelligence assistant. "
352
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
353
+ roles=("Human", "Assistant"),
354
+ messages=(
355
+ ),
356
+ offset=0,
357
+ sep_style=SeparatorStyle.SINGLE,
358
+ sep="###",
359
+ )
360
+
361
+ conv_llava_v0_mmtag = Conversation(
362
+ system="A chat between a curious user and an artificial intelligence assistant. "
363
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
364
+ "The visual content will be provided with the following format: <Image>visual content</Image>.",
365
+ roles=("Human", "Assistant"),
366
+ messages=(
367
+ ),
368
+ offset=0,
369
+ sep_style=SeparatorStyle.SINGLE,
370
+ sep="###",
371
+ version="v0_mmtag",
372
+ )
373
+
374
+ conv_llava_v1 = Conversation(
375
+ system="A chat between a curious human and an artificial intelligence assistant. "
376
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
377
+ roles=("USER", "ASSISTANT"),
378
+ version="v1",
379
+ messages=(),
380
+ offset=0,
381
+ sep_style=SeparatorStyle.TWO,
382
+ sep=" ",
383
+ sep2="</s>",
384
+ )
385
+
386
+ conv_llava_v1_mmtag = Conversation(
387
+ system="A chat between a curious user and an artificial intelligence assistant. "
388
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
389
+ "The visual content will be provided with the following format: <Image>visual content</Image>.",
390
+ roles=("USER", "ASSISTANT"),
391
+ messages=(),
392
+ offset=0,
393
+ sep_style=SeparatorStyle.TWO,
394
+ sep=" ",
395
+ sep2="</s>",
396
+ version="v1_mmtag",
397
+ )
398
+
399
+ conv_mistral_instruct = Conversation(
400
+ system="",
401
+ roles=("USER", "ASSISTANT"),
402
+ version="llama_v2",
403
+ messages=(),
404
+ offset=0,
405
+ sep_style=SeparatorStyle.LLAMA_2,
406
+ sep="",
407
+ sep2="</s>",
408
+ )
409
+
410
+ conv_chatml_direct = Conversation(
411
+ system="""<|im_start|>system
412
+ Answer the questions.""",
413
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
414
+ version="mpt",
415
+ messages=(),
416
+ offset=0,
417
+ sep_style=SeparatorStyle.MPT,
418
+ sep="<|im_end|>",
419
+ )
420
+
421
+ default_conversation = conv_vicuna_v1
422
+ voco_default_conversation = voco_conv_vicuna_v1
423
+ voco_stg2_default_conversation = voco_stg2_conv_vicuna_v1
424
+ voco_stg2_vid1_default_conversation = voco_stg2_vid1_conv_vicuna_v1
425
+ voco_stg2_vid2_default_conversation = voco_stg2_vid2_conv_vicuna_v1
426
+ conv_templates = {
427
+ "default": conv_vicuna_v0,
428
+ "v0": conv_vicuna_v0,
429
+ "v1": conv_vicuna_v1,
430
+ "vicuna_v1": conv_vicuna_v1,
431
+ "llama_2": conv_llama_2,
432
+ "mistral_instruct": conv_mistral_instruct,
433
+ "chatml_direct": conv_chatml_direct,
434
+ "mistral_direct": conv_chatml_direct,
435
+
436
+ "plain": conv_llava_plain,
437
+ "v0_plain": conv_llava_plain,
438
+ "llava_v0": conv_llava_v0,
439
+ "v0_mmtag": conv_llava_v0_mmtag,
440
+ "llava_v1": conv_llava_v1,
441
+ "v1_mmtag": conv_llava_v1_mmtag,
442
+ "llava_llama_2": conv_llava_llama_2,
443
+
444
+ "mpt": conv_mpt,
445
+ }
446
+
447
+
448
+ if __name__ == "__main__":
449
+ print(default_conversation.get_prompt())
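
In practice a template is used by copying it, appending one user turn and an empty assistant turn, and calling get_prompt(); the resulting string ends with "ASSISTANT:" so generation continues from there. A minimal sketch following the `__main__` block above (the question is illustrative):

    from llava.conversation import conv_templates

    conv = conv_templates["llava_v1"].copy()  # copy first; the templates are shared module-level objects
    conv.append_message(conv.roles[0], "<image>\nWhat is unusual about this image?")
    conv.append_message(conv.roles[1], None)  # None leaves the assistant turn open
    prompt = conv.get_prompt()                # "... USER: <image>\n... ASSISTANT:"
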
llava/mm_utils.py ADDED
@@ -0,0 +1,247 @@
1
+ from PIL import Image
2
+ from io import BytesIO
3
+ import base64
4
+ import torch
5
+ import math
6
+ import ast
7
+
8
+ from transformers import StoppingCriteria
9
+ from llava.constants import IMAGE_TOKEN_INDEX
10
+
11
+
12
+ def select_best_resolution(original_size, possible_resolutions):
13
+ """
14
+ Selects the best resolution from a list of possible resolutions based on the original size.
15
+
16
+ Args:
17
+ original_size (tuple): The original size of the image in the format (width, height).
18
+ possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
19
+
20
+ Returns:
21
+ tuple: The best fit resolution in the format (width, height).
22
+ """
23
+ original_width, original_height = original_size
24
+ best_fit = None
25
+ max_effective_resolution = 0
26
+ min_wasted_resolution = float('inf')
27
+
28
+ for width, height in possible_resolutions:
29
+ scale = min(width / original_width, height / original_height)
30
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
31
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
32
+ wasted_resolution = (width * height) - effective_resolution
33
+
34
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
35
+ max_effective_resolution = effective_resolution
36
+ min_wasted_resolution = wasted_resolution
37
+ best_fit = (width, height)
38
+
39
+ return best_fit
40
+
41
+
42
+ def resize_and_pad_image(image, target_resolution):
43
+ """
44
+ Resize and pad an image to a target resolution while maintaining aspect ratio.
45
+
46
+ Args:
47
+ image (PIL.Image.Image): The input image.
48
+ target_resolution (tuple): The target resolution (width, height) of the image.
49
+
50
+ Returns:
51
+ PIL.Image.Image: The resized and padded image.
52
+ """
53
+ original_width, original_height = image.size
54
+ target_width, target_height = target_resolution
55
+
56
+ scale_w = target_width / original_width
57
+ scale_h = target_height / original_height
58
+
59
+ if scale_w < scale_h:
60
+ new_width = target_width
61
+ new_height = min(math.ceil(original_height * scale_w), target_height)
62
+ else:
63
+ new_height = target_height
64
+ new_width = min(math.ceil(original_width * scale_h), target_width)
65
+
66
+ # Resize the image
67
+ resized_image = image.resize((new_width, new_height))
68
+
69
+ new_image = Image.new('RGB', (target_width, target_height), (0, 0, 0))
70
+ paste_x = (target_width - new_width) // 2
71
+ paste_y = (target_height - new_height) // 2
72
+ new_image.paste(resized_image, (paste_x, paste_y))
73
+
74
+ return new_image
75
+
76
+
77
+ def divide_to_patches(image, patch_size):
78
+ """
79
+ Divides an image into patches of a specified size.
80
+
81
+ Args:
82
+ image (PIL.Image.Image): The input image.
83
+ patch_size (int): The size of each patch.
84
+
85
+ Returns:
86
+ list: A list of PIL.Image.Image objects representing the patches.
87
+ """
88
+ patches = []
89
+ width, height = image.size
90
+ for i in range(0, height, patch_size):
91
+ for j in range(0, width, patch_size):
92
+ box = (j, i, j + patch_size, i + patch_size)
93
+ patch = image.crop(box)
94
+ patches.append(patch)
95
+
96
+ return patches
97
+
98
+
99
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
100
+ """
101
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
102
+
103
+ Args:
104
+ image_size (tuple): The size of the input image in the format (width, height).
105
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
106
+ patch_size (int): The size of each image patch.
107
+
108
+ Returns:
109
+ tuple: The shape of the image patch grid in the format (width, height).
110
+ """
111
+ if type(grid_pinpoints) is list:
112
+ possible_resolutions = grid_pinpoints
113
+ else:
114
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
115
+ width, height = select_best_resolution(image_size, possible_resolutions)
116
+ return width // patch_size, height // patch_size
117
+
118
+
119
+ def process_anyres_image(image, processor, grid_pinpoints):
120
+ """
121
+ Process an image with variable resolutions.
122
+
123
+ Args:
124
+ image (PIL.Image.Image): The input image to be processed.
125
+ processor: The image processor object.
126
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
127
+
128
+ Returns:
129
+ torch.Tensor: A tensor containing the processed image patches.
130
+ """
131
+ if type(grid_pinpoints) is list:
132
+ possible_resolutions = grid_pinpoints
133
+ else:
134
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
135
+ best_resolution = select_best_resolution(image.size, possible_resolutions)
136
+ image_padded = resize_and_pad_image(image, best_resolution)
137
+
138
+ patches = divide_to_patches(image_padded, processor.crop_size['height'])
139
+
140
+ image_original_resize = image.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
141
+
142
+ image_patches = [image_original_resize] + patches
143
+ image_patches = [processor.preprocess(image_patch, return_tensors='pt')['pixel_values'][0]
144
+ for image_patch in image_patches]
145
+ return torch.stack(image_patches, dim=0)
146
+
147
+
148
+ def load_image_from_base64(image):
149
+ return Image.open(BytesIO(base64.b64decode(image)))
150
+
151
+
152
+ def expand2square(pil_img, background_color):
153
+ width, height = pil_img.size
154
+ if width == height:
155
+ return pil_img
156
+ elif width > height:
157
+ result = Image.new(pil_img.mode, (width, width), background_color)
158
+ result.paste(pil_img, (0, (width - height) // 2))
159
+ return result
160
+ else:
161
+ result = Image.new(pil_img.mode, (height, height), background_color)
162
+ result.paste(pil_img, ((height - width) // 2, 0))
163
+ return result
164
+
165
+
166
+ def process_images(images, image_processor, model_cfg):
167
+ image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
168
+ new_images = []
169
+ if image_aspect_ratio == 'pad':
170
+ for image in images:
171
+ image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
172
+ image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
173
+ new_images.append(image)
174
+ elif image_aspect_ratio == "anyres":
175
+ for image in images:
176
+ image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
177
+ new_images.append(image)
178
+ else:
179
+ return image_processor(images, return_tensors='pt')['pixel_values']
180
+ if all(x.shape == new_images[0].shape for x in new_images):
181
+ new_images = torch.stack(new_images, dim=0)
182
+ return new_images
183
+
184
+
185
+ def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
186
+ prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
187
+
188
+ def insert_separator(X, sep):
189
+ return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
190
+
191
+ input_ids = []
192
+ offset = 0
193
+ if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
194
+ offset = 1
195
+ input_ids.append(prompt_chunks[0][0])
196
+
197
+ for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
198
+ input_ids.extend(x[offset:])
199
+
200
+ if return_tensors is not None:
201
+ if return_tensors == 'pt':
202
+ return torch.tensor(input_ids, dtype=torch.long)
203
+ raise ValueError(f'Unsupported tensor type: {return_tensors}')
204
+ return input_ids
205
+
206
+
207
+ def get_model_name_from_path(model_path):
208
+ model_path = model_path.strip("/")
209
+ model_paths = model_path.split("/")
210
+ if model_paths[-1].startswith('checkpoint-'):
211
+ return model_paths[-2] + "_" + model_paths[-1]
212
+ else:
213
+ return model_paths[-1]
214
+
215
+ class KeywordsStoppingCriteria(StoppingCriteria):
216
+ def __init__(self, keywords, tokenizer, input_ids):
217
+ self.keywords = keywords
218
+ self.keyword_ids = []
219
+ self.max_keyword_len = 0
220
+ for keyword in keywords:
221
+ cur_keyword_ids = tokenizer(keyword).input_ids
222
+ if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
223
+ cur_keyword_ids = cur_keyword_ids[1:]
224
+ if len(cur_keyword_ids) > self.max_keyword_len:
225
+ self.max_keyword_len = len(cur_keyword_ids)
226
+ self.keyword_ids.append(torch.tensor(cur_keyword_ids))
227
+ self.tokenizer = tokenizer
228
+ self.start_len = input_ids.shape[1]
229
+
230
+ def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
231
+ offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
232
+ self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
233
+ for keyword_id in self.keyword_ids:
234
+ truncated_output_ids = output_ids[0, -keyword_id.shape[0]:]
235
+ if torch.equal(truncated_output_ids, keyword_id):
236
+ return True
237
+ outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
238
+ for keyword in self.keywords:
239
+ if keyword in outputs:
240
+ return True
241
+ return False
242
+
243
+ def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
244
+ outputs = []
245
+ for i in range(output_ids.shape[0]):
246
+ outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))
247
+ return all(outputs)
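
The helpers above come together at inference time: tokenizer_image_token turns a prompt containing "<image>" into ids with the -200 placeholder spliced in, and KeywordsStoppingCriteria halts generation once a keyword such as the EOS string appears. A minimal sketch; the tokenizer path matches this upload and the prompt text is illustrative:

    from transformers import AutoTokenizer
    from llava.constants import IMAGE_TOKEN_INDEX
    from llava.mm_utils import tokenizer_image_token, KeywordsStoppingCriteria, get_model_name_from_path

    tokenizer = AutoTokenizer.from_pretrained("hf_models/vicuna-7b-v1.5", use_fast=False)

    prompt = "USER: <image>\nDescribe the image. ASSISTANT:"
    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0)
    stopping = KeywordsStoppingCriteria(["</s>"], tokenizer, input_ids)

    print(get_model_name_from_path("/path/to/llava-v1.5-7b"))  # -> "llava-v1.5-7b"
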
llava/model/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ try:
2
+ from .language_model.llava_llama_1stg import LlavaLlamaForCausalLM, LlavaConfig # train compress
3
+ except:
4
+ pass
llava/model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (275 Bytes).
 
llava/model/__pycache__/builder.cpython-310.pyc ADDED
Binary file (4.98 kB).
 
llava/model/__pycache__/llava_arch.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
llava/model/apply_delta.py ADDED
@@ -0,0 +1,48 @@
1
+ """
2
+ Usage:
3
+ python3 -m llava.model.apply_delta --base-model-path ~/model_weights/llama-7b --target-model-path ~/model_weights/vicuna-7b --delta-path lmsys/vicuna-7b-delta
4
+ """
5
+ import argparse
6
+
7
+ import torch
8
+ from tqdm import tqdm
9
+ from transformers import AutoTokenizer, AutoModelForCausalLM
10
+ from llava import LlavaLlamaForCausalLM
11
+
12
+
13
+ def apply_delta(base_model_path, target_model_path, delta_path):
14
+ print("Loading base model")
15
+ base = AutoModelForCausalLM.from_pretrained(
16
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17
+
18
+ print("Loading delta")
19
+ delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
20
+ delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
21
+
22
+ print("Applying delta")
23
+ for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
24
+ if name not in base.state_dict():
25
+ assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
26
+ continue
27
+ if param.data.shape == base.state_dict()[name].shape:
28
+ param.data += base.state_dict()[name]
29
+ else:
30
+ assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \
31
+ f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
32
+ bparam = base.state_dict()[name]
33
+ param.data[:bparam.shape[0], :bparam.shape[1]] += bparam
34
+
35
+ print("Saving target model")
36
+ delta.save_pretrained(target_model_path)
37
+ delta_tokenizer.save_pretrained(target_model_path)
38
+
39
+
40
+ if __name__ == "__main__":
41
+ parser = argparse.ArgumentParser()
42
+ parser.add_argument("--base-model-path", type=str, required=True)
43
+ parser.add_argument("--target-model-path", type=str, required=True)
44
+ parser.add_argument("--delta-path", type=str, required=True)
45
+
46
+ args = parser.parse_args()
47
+
48
+ apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
llava/model/builder.py ADDED
@@ -0,0 +1,168 @@
1
+ # Copyright 2023 Haotian Liu
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import os
17
+ import warnings
18
+ import shutil
19
+
20
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
21
+ import torch
22
+ from llava.model import *
23
+ from llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
24
+
25
+
26
+ def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", use_flash_attn=False, llava_model = None, **kwargs):
27
+ kwargs = {"device_map": device_map, **kwargs}
28
+
29
+ if device != "cuda":
30
+ kwargs['device_map'] = {"": device}
31
+
32
+ if load_8bit:
33
+ kwargs['load_in_8bit'] = True
34
+ elif load_4bit:
35
+ kwargs['load_in_4bit'] = True
36
+ kwargs['quantization_config'] = BitsAndBytesConfig(
37
+ load_in_4bit=True,
38
+ bnb_4bit_compute_dtype=torch.float16,
39
+ bnb_4bit_use_double_quant=True,
40
+ bnb_4bit_quant_type='nf4'
41
+ )
42
+ else:
43
+ kwargs['torch_dtype'] = torch.float16
44
+
45
+ if use_flash_attn:
46
+ kwargs['attn_implementation'] = 'flash_attention_2'
47
+
48
+ if 'llava' in model_name.lower():
49
+ # Load LLaVA model
50
+ if 'lora' in model_name.lower() and model_base is None:
51
+ warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
52
+ if 'lora' in model_name.lower() and model_base is not None:
53
+ from llava.model.language_model.llava_llama import LlavaConfig
54
+ lora_cfg_pretrained = LlavaConfig.from_pretrained(model_path)
55
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
56
+ print('Loading LLaVA from base model...')
57
+ model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
58
+ token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
59
+ if model.lm_head.weight.shape[0] != token_num:
60
+ model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
61
+ model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
62
+
63
+ print('Loading additional LLaVA weights...')
64
+ if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
65
+ non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
66
+ else:
67
+ # this is probably from HF Hub
68
+ from huggingface_hub import hf_hub_download
69
+ def load_from_hf(repo_id, filename, subfolder=None):
70
+ cache_file = hf_hub_download(
71
+ repo_id=repo_id,
72
+ filename=filename,
73
+ subfolder=subfolder)
74
+ return torch.load(cache_file, map_location='cpu')
75
+ non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
76
+ non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
77
+ if any(k.startswith('model.model.') for k in non_lora_trainables):
78
+ non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
79
+ model.load_state_dict(non_lora_trainables, strict=False)
80
+
81
+ from peft import PeftModel
82
+ print('Loading LoRA weights...')
83
+ model = PeftModel.from_pretrained(model, model_path)
84
+ print('Merging LoRA weights...')
85
+ model = model.merge_and_unload()
86
+ print('Model is loaded...')
87
+ elif model_base is not None:
88
+ # this may be mm projector only
89
+ print('Loading LLaVA from base model...')
90
+ if 'mpt' in model_name.lower():
91
+ if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
92
+ shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
93
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
94
+ cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
95
+ model = LlavaMptForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
96
+ else:
97
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
98
+ cfg_pretrained = AutoConfig.from_pretrained(model_path)
99
+ model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
100
+
101
+ mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
102
+ mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
103
+ model.load_state_dict(mm_projector_weights, strict=False)
104
+ else:
105
+ if 'mpt' in model_name.lower():
106
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
107
+ model = LlavaMptForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
108
+ elif 'mistral' in model_name.lower():
109
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
110
+ model = LlavaMistralForCausalLM.from_pretrained(
111
+ model_path,
112
+ low_cpu_mem_usage=True,
113
+ **kwargs
114
+ )
115
+ else:
116
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
117
+ if llava_model == "initial":
118
+ model = LlavaLlamaForCausalLM.from_pretrained(
119
+ model_path,
120
+ low_cpu_mem_usage=True,
121
+ **kwargs
122
+ )
123
+ else:
124
+ # Load language model
125
+ if model_base is not None:
126
+ # PEFT model
127
+ from peft import PeftModel
128
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
129
+ model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
130
+ print(f"Loading LoRA weights from {model_path}")
131
+ model = PeftModel.from_pretrained(model, model_path)
132
+ print(f"Merging weights")
133
+ model = model.merge_and_unload()
134
+ print('Convert to FP16...')
135
+ model.to(torch.float16)
136
+ else:
137
+ use_fast = False
138
+ if 'mpt' in model_name.lower():
139
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
140
+ model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
141
+ else:
142
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
143
+ model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
144
+
145
+ image_processor = None
146
+
147
+ if 'llava' in model_name.lower():
148
+ mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
149
+ mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
150
+ if mm_use_im_patch_token:
151
+ tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
152
+ if mm_use_im_start_end:
153
+ tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
154
+ model.resize_token_embeddings(len(tokenizer))
155
+
156
+ vision_tower = model.get_vision_tower()
157
+ if not vision_tower.is_loaded:
158
+ vision_tower.load_model(device_map=device_map)
159
+ if device_map != 'auto':
160
+ vision_tower.to(device=device_map, dtype=torch.float16)
161
+ image_processor = vision_tower.image_processor
162
+
163
+ if hasattr(model.config, "max_sequence_length"):
164
+ context_len = model.config.max_sequence_length
165
+ else:
166
+ context_len = 2048
167
+
168
+ return tokenizer, model, image_processor, context_len
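
load_pretrained_model is the single entry point for bringing up a checkpoint for inference: it picks the right branch from model_name/model_base, loads the tokenizer and weights, initializes the CLIP vision tower, and returns everything downstream code needs. A minimal sketch of the plain full-checkpoint path (paths are illustrative; note that in this version of the builder the non-LoRA LLaVA branch only constructs the model when llava_model == "initial"):

    from llava.mm_utils import get_model_name_from_path
    from llava.model.builder import load_pretrained_model

    model_path = "/path/to/llava-v1.5-7b"
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path,
        model_base=None,
        model_name=get_model_name_from_path(model_path),
        llava_model="initial",
        device="cuda",
    )
    # image_processor comes from the vision tower; context_len falls back to 2048
    # unless the model config defines max_sequence_length.
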
llava/model/consolidate.py ADDED
@@ -0,0 +1,29 @@
1
+ """
2
+ Usage:
3
+ python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
4
+ """
5
+ import argparse
6
+
7
+ import torch
8
+ from transformers import AutoTokenizer, AutoModelForCausalLM
9
+ from llava.model import *
10
+ from llava.model.utils import auto_upgrade
11
+
12
+
13
+ def consolidate_ckpt(src_path, dst_path):
14
+ print("Loading model")
15
+ auto_upgrade(src_path)
16
+ src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17
+ src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
18
+ src_model.save_pretrained(dst_path)
19
+ src_tokenizer.save_pretrained(dst_path)
20
+
21
+
22
+ if __name__ == "__main__":
23
+ parser = argparse.ArgumentParser()
24
+ parser.add_argument("--src", type=str, required=True)
25
+ parser.add_argument("--dst", type=str, required=True)
26
+
27
+ args = parser.parse_args()
28
+
29
+ consolidate_ckpt(args.src, args.dst)
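
Besides the CLI invocation shown in the module docstring, `consolidate_ckpt` can be called directly; a small sketch (paths are the same illustrative ones as above, expanded explicitly because `from_pretrained` does not expand `~` itself):

```python
import os

from llava.model.consolidate import consolidate_ckpt

src = os.path.expanduser("~/model_weights/llava-7b")
dst = os.path.expanduser("~/model_weights/llava-7b_consolidate")
consolidate_ckpt(src, dst)  # re-saves the model and tokenizer in consolidated form under dst
```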
llava/model/language_model/__pycache__/llava_llama_1stg.cpython-310.pyc ADDED
Binary file (15.4 kB).
 
llava/model/language_model/cache_py/modeling_attn_mask_utils.py ADDED
@@ -0,0 +1,501 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import torch
18
+
19
+
20
+ @dataclass
21
+ class AttentionMaskConverter:
22
+ """
23
+ A utility attention mask class that allows one to:
24
+ - Create a causal 4d mask
25
+ - Create a causal 4d mask with a sliding window
26
+ - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
27
+ key_value_length) that can be multiplied with attention scores
28
+
29
+ Examples:
30
+
31
+ ```python
32
+ >>> import torch
33
+ >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter
34
+
35
+ >>> converter = AttentionMaskConverter(True)
36
+ >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32)
37
+ tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
38
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
39
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
40
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38],
41
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]])
42
+ ```
43
+
44
+ Parameters:
45
+ is_causal (`bool`):
46
+ Whether the attention mask should be a uni-directional (causal) or bi-directional mask.
47
+
48
+ sliding_window (`int`, *optional*):
49
+ Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer.
50
+ """
51
+
52
+ is_causal: bool
53
+ sliding_window: int
54
+
55
+ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
56
+ self.is_causal = is_causal
57
+ self.sliding_window = sliding_window
58
+
59
+ if self.sliding_window is not None and self.sliding_window <= 0:
60
+ raise ValueError(
61
+ f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
62
+ )
63
+
64
+ def to_causal_4d(
65
+ self,
66
+ batch_size: int,
67
+ query_length: int,
68
+ key_value_length: int,
69
+ dtype: torch.dtype,
70
+ device: Union[torch.device, "str"] = "cpu",
71
+ ) -> Optional[torch.Tensor]:
72
+ """
73
+ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative
74
+ bias to upper right hand triangular matrix (causal mask).
75
+ """
76
+ if not self.is_causal:
77
+ raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")
78
+
79
+ # If shape is not cached, create a new causal mask and cache it
80
+ input_shape = (batch_size, query_length)
81
+ past_key_values_length = key_value_length - query_length
82
+
83
+ # create causal mask
84
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
85
+ causal_4d_mask = None
86
+ if input_shape[-1] > 1 or self.sliding_window is not None:
87
+ causal_4d_mask = self._make_causal_mask(
88
+ input_shape,
89
+ dtype,
90
+ device=device,
91
+ past_key_values_length=past_key_values_length,
92
+ sliding_window=self.sliding_window,
93
+ )
94
+
95
+ return causal_4d_mask
96
+
97
+ def to_4d(
98
+ self,
99
+ attention_mask_2d: torch.Tensor,
100
+ query_length: int,
101
+ dtype: torch.dtype,
102
+ key_value_length: Optional[int] = None,
103
+ ) -> torch.Tensor:
104
+ """
105
+ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
106
+ key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
107
+ causal, a causal mask will be added.
108
+ """
109
+ input_shape = (attention_mask_2d.shape[0], query_length)
110
+
111
+ # create causal mask
112
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
113
+ causal_4d_mask = None
114
+ if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
115
+ if key_value_length is None:
116
+ raise ValueError(
117
+ "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
118
+ )
119
+
120
+ past_key_values_length = key_value_length - query_length
121
+ causal_4d_mask = self._make_causal_mask(
122
+ input_shape,
123
+ dtype,
124
+ device=attention_mask_2d.device,
125
+ past_key_values_length=past_key_values_length,
126
+ sliding_window=self.sliding_window,
127
+ )
128
+ elif self.sliding_window is not None:
129
+ raise NotImplementedError("Sliding window is currently only implemented for causal masking")
130
+
131
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
132
+ expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
133
+ attention_mask_2d.device
134
+ )
135
+
136
+ if causal_4d_mask is not None:
137
+ expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min)
138
+
139
+ # expanded_attn_mask + causal_4d_mask can cause some overflow
140
+ expanded_4d_mask = expanded_attn_mask
141
+
142
+ return expanded_4d_mask
143
+
144
+ @staticmethod
145
+ def _make_causal_mask(
146
+ input_ids_shape: torch.Size,
147
+ dtype: torch.dtype,
148
+ device: torch.device,
149
+ past_key_values_length: int = 0,
150
+ sliding_window: Optional[int] = None,
151
+ ):
152
+ """
153
+ Make causal mask used for bi-directional self-attention.
154
+ """
155
+ bsz, tgt_len = input_ids_shape
156
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
157
+ mask_cond = torch.arange(mask.size(-1), device=device)
158
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
159
+
160
+ mask = mask.to(dtype)
161
+
162
+ if past_key_values_length > 0:
163
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
164
+
165
+ # add lower triangular sliding window mask if necessary
166
+ if sliding_window is not None:
167
+ diagonal = past_key_values_length - sliding_window + 1
168
+
169
+ context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)
170
+ mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min)
171
+
172
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
173
+
174
+ @staticmethod
175
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
176
+ """
177
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
178
+ """
179
+ bsz, src_len = mask.size()
180
+ tgt_len = tgt_len if tgt_len is not None else src_len
181
+
182
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
183
+
184
+ inverted_mask = 1.0 - expanded_mask
185
+
186
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
187
+
188
+ @staticmethod
189
+ def _unmask_unattended(
190
+ expanded_mask: torch.Tensor, attention_mask: torch.Tensor, unmasked_value: Union[bool, float]
191
+ ):
192
+ # fmt: off
193
+ """
194
+ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when
195
+ using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
196
+ Details: https://github.com/pytorch/pytorch/issues/110213
197
+
198
+ `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len].
199
+ `attention_mask` is [bsz, src_seq_len].
200
+
201
+ The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias.
202
+
203
+ For example, if `attention_mask` is
204
+ ```
205
+ [[0, 0, 1],
206
+ [1, 1, 1],
207
+ [0, 1, 1]]
208
+ ```
209
+ and `expanded_mask` is (e.g. here left-padding case)
210
+ ```
211
+ [[[[0, 0, 0],
212
+ [0, 0, 0],
213
+ [0, 0, 1]]],
214
+ [[[1, 0, 0],
215
+ [1, 1, 0],
216
+ [1, 1, 1]]],
217
+ [[[0, 0, 0],
218
+ [0, 1, 0],
219
+ [0, 1, 1]]]]
220
+ ```
221
+ then the modified `expanded_mask` will be
222
+ ```
223
+ [[[[1, 1, 1], <-- modified
224
+ [1, 1, 1], <-- modified
225
+ [0, 0, 1]]],
226
+ [[[1, 0, 0],
227
+ [1, 1, 0],
228
+ [1, 1, 1]]],
229
+ [[[1, 1, 1], <-- modified
230
+ [0, 1, 0],
231
+ [0, 1, 1]]]]
232
+ ```
233
+ """
234
+ # fmt: on
235
+
236
+ # Get the index of the first non-zero value for every sample in the batch.
237
+ # In the above example, indices = [[2], [0], [1]]]
238
+ tmp = torch.arange(attention_mask.shape[1], 0, -1)
239
+ indices = torch.argmax(attention_mask.cpu() * tmp, 1, keepdim=True)
240
+
241
+ # Find the batch indexes that have unattended tokens on the leftmost side (e.g. [0, 0, 1, 1, 1]), for which the first rows of the
242
+ # expanded mask will be completely unattended.
243
+ left_masked_rows = torch.where(indices > 0)[0]
244
+
245
+ if left_masked_rows.shape[0] == 0:
246
+ return expanded_mask
247
+ indices = indices[left_masked_rows]
248
+
249
+ max_len = torch.max(indices)
250
+ range_tensor = torch.arange(max_len).unsqueeze(0)
251
+ range_tensor = range_tensor.repeat(indices.size(0), 1)
252
+
253
+ # Avoid unmasking tokens at relevant target positions (on the row axis), by rather unmasking possibly several times the first row that should always be unmasked as we filtered out the batch above.
254
+ range_tensor[range_tensor >= indices] = 0
255
+
256
+ # TODO: we may drop support for 3D attention mask as the refactor from Patrick maybe dropped this case
257
+ if expanded_mask.dim() == 4:
258
+ num_masks = expanded_mask.shape[1]
259
+ if num_masks == 1:
260
+ # Broadcast [left_masked_rows, 1], [left_masked_rows, max_len]
261
+ mask_slice = (left_masked_rows[:, None], 0, range_tensor)
262
+ else:
263
+ # Broadcast [left_masked_rows, 1, 1], [1, num_masks, 1], [left_masked_rows, 1, max_len]
264
+ mask_slice = (
265
+ left_masked_rows[:, None, None],
266
+ torch.arange(num_masks)[None, :, None],
267
+ range_tensor[:, None, :],
268
+ )
269
+ else:
270
+ # Broadcast [left_masked_rows, 1], [left_masked_rows, max_len]
271
+ mask_slice = (left_masked_rows[:, None], range_tensor)
272
+
273
+ expanded_mask[mask_slice] = unmasked_value
274
+
275
+ return expanded_mask
276
+
277
+
278
+ def _prepare_4d_causal_attention_mask(
279
+ attention_mask: Optional[torch.Tensor],
280
+ input_shape: Union[torch.Size, Tuple, List],
281
+ inputs_embeds: torch.Tensor,
282
+ past_key_values_length: int,
283
+ sliding_window: Optional[int] = None,
284
+ ):
285
+ """
286
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
287
+ `(batch_size, key_value_length)`
288
+
289
+ Args:
290
+ attention_mask (`torch.Tensor` or `None`):
291
+ A 2D attention mask of shape `(batch_size, key_value_length)`
292
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
293
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
294
+ inputs_embeds (`torch.Tensor`):
295
+ The embedded inputs as a torch Tensor.
296
+ past_key_values_length (`int`):
297
+ The length of the key value cache.
298
+ sliding_window (`int`, *optional*):
299
+ If the model uses windowed attention, a sliding window should be passed.
300
+ """
301
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
302
+
303
+ key_value_length = input_shape[-1] + past_key_values_length
304
+
305
+ # 4d mask is passed through the layers
306
+ if attention_mask is not None and len(attention_mask.shape) == 2:
307
+ attention_mask = attn_mask_converter.to_4d(
308
+ attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype
309
+ )
310
+ elif attention_mask is not None and len(attention_mask.shape) == 4:
311
+ expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
312
+ if tuple(attention_mask.shape) != expected_shape:
313
+ raise ValueError(
314
+ f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
315
+ )
316
+ else:
317
+ # if the 4D mask has correct shape - invert it and fill with negative infinity
318
+ inverted_mask = 1.0 - attention_mask
319
+ attention_mask = inverted_mask.masked_fill(
320
+ inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min
321
+ )
322
+ else:
323
+ attention_mask = attn_mask_converter.to_causal_4d(
324
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
325
+ )
326
+
327
+ return attention_mask
328
+
329
+
330
+ # Adapted from _prepare_4d_causal_attention_mask
331
+ def _prepare_4d_causal_attention_mask_for_sdpa(
332
+ attention_mask: Optional[torch.Tensor],
333
+ input_shape: Union[torch.Size, Tuple, List],
334
+ inputs_embeds: torch.Tensor,
335
+ past_key_values_length: int,
336
+ sliding_window: Optional[int] = None,
337
+ ):
338
+ """
339
+ Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`.
340
+
341
+ In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases `query_length == 1` and
342
+ `key_value_length == query_length`, and rely instead on SDPA `is_causal` argument to use causal/non-causal masks,
343
+ allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed).
344
+ """
345
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
346
+
347
+ key_value_length = input_shape[-1] + past_key_values_length
348
+ batch_size, query_length = input_shape
349
+
350
+ # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`
351
+ # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
352
+ # TODO: Fix this as well when using torchdynamo with fullgraph=True.
353
+ is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy)
354
+
355
+ if attention_mask is not None:
356
+ # 4d mask is passed through
357
+ if len(attention_mask.shape) == 4:
358
+ expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
359
+ if tuple(attention_mask.shape) != expected_shape:
360
+ raise ValueError(
361
+ f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
362
+ )
363
+ else:
364
+ # if the 4D mask has correct shape - invert it and fill with negative infinity
365
+ inverted_mask = 1.0 - attention_mask.to(inputs_embeds.dtype)
366
+ attention_mask = inverted_mask.masked_fill(
367
+ inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min
368
+ )
369
+ return attention_mask
370
+
371
+ elif not is_tracing and torch.all(attention_mask == 1):
372
+ if query_length == 1:
373
+ # For query_length == 1, causal attention and bi-directional attention are the same.
374
+ attention_mask = None
375
+ elif key_value_length == query_length:
376
+ pass
377
+ # attention_mask = None
378
+ else:
379
+ # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore the attention mask, as SDPA causal mask generation
380
+ # may be wrong. We will set `is_causal=False` in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
381
+ # Reference: https://github.com/pytorch/pytorch/issues/108108
382
+ pass
383
+ elif query_length > 1 and key_value_length != query_length:
384
+ # See the comment above (https://github.com/pytorch/pytorch/issues/108108).
385
+ # Ugly: we set it to True here to dispatch in the following controlflow to `to_causal_4d`.
386
+ attention_mask = True
387
+ elif is_tracing:
388
+ raise ValueError(
389
+ 'Attention using SDPA can not be traced with torch.jit.trace when no attention_mask is provided. To solve this issue, please either load your model with the argument `attn_implementation="eager"` or pass an attention_mask input when tracing the model.'
390
+ )
391
+
392
+ if attention_mask is None:
393
+ expanded_4d_mask = None
394
+ elif attention_mask is True:
395
+ expanded_4d_mask = attn_mask_converter.to_causal_4d(
396
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
397
+ )
398
+ else:
399
+ expanded_4d_mask = attn_mask_converter.to_4d(
400
+ attention_mask,
401
+ input_shape[-1],
402
+ dtype=inputs_embeds.dtype,
403
+ key_value_length=key_value_length,
404
+ )
405
+
406
+ # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend
407
+ # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213
408
+ #
409
+ # This fix is not applied in case we are tracing with torch.jit.trace or symbolic_trace, as _unmask_unattended has a data-dependent
410
+ # controlflow that can not be captured properly.
411
+ # TODO: _unmask_unattended does not work either with torch.compile when using fullgraph=True. We should find a way to detect this case.
412
+ if query_length > 1 and not is_tracing:
413
+ expanded_4d_mask = AttentionMaskConverter._unmask_unattended(
414
+ expanded_4d_mask, attention_mask, unmasked_value=0.0
415
+ )
416
+
417
+ return expanded_4d_mask
418
+
419
+
420
+ def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
421
+ """
422
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
423
+ `(batch_size, key_value_length)`
424
+
425
+ Args:
426
+ mask (`torch.Tensor` or `None`):
427
+ A 2D attention mask of shape `(batch_size, key_value_length)`
428
+ dtype (`torch.dtype`):
429
+ The torch dtype the created mask shall have.
430
+ tgt_len (`int`):
431
+ The target length or query length the created mask shall have.
432
+ """
433
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
434
+
435
+
436
+ def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
437
+ """
438
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
439
+ `(batch_size, key_value_length)`
440
+
441
+ Args:
442
+ mask (`torch.Tensor` or `None`):
443
+ A 2D attention mask of shape `(batch_size, key_value_length)`
444
+ dtype (`torch.dtype`):
445
+ The torch dtype the created mask shall have.
446
+ tgt_len (`int`):
447
+ The target length or query length the created mask shall have.
448
+ """
449
+ batch_size, key_value_length = mask.shape
450
+ tgt_len = tgt_len if tgt_len is not None else key_value_length
451
+
452
+ # torch.jit.trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`
453
+ # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
454
+ # TODO: Fix this as well when using torchdynamo with fullgraph=True.
455
+ is_tracing = torch.jit.is_tracing()
456
+
457
+ if torch.all(mask == 1):
458
+ if is_tracing:
459
+ pass
460
+ elif tgt_len == 1:
461
+ # For query_length == 1, causal attention and bi-directional attention are the same.
462
+ return None
463
+ elif key_value_length == tgt_len:
464
+ return None
465
+ else:
466
+ # Unfortunately, for query_length > 1 and key_value_length != query_length, we can not generally ignore the attention mask, as SDPA causal mask generation
467
+ # may be wrong. We will set is_causal=False in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
468
+ # Reference: https://github.com/pytorch/pytorch/issues/108108
469
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
470
+ else:
471
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
472
+
473
+
474
+ def _create_4d_causal_attention_mask(
475
+ input_shape: Union[torch.Size, Tuple, List],
476
+ dtype: torch.dtype,
477
+ device: torch.device,
478
+ past_key_values_length: int = 0,
479
+ sliding_window: Optional[int] = None,
480
+ ) -> Optional[torch.Tensor]:
481
+ """
482
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)`
483
+
484
+ Args:
485
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
486
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
487
+ dtype (`torch.dtype`):
488
+ The torch dtype the created mask shall have.
489
+ device (`int`):
490
+ The torch device the created mask shall have.
491
+ sliding_window (`int`, *optional*):
492
+ If the model uses windowed attention, a sliding window should be passed.
493
+ """
494
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
495
+
496
+ key_value_length = past_key_values_length + input_shape[-1]
497
+ attention_mask = attn_mask_converter.to_causal_4d(
498
+ input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device
499
+ )
500
+
501
+ return attention_mask
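
A small sanity-check sketch of the mask helpers vendored above, using the left-padded example from the `AttentionMaskConverter` docstring. The import path assumes `cache_py` is importable as a package from this repo layout; adjust it if not.

```python
import torch

from llava.model.language_model.cache_py.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_causal_attention_mask,
)

batch, q_len, past = 1, 5, 0
dummy_embeds = torch.zeros(batch, q_len, 8)   # only its dtype is consumed on this path
mask_2d = torch.tensor([[0, 0, 0, 1, 1]])     # left-padded row from the docstring example

mask_4d = _prepare_4d_causal_attention_mask(mask_2d, (batch, q_len), dummy_embeds, past)
print(mask_4d.shape)                           # torch.Size([1, 1, 5, 5]); masked slots hold dtype-min

causal = AttentionMaskConverter(is_causal=True).to_causal_4d(
    batch, q_len, key_value_length=q_len, dtype=torch.float32
)
print(causal.shape)                            # torch.Size([1, 1, 5, 5])
```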
llava/model/language_model/llava_llama_1stg.py ADDED
@@ -0,0 +1,633 @@
1
+ # Copyright 2023 Haotian Liu
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
+ import warnings  # used by the padding_mask deprecation warning in LlamaDecoderLayer.forward
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
+ import torch.nn.functional as F  # used by the pretraining_tp > 1 branch of the lm_head
20
+ from torch.nn import CrossEntropyLoss
21
+
22
+ from transformers import AutoConfig, AutoModelForCausalLM, \
23
+ LlamaConfig, LlamaModel
24
+
25
+ from transformers.modeling_outputs import CausalLMOutputWithPast
26
+ from transformers.generation.utils import GenerateOutput
27
+
28
+ from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
29
+ # import torch.distributed as dist
30
+
31
+ from transformers.models.llama import LlamaPreTrainedModel
32
+ from transformers.cache_utils import Cache, DynamicCache
33
+
34
+ from transformers.modeling_attn_mask_utils import (
35
+ AttentionMaskConverter,
36
+ _prepare_4d_attention_mask,
37
+ _prepare_4d_causal_attention_mask,
38
+ _prepare_4d_causal_attention_mask_for_sdpa,
39
+ )
40
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
41
+
42
+ from transformers.models.llama.modeling_llama import (
43
+ LlamaAttention,
44
+ LlamaFlashAttention2,
45
+ LlamaSdpaAttention,
46
+ LlamaMLP,
47
+ LlamaRMSNorm,
48
+ apply_rotary_pos_emb,
49
+ )
50
+
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)  # used by the gradient-checkpointing warning in LlamaModel.forward
+
51
+ class LlavaConfig(LlamaConfig):
52
+ model_type = "llava_llama"
53
+
54
+ LLAMA_ATTENTION_CLASSES = {
55
+ "eager": LlamaAttention,
56
+ "flash_attention_2": LlamaFlashAttention2,
57
+ "sdpa": LlamaSdpaAttention,
58
+ }
59
+
60
+
61
+ def reverse_cumsum(x: torch.Tensor) -> torch.Tensor:
62
+ return x + torch.sum(x, dim=-1, keepdims=True) - torch.cumsum(x, dim=-1)
63
+
64
+ def make_mask_post_last_voco(
65
+ inputs: torch.Tensor,
66
+ voco_token: int,
67
+ pad_token: Optional[int] = None,
68
+ dtype=torch.int64,
69
+ ) -> torch.Tensor:
70
+ mask = reverse_cumsum(inputs == voco_token) >= 1
71
+ if pad_token is not None:
72
+ mask = mask & (inputs != pad_token)
73
+ return mask.type(dtype)
74
+
75
+ def make_mask_pre_first_voco(
76
+ inputs: torch.Tensor,
77
+ voco_token: int,
78
+ pad_token: Optional[int] = None,
79
+ dtype=torch.int64,
80
+ ) -> torch.Tensor:
81
+ mask = (inputs == voco_token).cumsum(-1) >= 1
82
+ if pad_token is not None:
83
+ mask = mask & (inputs != pad_token)
84
+ return mask.type(dtype)
85
+
86
+ def make_voco_mask_llava(
87
+ inputs: torch.Tensor,
88
+ voco_token: int,
89
+ dtype=torch.int64,
90
+ ) -> torch.Tensor:
91
+
92
+ pre_voco_mask = make_mask_post_last_voco(inputs, voco_token, dtype=torch.bool)[
93
+ :, None, None
94
+ ]
95
+ # Attention mask for tokens after the last voco token.
96
+ post_voco_mask = make_mask_pre_first_voco(inputs, voco_token, dtype=torch.bool)[
97
+ :, None, None
98
+ ]
99
+ pre_voco_time_mask = pre_voco_mask.permute((0, 1, 3, 2))
100
+ mask = torch.where(pre_voco_time_mask, pre_voco_mask, post_voco_mask)
101
+ has_voco = (inputs == voco_token).any(-1)[:, None, None, None]
102
+ mask = torch.where(has_voco, mask, True)
103
+ return mask.type(dtype)
104
+
105
+ class LlamaDecoderLayer(nn.Module):
106
+ def __init__(self, config: LlamaConfig, layer_idx: int):
107
+ super().__init__()
108
+ self.hidden_size = config.hidden_size
109
+
110
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
111
+ self.mlp = LlamaMLP(config)
112
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
113
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
114
+
115
+ def forward(
116
+ self,
117
+ hidden_states: torch.Tensor,
118
+ attention_mask: Optional[torch.Tensor] = None,
119
+ position_ids: Optional[torch.LongTensor] = None,
120
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
121
+ output_attentions: Optional[bool] = False,
122
+ use_cache: Optional[bool] = False,
123
+ **kwargs,
124
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
125
+ """
126
+ Args:
127
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
128
+ attention_mask (`torch.FloatTensor`, *optional*):
129
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
130
+ query_sequence_length, key_sequence_length)` if default attention is used.
131
+ output_attentions (`bool`, *optional*):
132
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
133
+ returned tensors for more detail.
134
+ use_cache (`bool`, *optional*):
135
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
136
+ (see `past_key_values`).
137
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
138
+ """
139
+ if "padding_mask" in kwargs:
140
+ warnings.warn(
141
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
142
+ )
143
+
144
+ residual = hidden_states
145
+
146
+ hidden_states = self.input_layernorm(hidden_states)
147
+
148
+ # Self Attention
149
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
150
+ hidden_states=hidden_states,
151
+ attention_mask=attention_mask,
152
+ position_ids=position_ids,
153
+ past_key_value=past_key_value,
154
+ output_attentions=output_attentions,
155
+ use_cache=use_cache,
156
+ **kwargs,
157
+ )
158
+ hidden_states = residual + hidden_states
159
+
160
+ # Fully Connected
161
+ residual = hidden_states
162
+ hidden_states = self.post_attention_layernorm(hidden_states)
163
+ hidden_states = self.mlp(hidden_states)
164
+ hidden_states = residual + hidden_states
165
+
166
+ outputs = (hidden_states,)
167
+
168
+ if output_attentions:
169
+ outputs += (self_attn_weights,)
170
+
171
+ if use_cache:
172
+ outputs += (present_key_value,)
173
+
174
+ return outputs
175
+
176
+
177
+ class LlamaModel(LlamaPreTrainedModel):
178
+ """
179
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
180
+
181
+ Args:
182
+ config: LlamaConfig
183
+ """
184
+
185
+ def __init__(self, config: LlamaConfig):
186
+ super().__init__(config)
187
+ self.padding_idx = config.pad_token_id
188
+ self.vocab_size = config.vocab_size
189
+
190
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
191
+ self.layers = nn.ModuleList(
192
+ [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
193
+ )
194
+ self._use_sdpa = config._attn_implementation == "sdpa"
195
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
196
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
197
+
198
+ self.gradient_checkpointing = False
199
+ # Initialize weights and apply final processing
200
+ self.post_init()
201
+
202
+ def get_input_embeddings(self):
203
+ return self.embed_tokens
204
+
205
+ def set_input_embeddings(self, value):
206
+ self.embed_tokens = value
207
+
208
+ def forward(
209
+ self,
210
+ input_ids: torch.LongTensor = None,
211
+ attention_mask: Optional[torch.Tensor] = None,
212
+ position_ids: Optional[torch.LongTensor] = None,
213
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
214
+ inputs_embeds: Optional[torch.FloatTensor] = None,
215
+ use_cache: Optional[bool] = None,
216
+ output_attentions: Optional[bool] = None,
217
+ output_hidden_states: Optional[bool] = None,
218
+ return_dict: Optional[bool] = None,
219
+ voco_loc_back=None
220
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
221
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
222
+ output_hidden_states = (
223
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
224
+ )
225
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
226
+
227
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
228
+
229
+ # retrieve input_ids and inputs_embeds
230
+ if input_ids is not None and inputs_embeds is not None:
231
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
232
+ elif input_ids is not None:
233
+ batch_size, seq_length = input_ids.shape[:2]
234
+ elif inputs_embeds is not None:
235
+ batch_size, seq_length = inputs_embeds.shape[:2]
236
+ else:
237
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
238
+
239
+ if self.gradient_checkpointing and self.training:
240
+ if use_cache:
241
+ logger.warning_once(
242
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
243
+ )
244
+ use_cache = False
245
+
246
+ past_key_values_length = 0
247
+ if use_cache:
248
+ use_legacy_cache = not isinstance(past_key_values, Cache)
249
+ if use_legacy_cache:
250
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
251
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
252
+
253
+ if position_ids is None:
254
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
255
+ position_ids = torch.arange(
256
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
257
+ )
258
+ position_ids = position_ids.unsqueeze(0)
259
+
260
+ if inputs_embeds is None:
261
+ inputs_embeds = self.embed_tokens(input_ids)
262
+
263
+ if self._use_flash_attention_2:
264
+ # 2d mask is passed through the layers
265
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
266
+ elif self._use_sdpa and not output_attentions:
267
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
268
+ # the manual implementation that requires a 4D causal mask in all cases.
269
+ _2d_attention_mask_b = attention_mask
270
+
271
+ # attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
272
+ # attention_mask,
273
+ # (batch_size, seq_length),
274
+ # inputs_embeds,
275
+ # past_key_values_length,
276
+ # )
277
+
278
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
279
+ attention_mask,
280
+ (batch_size, seq_length + past_key_values_length), # Changed from (batch_size, seq_length) to ensure generating the whole mask
281
+ inputs_embeds, # Only uses .dtype and isinstance, so passing this has no impact
282
+ 0, # Changed from past_key_values_length
283
+ )
284
+
285
+ mask_type = attention_mask.dtype
286
+ mask_min = torch.finfo(mask_type).min
287
+
288
+ first_false_indices = (_2d_attention_mask_b == False).int().argmin(dim=1)
289
+
290
+ _2d_attention_mask = _2d_attention_mask_b.to(inputs_embeds.dtype)
291
+ for idx, locs in enumerate(voco_loc_back):
292
+ for loc in locs:
293
+ _2d_attention_mask[idx][seq_length - 1 - loc] = 32000
294
+ attention_mask_voco = make_voco_mask_llava(
295
+ _2d_attention_mask,
296
+ 32000,
297
+ inputs_embeds.dtype
298
+ )
299
+ attention_mask_voco = torch.where(attention_mask_voco == 1, torch.tensor(0), mask_min)
300
+ attention_mask = attention_mask + attention_mask_voco
301
+ attention_mask = torch.where(attention_mask < 0, mask_min, torch.tensor(0)).to(inputs_embeds.dtype)
302
+
303
+ for b in range(attention_mask.size(0)):
304
+ attention_mask[b, 0, :first_false_indices[b], :] = 0
305
+
306
+ else:
307
+ # 4d mask is passed through the layers
308
+ attention_mask = _prepare_4d_causal_attention_mask(
309
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
310
+ )
311
+
312
+ attention_mask = attention_mask[:,:,-seq_length:,:]
313
+ # embed positions
314
+ hidden_states = inputs_embeds
315
+
316
+ # decoder layers
317
+ all_hidden_states = () if output_hidden_states else None
318
+ all_self_attns = () if output_attentions else None
319
+ next_decoder_cache = None
320
+
321
+ for decoder_layer in self.layers:
322
+ if output_hidden_states:
323
+ all_hidden_states += (hidden_states,)
324
+
325
+ if self.gradient_checkpointing and self.training:
326
+ layer_outputs = self._gradient_checkpointing_func(
327
+ decoder_layer.__call__,
328
+ hidden_states,
329
+ attention_mask,
330
+ position_ids,
331
+ past_key_values,
332
+ output_attentions,
333
+ use_cache,
334
+ )
335
+ else:
336
+ layer_outputs = decoder_layer(
337
+ hidden_states,
338
+ attention_mask=attention_mask,
339
+ position_ids=position_ids,
340
+ past_key_value=past_key_values,
341
+ output_attentions=output_attentions,
342
+ use_cache=use_cache,
343
+ )
344
+
345
+ hidden_states = layer_outputs[0]
346
+
347
+ if use_cache:
348
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
349
+
350
+ if output_attentions:
351
+ all_self_attns += (layer_outputs[1],)
352
+
353
+ hidden_states = self.norm(hidden_states)
354
+
355
+ # add hidden states from the last decoder layer
356
+ if output_hidden_states:
357
+ all_hidden_states += (hidden_states,)
358
+
359
+ next_cache = None
360
+ if use_cache:
361
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
362
+ if not return_dict:
363
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
364
+ return BaseModelOutputWithPast(
365
+ last_hidden_state=hidden_states,
366
+ past_key_values=next_cache,
367
+ hidden_states=all_hidden_states,
368
+ attentions=all_self_attns,
369
+ )
370
+
371
+
372
+ class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
373
+ config_class = LlavaConfig
374
+
375
+ def __init__(self, config: LlamaConfig):
376
+ super(LlavaLlamaModel, self).__init__(config)
377
+
378
+
379
+ # LlavaMetaForCausalLM is a mixin that only contributes methods (it holds no parameters of its own)
380
+ class LlavaLlamaForCausalLM(LlamaPreTrainedModel, LlavaMetaForCausalLM):
381
+ _tied_weights_keys = ["lm_head.weight"]
382
+ config_class = LlavaConfig
383
+
384
+ def __init__(self, config):
385
+ super().__init__(config)
386
+ self.model = LlavaLlamaModel(config)
387
+ self.pretraining_tp = config.pretraining_tp
388
+ self.vocab_size = config.vocab_size
389
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
390
+
391
+ # Initialize weights and apply final processing
392
+ self.post_init()
393
+
394
+ def get_model(self):
395
+ return self.model
396
+
397
+ def get_input_embeddings(self):
398
+ return self.model.embed_tokens
399
+
400
+ def set_input_embeddings(self, value):
401
+ self.model.embed_tokens = value
402
+
403
+ def get_output_embeddings(self):
404
+ return self.lm_head
405
+
406
+ def set_output_embeddings(self, new_embeddings):
407
+ self.lm_head = new_embeddings
408
+
409
+ def set_decoder(self, decoder):
410
+ self.model = decoder
411
+
412
+ def get_decoder(self):
413
+ return self.model
414
+
415
+
416
+ def forward(
417
+ self,
418
+ input_ids: torch.LongTensor = None,
419
+ attention_mask: Optional[torch.Tensor] = None,
420
+ position_ids: Optional[torch.LongTensor] = None,
421
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
422
+ inputs_embeds: Optional[torch.FloatTensor] = None,
423
+ labels: Optional[torch.LongTensor] = None,
424
+ use_cache: Optional[bool] = None,
425
+ output_attentions: Optional[bool] = None,
426
+ output_hidden_states: Optional[bool] = None,
427
+ images: Optional[torch.FloatTensor] = None,
428
+ image_sizes: Optional[List[List[int]]] = None,
429
+ return_dict: Optional[bool] = None,
430
+ voco_loc_back=None,
431
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
432
+
433
+ if inputs_embeds is None:
434
+ (
435
+ input_ids,
436
+ position_ids,
437
+ attention_mask,
438
+ past_key_values,
439
+ inputs_embeds,
440
+ labels,
441
+ voco_loc_back
442
+ ) = self.prepare_inputs_labels_for_multimodal(
443
+ input_ids,
444
+ position_ids,
445
+ attention_mask,
446
+ past_key_values,
447
+ labels,
448
+ images,
449
+ image_sizes,
450
+ voco_loc_back  # indices are shifted by +1 inside when input_ids is [B, 1] (autoregressive decoding step)
451
+ )
452
+
453
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
454
+ output_hidden_states = (
455
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
456
+ )
457
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
458
+
459
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
460
+ outputs = self.model(
461
+ input_ids=input_ids,
462
+ attention_mask=attention_mask,
463
+ position_ids=position_ids,
464
+ past_key_values=past_key_values,
465
+ inputs_embeds=inputs_embeds,
466
+ use_cache=use_cache,
467
+ output_attentions=output_attentions,
468
+ output_hidden_states=output_hidden_states,
469
+ return_dict=return_dict,
470
+ voco_loc_back=voco_loc_back
471
+ )
472
+
473
+ hidden_states = outputs[0]
474
+ if self.config.pretraining_tp > 1:
475
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
476
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
477
+ logits = torch.cat(logits, dim=-1)
478
+ else:
479
+ logits = self.lm_head(hidden_states)
480
+ logits = logits.float()
481
+
482
+ loss = None
483
+ if labels is not None:
484
+ # Shift so that tokens < n predict n
485
+ shift_logits = logits[..., :-1, :].contiguous()
486
+ shift_labels = labels[..., 1:].contiguous()
487
+ # Flatten the tokens
488
+ loss_fct = CrossEntropyLoss()
489
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
490
+ shift_labels = shift_labels.view(-1)
491
+ # Enable model parallelism
492
+ shift_labels = shift_labels.to(shift_logits.device)
493
+ loss = loss_fct(shift_logits, shift_labels)
494
+
495
+ if not return_dict:
496
+ output = (logits,) + outputs[1:]
497
+ return (loss,) + output if loss is not None else output
498
+
499
+ return CausalLMOutputWithPast(
500
+ loss=loss,
501
+ logits=logits,
502
+ past_key_values=outputs.past_key_values,
503
+ hidden_states=outputs.hidden_states,
504
+ attentions=outputs.attentions,
505
+ )
506
+
507
+ @torch.no_grad()
508
+ def generate(
509
+ self,
510
+ inputs: Optional[torch.Tensor] = None,
511
+ images: Optional[torch.Tensor] = None,
512
+ image_sizes: Optional[torch.Tensor] = None,
513
+ **kwargs,
514
+ ) -> Union[GenerateOutput, torch.LongTensor]:
515
+ position_ids = kwargs.pop("position_ids", None)
516
+ attention_mask = kwargs.pop("attention_mask", None)
517
+ if "inputs_embeds" in kwargs:
518
+ raise NotImplementedError("`inputs_embeds` is not supported")
519
+
520
+ if images is not None:
521
+ (
522
+ inputs,
523
+ position_ids,
524
+ attention_mask,
525
+ _,
526
+ inputs_embeds,
527
+ _,
528
+ voco_loc_back
529
+ ) = self.prepare_inputs_labels_for_multimodal(
530
+ inputs,
531
+ position_ids,
532
+ attention_mask,
533
+ None,
534
+ None,
535
+ images,
536
+ image_sizes=image_sizes
537
+ )
538
+ else:
539
+ voco_loc_back = None  # no images in this call, so there are no voco locations to carry forward
+ inputs_embeds = self.get_model().embed_tokens(inputs)
540
+
541
+ return super().generate(
542
+ position_ids=position_ids,
543
+ attention_mask=attention_mask,
544
+ inputs_embeds=inputs_embeds,
545
+ voco_loc_back=voco_loc_back,
546
+ **kwargs
547
+ )
548
+
549
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
550
+ inputs_embeds=None, **kwargs):
551
+ images = kwargs.pop("images", None)
552
+ image_sizes = kwargs.pop("image_sizes", None)
553
+ voco_loc_back = kwargs.pop("voco_loc_back", None)
554
+
555
+ inputs = self.prepare_inputs_for_generation_llama(
556
+ input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
557
+ )
558
+
559
+ if voco_loc_back is not None:
560
+ inputs['voco_loc_back'] = voco_loc_back
561
+ if images is not None:
562
+ inputs['images'] = images
563
+ if image_sizes is not None:
564
+ inputs['image_sizes'] = image_sizes
565
+ return inputs
566
+
567
+ def prepare_inputs_for_generation_llama(
568
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
569
+ ):
570
+ if past_key_values is not None:
571
+ if isinstance(past_key_values, Cache):
572
+ cache_length = past_key_values.get_seq_length()
573
+ past_length = past_key_values.seen_tokens
574
+ max_cache_length = past_key_values.get_max_length()
575
+ else:
576
+ cache_length = past_length = past_key_values[0][0].shape[2]
577
+ max_cache_length = None
578
+
579
+ # Keep only the unprocessed tokens:
580
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
581
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
582
+ # input)
583
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
584
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
585
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
586
+ # input_ids based on the past_length.
587
+ elif past_length < input_ids.shape[1]:
588
+ input_ids = input_ids[:, past_length:]
589
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
590
+
591
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
592
+ if (
593
+ max_cache_length is not None
594
+ and attention_mask is not None
595
+ and cache_length + input_ids.shape[1] > max_cache_length
596
+ ):
597
+ attention_mask = attention_mask[:, -max_cache_length:]
598
+
599
+ position_ids = kwargs.get("position_ids", None)
600
+ if attention_mask is not None and position_ids is None:
601
+ # create position_ids on the fly for batch generation
602
+ position_ids = attention_mask.long().cumsum(-1) - 1
603
+ position_ids.masked_fill_(attention_mask == 0, 1)
604
+ if past_key_values:
605
+ position_ids = position_ids[:, -input_ids.shape[1] :]
606
+
607
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
608
+ if inputs_embeds is not None and past_key_values is None:
609
+ model_inputs = {"inputs_embeds": inputs_embeds}
610
+ else:
611
+ model_inputs = {"input_ids": input_ids}
612
+
613
+ model_inputs.update(
614
+ {
615
+ "position_ids": position_ids,
616
+ "past_key_values": past_key_values,
617
+ "use_cache": kwargs.get("use_cache"),
618
+ "attention_mask": attention_mask,
619
+ }
620
+ )
621
+ return model_inputs
622
+
623
+ @staticmethod
624
+ def _reorder_cache(past_key_values, beam_idx):
625
+ reordered_past = ()
626
+ for layer_past in past_key_values:
627
+ reordered_past += (
628
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
629
+ )
630
+ return reordered_past
631
+
632
+ AutoConfig.register("llava_llama", LlavaConfig)
633
+ AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
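
A toy sketch of the voco-token mask helpers defined near the top of this file (`32000` stands in for the voco token id, matching the hard-coded value used in `LlamaModel.forward`; the import path assumes the module is importable as-is):

```python
import torch

from llava.model.language_model.llava_llama_1stg import (
    make_mask_post_last_voco,
    make_mask_pre_first_voco,
    make_voco_mask_llava,
)

VOCO = 32000
ids = torch.tensor([[101, 102, VOCO, VOCO, 103, 104]])

print(make_mask_post_last_voco(ids, VOCO))  # tensor([[1, 1, 1, 1, 0, 0]]) - positions up to the last voco token
print(make_mask_pre_first_voco(ids, VOCO))  # tensor([[0, 0, 1, 1, 1, 1]]) - positions from the first voco token on

# Combined [B, 1, L, L] mask: queries up to the last voco token see only that prefix,
# queries after it see only the voco tokens and what follows (the compression bottleneck).
mask = make_voco_mask_llava(ids, VOCO)
print(mask.shape)  # torch.Size([1, 1, 6, 6])
```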
llava/model/llava_arch.py ADDED
@@ -0,0 +1,375 @@
1
+ # Adopted from https://github.com/haotian-liu/LLaVA.
2
+ # Copyright 2023 Haotian Liu
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from abc import ABC, abstractmethod
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+
22
+ from .multimodal_encoder.builder import build_vision_tower
23
+ from .multimodal_projector.builder import build_vision_projector
24
+
25
+ from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
26
+
27
+ from llava.mm_utils import get_anyres_image_grid_shape
28
+ class LlavaMetaModel:
29
+
30
+ def __init__(self, config):
31
+ super(LlavaMetaModel, self).__init__(config)
32
+
33
+ if hasattr(config, "mm_vision_tower"):
34
+ self.vision_tower = build_vision_tower(config, delay_load=True)
35
+ self.mm_projector = build_vision_projector(config)
36
+
37
+ if 'unpad' in getattr(config, 'mm_patch_merge_type', ''):
38
+ self.image_newline = nn.Parameter(
39
+ torch.empty(config.hidden_size, dtype=self.dtype)
40
+ )
41
+
42
+ def get_vision_tower(self):
43
+ vision_tower = getattr(self, 'vision_tower', None)
44
+ if type(vision_tower) is list:
45
+ vision_tower = vision_tower[0]
46
+ return vision_tower
47
+
48
+ def initialize_vision_modules(self, model_args, fsdp=None):
49
+ vision_tower = model_args.vision_tower
50
+ mm_vision_select_layer = model_args.mm_vision_select_layer
51
+ mm_vision_select_feature = model_args.mm_vision_select_feature
52
+ pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
53
+ mm_patch_merge_type = model_args.mm_patch_merge_type
54
+
55
+ self.config.mm_vision_tower = vision_tower
56
+
57
+ if self.get_vision_tower() is None:
58
+ vision_tower = build_vision_tower(model_args)
59
+
60
+ if fsdp is not None and len(fsdp) > 0:
61
+ self.vision_tower = [vision_tower]
62
+ else:
63
+ self.vision_tower = vision_tower
64
+ else:
65
+ if fsdp is not None and len(fsdp) > 0:
66
+ vision_tower = self.vision_tower[0]
67
+ else:
68
+ vision_tower = self.vision_tower
69
+ vision_tower.load_model()
70
+
71
+ self.config.use_mm_proj = True
72
+ self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
73
+ self.config.mm_hidden_size = vision_tower.hidden_size
74
+ self.config.mm_vision_select_layer = mm_vision_select_layer
75
+ self.config.mm_vision_select_feature = mm_vision_select_feature
76
+ self.config.mm_patch_merge_type = mm_patch_merge_type
77
+
78
+ if getattr(self, 'mm_projector', None) is None:
79
+ self.mm_projector = build_vision_projector(self.config)
80
+
81
+ if 'unpad' in mm_patch_merge_type:
82
+ embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
83
+ self.image_newline = nn.Parameter(
84
+ torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
85
+ )
86
+ else:
87
+ # In case it is frozen by LoRA
88
+ for p in self.mm_projector.parameters():
89
+ p.requires_grad = True
90
+
91
+ if pretrain_mm_mlp_adapter is not None:
92
+ mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
93
+ def get_w(weights, keyword):
94
+ return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
95
+
96
+ self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
97
+
98
+
99
+ def unpad_image(tensor, original_size):
100
+ """
101
+ Unpads a PyTorch tensor of a padded and resized image.
102
+
103
+ Args:
104
+ tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
105
+ original_size (tuple): The original size of the image (height, width).
106
+
107
+ Returns:
108
+ torch.Tensor: The unpadded image tensor.
109
+ """
110
+ original_width, original_height = original_size
111
+ current_height, current_width = tensor.shape[1:]
112
+
113
+ original_aspect_ratio = original_width / original_height
114
+ current_aspect_ratio = current_width / current_height
115
+
116
+ if original_aspect_ratio > current_aspect_ratio:
117
+ scale_factor = current_width / original_width
118
+ new_height = int(original_height * scale_factor)
119
+ padding = (current_height - new_height) // 2
120
+ unpadded_tensor = tensor[:, padding:current_height - padding, :]
121
+ else:
122
+ scale_factor = current_height / original_height
123
+ new_width = int(original_width * scale_factor)
124
+ padding = (current_width - new_width) // 2
125
+ unpadded_tensor = tensor[:, :, padding:current_width - padding]
126
+
127
+ return unpadded_tensor
128
+
129
+
130
+class LlavaMetaForCausalLM(ABC):
+
+    @abstractmethod
+    def get_model(self):
+        pass
+
+    def get_vision_tower(self):
+        return self.get_model().get_vision_tower()
+
+    def encode_images(self, images):
+        image_features = self.get_model().get_vision_tower()(images)
+        image_features = self.get_model().mm_projector(image_features)
+        return image_features
+
+    def prepare_inputs_labels_for_multimodal(
+        self, input_ids, position_ids, attention_mask, past_key_values, labels,
+        images, image_sizes=None, voco_loc_back=None
+    ):
+        vision_tower = self.get_vision_tower()
+        if vision_tower is None or images is None or input_ids.shape[1] == 1:
+            if voco_loc_back is not None and voco_loc_back != [] and voco_loc_back != [[]]:
+                voco_loc_back = [[item + 1 for item in sublist] for sublist in voco_loc_back]
+            return input_ids, position_ids, attention_mask, past_key_values, None, labels, voco_loc_back
+
+        if type(images) is list or images.ndim == 5:
+            if type(images) is list:
+                images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
+            concat_images = torch.cat([image for image in images], dim=0)
+            image_features = self.encode_images(concat_images)
+            split_sizes = [image.shape[0] for image in images]
+            image_features = torch.split(image_features, split_sizes, dim=0)
+            mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat')
+            image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square')
+            if mm_patch_merge_type == 'flat':
+                image_features = [x.flatten(0, 1) for x in image_features]
+            elif mm_patch_merge_type.startswith('spatial'):
+                new_image_features = []
+                for image_idx, image_feature in enumerate(image_features):
+                    if image_feature.shape[0] > 1:
+                        base_image_feature = image_feature[0]
+                        image_feature = image_feature[1:]
+                        height = width = self.get_vision_tower().num_patches_per_side
+                        assert height * width == base_image_feature.shape[0]
+                        if image_aspect_ratio == 'anyres':
+                            num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.get_vision_tower().config.image_size)
+                            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
+                        else:
+                            raise NotImplementedError
+                        if 'unpad' in mm_patch_merge_type:
+                            image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+                            image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+                            image_feature = unpad_image(image_feature, image_sizes[image_idx])
+                            image_feature = torch.cat((
+                                image_feature,
+                                self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
+                            ), dim=-1)
+                            image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+                        else:
+                            image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
+                            image_feature = image_feature.flatten(0, 3)
+                        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+                    else:
+                        image_feature = image_feature[0]
+                        if 'unpad' in mm_patch_merge_type:
+                            image_feature = torch.cat((
+                                image_feature,
+                                self.model.image_newline[None].to(image_feature.device)
+                            ), dim=0)
+                    new_image_features.append(image_feature)
+                image_features = new_image_features
+            else:
+                raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
+        else:
+            image_features = self.encode_images(images)
+
+        # TODO: image start / end is not implemented here to support pretraining.
+        if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
+            raise NotImplementedError
+
+        # Let's just add dummy tensors if they do not exist,
+        # it is a headache to deal with None all the time.
+        # But it is not ideal, and if you have a better idea,
+        # please open an issue / submit a PR, thanks.
+        _labels = labels
+        _position_ids = position_ids
+        _attention_mask = attention_mask
+        if attention_mask is None:
+            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
+        else:
+            attention_mask = attention_mask.bool()
+        if position_ids is None:
+            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
+        if labels is None:
+            labels = torch.full_like(input_ids, IGNORE_INDEX)
+
+        # remove the padding using attention_mask -- FIXME
+        _input_ids = input_ids
+        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
+        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
+
+        voco_loc_back = []
+
+        for l in input_ids:
+            indices = (l == 32000).nonzero(as_tuple=True)[0]
+            if indices.size(0) > 0:
+                # number of VoCo tokens expected per sequence
+                voco_num = 2
+                assert indices.size(0) == voco_num
+                indices = l.size(0) - 1 - indices
+                voco_loc_back.append(indices.tolist())
+
+        new_input_embeds = []
+        new_labels = []
+        cur_image_idx = 0
+        for batch_idx, cur_input_ids in enumerate(input_ids):
+            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
+            if num_images == 0:
+                cur_image_features = image_features[cur_image_idx]
+                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
+                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
+                new_input_embeds.append(cur_input_embeds)
+                new_labels.append(labels[batch_idx])
+                cur_image_idx += 1
+                continue
+            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
+            cur_input_ids_noim = []
+            cur_labels = labels[batch_idx]
+            cur_labels_noim = []
+            for i in range(len(image_token_indices) - 1):
+                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
+                cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
+            split_sizes = [x.shape[0] for x in cur_labels_noim]
+            cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
+            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
+            cur_new_input_embeds = []
+            cur_new_labels = []
+
+            for i in range(num_images + 1):
+                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
+                cur_new_labels.append(cur_labels_noim[i])
+                if i < num_images:
+                    cur_image_features = image_features[cur_image_idx]
+                    cur_image_idx += 1
+                    cur_new_input_embeds.append(cur_image_features)
+                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
+            cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]
+            cur_new_input_embeds = torch.cat(cur_new_input_embeds)
+            cur_new_labels = torch.cat(cur_new_labels)
+            # add on batch
+            new_input_embeds.append(cur_new_input_embeds)
+            new_labels.append(cur_new_labels)
+
+        # Truncate sequences to max length as image embeddings can make the sequence longer
+        tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
+        if tokenizer_model_max_length is not None:
+            new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
+            new_labels = [x[:tokenizer_model_max_length] for x in new_labels]
+        # Combine them
+        max_len = max(x.shape[0] for x in new_input_embeds)
+        batch_size = len(new_input_embeds)
+        new_input_embeds_padded = []
+        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
+        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
+        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
+        for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
+            cur_len = cur_new_embed.shape[0]
+            if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
+                new_input_embeds_padded.append(torch.cat((
+                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
+                    cur_new_embed
+                ), dim=0))
+                if cur_len > 0:
+                    new_labels_padded[i, -cur_len:] = cur_new_labels
+                    attention_mask[i, -cur_len:] = True
+                    position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
+            else:
+                new_input_embeds_padded.append(torch.cat((
+                    cur_new_embed,
+                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
+                ), dim=0))
+                if cur_len > 0:
+                    new_labels_padded[i, :cur_len] = cur_new_labels
+                    attention_mask[i, :cur_len] = True
+                    position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
+
+        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
+
+        if _labels is None:
+            new_labels = None
+        else:
+            new_labels = new_labels_padded
+
+        if _attention_mask is None:
+            attention_mask = None
+        else:
+            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
+
+        if _position_ids is None:
+            position_ids = None
+
+        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels, voco_loc_back
+
+
+    def initialize_vision_tokenizer(self, model_args, tokenizer):
+        if model_args.mm_use_im_patch_token:
+            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+            self.resize_token_embeddings(len(tokenizer))
+
+        if model_args.mm_use_im_start_end:
+            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+            self.resize_token_embeddings(len(tokenizer))
+
+            if num_new_tokens > 0:
+                input_embeddings = self.get_input_embeddings().weight.data
+                output_embeddings = self.get_output_embeddings().weight.data
+
+                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
+                    dim=0, keepdim=True)
+                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
+                    dim=0, keepdim=True)
+
+                input_embeddings[-num_new_tokens:] = input_embeddings_avg
+                output_embeddings[-num_new_tokens:] = output_embeddings_avg
+
+            if model_args.tune_mm_mlp_adapter:
+                for p in self.get_input_embeddings().parameters():
+                    p.requires_grad = True
+                for p in self.get_output_embeddings().parameters():
+                    p.requires_grad = False
+
+            if model_args.pretrain_mm_mlp_adapter:
+                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
+                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
+                assert num_new_tokens == 2
+                if input_embeddings.shape == embed_tokens_weight.shape:
+                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
+                elif embed_tokens_weight.shape[0] == num_new_tokens:
+                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
+                else:
+                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
+        elif model_args.mm_use_im_patch_token:
+            if model_args.tune_mm_mlp_adapter:
+                for p in self.get_input_embeddings().parameters():
+                    p.requires_grad = False
+                for p in self.get_output_embeddings().parameters():
+                    p.requires_grad = False
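prepare_inputs_labels_for_multimodal splices the projected image features into the text embedding sequence at each IMAGE_TOKEN_INDEX placeholder, rebuilds labels, attention_mask and position_ids around the enlarged sequences, and additionally records voco_loc_back: the positions of what appear to be the special compression ("VoCo") tokens, hard-coded here as id 32000, expressed as offsets from the end of each unpadded sequence so they can be located again after padding. A minimal sketch of just that index bookkeeping, with toy values that are not part of the diff:

    import torch

    seq = torch.tensor([1, 319, 32000, 32000, 2])    # toy token sequence of length 5
    idx = (seq == 32000).nonzero(as_tuple=True)[0]   # tensor([2, 3])
    from_end = seq.size(0) - 1 - idx                 # tensor([2, 1]); offsets from the last position
    print(from_end.tolist())                         # [2, 1]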
llava/model/make_delta.py ADDED
@@ -0,0 +1,52 @@
+"""
+Usage:
+python3 -m llava.model.make_delta --base-model-path ~/model_weights/llama-7b --target-model-path ~/model_weights/llava-7b --delta-path ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
+"""
+import argparse
+
+import torch
+from tqdm import tqdm
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from llava.model.utils import auto_upgrade
+
+
+def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
+    print("Loading base model")
+    base = AutoModelForCausalLM.from_pretrained(
+        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
+
+    print("Loading target model")
+    auto_upgrade(target_model_path)
+    target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
+
+    print("Calculating delta")
+    for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
+        if name not in base.state_dict():
+            assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
+            continue
+        if param.data.shape == base.state_dict()[name].shape:
+            param.data -= base.state_dict()[name]
+        else:
+            assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
+            bparam = base.state_dict()[name]
+            param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam
+
+    print("Saving delta")
+    if hub_repo_id:
+        kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
+    else:
+        kwargs = {}
+    target.save_pretrained(delta_path, **kwargs)
+    target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
+    target_tokenizer.save_pretrained(delta_path, **kwargs)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--base-model-path", type=str, required=True)
+    parser.add_argument("--target-model-path", type=str, required=True)
+    parser.add_argument("--delta-path", type=str, required=True)
+    parser.add_argument("--hub-repo-id", type=str, default=None)
+    args = parser.parse_args()
+
+    make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id)
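make_delta stores target-minus-base weight differences so the tuned model can be redistributed without shipping the base LLaMA weights. As a rough sketch of the inverse step (adding the delta back onto the base), with placeholder paths and the caveat that a companion apply_delta script, as in upstream LLaVA, is the authoritative way to do this:

    import torch
    from transformers import AutoModelForCausalLM

    base = AutoModelForCausalLM.from_pretrained("path/to/llama-7b", torch_dtype=torch.float16, low_cpu_mem_usage=True)
    delta = AutoModelForCausalLM.from_pretrained("path/to/llava-7b-delta", torch_dtype=torch.float16, low_cpu_mem_usage=True)

    base_state = base.state_dict()
    for name, param in delta.state_dict().items():
        if name not in base_state:
            continue                                   # e.g. the mm_projector exists only in the delta
        bparam = base_state[name]
        if param.data.shape == bparam.shape:
            param.data += bparam                       # undo the subtraction done in make_delta
        else:
            param.data[:bparam.shape[0], :bparam.shape[1]] += bparam  # embeddings grown by new tokens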
llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc ADDED
Binary file (653 Bytes).
 
llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-310.pyc ADDED
Binary file (3.33 kB).
 
llava/model/multimodal_encoder/builder.py ADDED
@@ -0,0 +1,11 @@
+import os
+from .clip_encoder import CLIPVisionTower
+
+
+def build_vision_tower(vision_tower_cfg, **kwargs):
+    vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
+    is_absolute_path_exists = os.path.exists(vision_tower)
+    if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion") or "ShareGPT4V" in vision_tower:
+        return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
+
+    raise ValueError(f'Unknown vision tower: {vision_tower}')
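build_vision_tower only needs a config object exposing the attributes read here and in CLIPVisionTower. An illustrative call with a bare namespace; the checkpoint id is the standard OpenAI CLIP ViT-L/14 @ 336px and the values are typical LLaVA-style settings, not taken from this commit:

    from types import SimpleNamespace

    cfg = SimpleNamespace(
        mm_vision_tower="openai/clip-vit-large-patch14-336",
        mm_vision_select_layer=-2,          # second-to-last hidden layer, the common LLaVA choice
        mm_vision_select_feature="patch",
    )
    tower = build_vision_tower(cfg, delay_load=True)   # defers weight loading until load_model()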
llava/model/multimodal_encoder/clip_encoder.py ADDED
@@ -0,0 +1,88 @@
+import torch
+import torch.nn as nn
+
+from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
+
+# The CLIP vision tower is not trained; only the linear projector is trained.
+class CLIPVisionTower(nn.Module):
+    def __init__(self, vision_tower, args, delay_load=False):
+        super().__init__()
+
+        self.is_loaded = False
+
+        self.vision_tower_name = vision_tower
+        self.select_layer = args.mm_vision_select_layer
+        self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch')
+
+        if not delay_load:
+            self.load_model()
+        elif getattr(args, 'unfreeze_mm_vision_tower', False):
+            self.load_model()
+        else:
+            self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
+
+    def load_model(self, device_map=None):
+        if self.is_loaded:
+            print('{} is already loaded, `load_model` called again, skipping.'.format(self.vision_tower_name))
+            return
+
+        self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
+        self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
+        self.vision_tower.requires_grad_(False)
+
+        self.is_loaded = True
+
+    def feature_select(self, image_forward_outs):
+        image_features = image_forward_outs.hidden_states[self.select_layer]
+        if self.select_feature == 'patch':
+            image_features = image_features[:, 1:]
+        elif self.select_feature == 'cls_patch':
+            image_features = image_features
+        else:
+            raise ValueError(f'Unexpected select feature: {self.select_feature}')
+        return image_features
+
+    @torch.no_grad()
+    def forward(self, images):
+        if type(images) is list:
+            image_features = []
+            for image in images:
+                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
+                image_feature = self.feature_select(image_forward_out).to(image.dtype)
+                image_features.append(image_feature)
+        else:
+            image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
+            image_features = self.feature_select(image_forward_outs).to(images.dtype)
+
+        return image_features
+
+    @property
+    def dummy_feature(self):
+        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
+
+    @property
+    def dtype(self):
+        return self.vision_tower.dtype
+
+    @property
+    def device(self):
+        return self.vision_tower.device
+
+    @property
+    def config(self):
+        if self.is_loaded:
+            return self.vision_tower.config
+        else:
+            return self.cfg_only
+
+    @property
+    def hidden_size(self):
+        return self.config.hidden_size
+
+    @property
+    def num_patches_per_side(self):
+        return self.config.image_size // self.config.patch_size
+
+    @property
+    def num_patches(self):
+        return (self.config.image_size // self.config.patch_size) ** 2
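For the 336-pixel, patch-14 CLIP tower the grid is 24x24, so 'patch' feature selection yields 576 tokens of width 1024 per image. A rough shape check under that assumption, with random pixels standing in for processor output (not part of the diff):

    import torch
    from types import SimpleNamespace

    tower = CLIPVisionTower(
        "openai/clip-vit-large-patch14-336",
        args=SimpleNamespace(mm_vision_select_layer=-2, mm_vision_select_feature="patch"),
    )
    pixel_values = torch.randn(2, 3, 336, 336)
    feats = tower(pixel_values)
    print(feats.shape)                                    # torch.Size([2, 576, 1024])
    print(tower.num_patches_per_side, tower.num_patches)  # 24 576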
llava/model/multimodal_projector/__pycache__/builder.cpython-310.pyc ADDED
Binary file (2.03 kB).
 
llava/model/multimodal_projector/builder.py ADDED
@@ -0,0 +1,51 @@
+import torch
+import torch.nn as nn
+import re
+
+
+class IdentityMap(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, *args, **kwargs):
+        return x
+
+    @property
+    def config(self):
+        return {"mm_projector_type": 'identity'}
+
+
+class SimpleResBlock(nn.Module):
+    def __init__(self, channels):
+        super().__init__()
+        self.pre_norm = nn.LayerNorm(channels)
+
+        self.proj = nn.Sequential(
+            nn.Linear(channels, channels),
+            nn.GELU(),
+            nn.Linear(channels, channels)
+        )
+    def forward(self, x):
+        x = self.pre_norm(x)
+        return x + self.proj(x)
+
+
+def build_vision_projector(config, delay_load=False, **kwargs):
+    projector_type = getattr(config, 'mm_projector_type', 'linear')
+
+    if projector_type == 'linear':
+        return nn.Linear(config.mm_hidden_size, config.hidden_size)
+
+    mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
+    if mlp_gelu_match:
+        mlp_depth = int(mlp_gelu_match.group(1))
+        modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
+        for _ in range(1, mlp_depth):
+            modules.append(nn.GELU())
+            modules.append(nn.Linear(config.hidden_size, config.hidden_size))
+        return nn.Sequential(*modules)
+
+    if projector_type == 'identity':
+        return IdentityMap()
+
+    raise ValueError(f'Unknown projector type: {projector_type}')
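build_vision_projector reads only mm_projector_type, mm_hidden_size and hidden_size from the config. For example, the 'mlp2x_gelu' type that LLaVA-1.5 typically uses expands to Linear -> GELU -> Linear; the sizes below are illustrative (CLIP ViT-L features into a 7B LLM), not taken from this commit:

    from types import SimpleNamespace

    cfg = SimpleNamespace(mm_projector_type="mlp2x_gelu", mm_hidden_size=1024, hidden_size=4096)
    projector = build_vision_projector(cfg)
    print(projector)  # Sequential(Linear(1024->4096), GELU(), Linear(4096->4096))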
llava/model/utils.py ADDED
@@ -0,0 +1,20 @@
+from transformers import AutoConfig
+
+
+def auto_upgrade(config):
+    cfg = AutoConfig.from_pretrained(config)
+    if 'llava' in config and 'llava' not in cfg.model_type:
+        assert cfg.model_type == 'llama'
+        print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.")
+        print("You must upgrade the checkpoint to the new code base (this can be done automatically).")
+        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
+        if confirm.lower() in ["y", "yes"]:
+            print("Upgrading checkpoint...")
+            assert len(cfg.architectures) == 1
+            setattr(cfg.__class__, "model_type", "llava")
+            cfg.architectures[0] = 'LlavaLlamaForCausalLM'
+            cfg.save_pretrained(config)
+            print("Checkpoint upgraded.")
+        else:
+            print("Checkpoint upgrade aborted.")
+            exit(1)
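auto_upgrade takes the path to a local checkpoint directory; when the path looks like a LLaVA checkpoint but its config still declares the old 'llama' model_type, it asks for confirmation and rewrites config.json in place. Illustrative call with a placeholder path:

    from llava.model.utils import auto_upgrade

    auto_upgrade("path/to/old-llava-v0-7b")  # prompts [Y/N] before rewriting the config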
llava/serve/__init__.py ADDED
File without changes
llava/serve/cli.py ADDED
@@ -0,0 +1,128 @@
+import argparse
+import torch
+
+from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from llava.conversation import conv_templates, SeparatorStyle
+from llava.model.builder import load_pretrained_model
+from llava.utils import disable_torch_init
+from llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
+
+from PIL import Image
+
+import requests
+from PIL import Image
+from io import BytesIO
+from transformers import TextStreamer
+
+
+def load_image(image_file):
+    if image_file.startswith('http://') or image_file.startswith('https://'):
+        response = requests.get(image_file)
+        image = Image.open(BytesIO(response.content)).convert('RGB')
+    else:
+        image = Image.open(image_file).convert('RGB')
+    return image
+
+
+def main(args):
+    # Model
+    disable_torch_init()
+
+    model_name = get_model_name_from_path(args.model_path)
+    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
+
+    if "llama-2" in model_name.lower():
+        conv_mode = "llava_llama_2"
+    elif "mistral" in model_name.lower():
+        conv_mode = "mistral_instruct"
+    elif "v1.6-34b" in model_name.lower():
+        conv_mode = "chatml_direct"
+    elif "v1" in model_name.lower():
+        conv_mode = "llava_v1"
+    elif "mpt" in model_name.lower():
+        conv_mode = "mpt"
+    else:
+        conv_mode = "llava_v0"
+
+    if args.conv_mode is not None and conv_mode != args.conv_mode:
+        print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
+    else:
+        args.conv_mode = conv_mode
+
+    conv = conv_templates[args.conv_mode].copy()
+    if "mpt" in model_name.lower():
+        roles = ('user', 'assistant')
+    else:
+        roles = conv.roles
+
+    image = load_image(args.image_file)
+    image_size = image.size
+    # Similar operation in model_worker.py
+    image_tensor = process_images([image], image_processor, model.config)
+    if type(image_tensor) is list:
+        image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
+    else:
+        image_tensor = image_tensor.to(model.device, dtype=torch.float16)
+
+    while True:
+        try:
+            inp = input(f"{roles[0]}: ")
+        except EOFError:
+            inp = ""
+        if not inp:
+            print("exit...")
+            break
+
+        print(f"{roles[1]}: ", end="")
+
+        if image is not None:
+            # first message
+            if model.config.mm_use_im_start_end:
+                inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
+            else:
+                inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
+            conv.append_message(conv.roles[0], inp)
+            image = None
+        else:
+            # later messages
+            conv.append_message(conv.roles[0], inp)
+        conv.append_message(conv.roles[1], None)
+        prompt = conv.get_prompt()
+
+        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
+        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
+        keywords = [stop_str]
+        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+        with torch.inference_mode():
+            output_ids = model.generate(
+                input_ids,
+                images=image_tensor,
+                image_sizes=[image_size],
+                do_sample=True if args.temperature > 0 else False,
+                temperature=args.temperature,
+                max_new_tokens=args.max_new_tokens,
+                streamer=streamer,
+                use_cache=True)
+
+        outputs = tokenizer.decode(output_ids[0]).strip()
+        conv.messages[-1][-1] = outputs
+
+        if args.debug:
+            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+    parser.add_argument("--model-base", type=str, default=None)
+    parser.add_argument("--image-file", type=str, required=True)
+    parser.add_argument("--device", type=str, default="cuda")
+    parser.add_argument("--conv-mode", type=str, default=None)
+    parser.add_argument("--temperature", type=float, default=0.2)
+    parser.add_argument("--max-new-tokens", type=int, default=512)
+    parser.add_argument("--load-8bit", action="store_true")
+    parser.add_argument("--load-4bit", action="store_true")
+    parser.add_argument("--debug", action="store_true")
+    args = parser.parse_args()
+    main(args)
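cli.py is meant to be launched as a module (python -m llava.serve.cli --model-path ... --image-file ...), but the same entry point can be driven programmatically by filling in the Namespace that argparse would produce. The values below are illustrative only; the checkpoint and image paths are placeholders:

    from argparse import Namespace
    from llava.serve.cli import main

    main(Namespace(
        model_path="path/to/llava-checkpoint",
        model_base=None,
        image_file="path/to/image.jpg",
        device="cuda",
        conv_mode=None,
        temperature=0.2,
        max_new_tokens=512,
        load_8bit=False,
        load_4bit=False,
        debug=False,
    ))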