Create image_classification_peft_lora.ipynb
image_classification_peft_lora.ipynb
ADDED
@@ -0,0 +1,712 @@
1 |
+
Introduction
|
2 |
+
In this notebook, we will learn how to use LoRA from 🤗 PEFT to fine-tune an image classification model by ONLY using 0.77% of the original trainable parameters of the model.
|
3 |
+
|
4 |
+
LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are merged with the original model parameters. For more details, check out the original LoRA paper.
|
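To make the idea concrete, here is a minimal sketch of LoRA for a single linear layer (illustrative only, not the actual PEFT internals); the hidden size, rank, and alpha match the values we use later in this notebook.

import torch

hidden, r, alpha = 768, 16, 16
W = torch.randn(hidden, hidden)    # frozen pre-trained weight
A = torch.randn(r, hidden) * 0.01  # trainable low-rank factor
B = torch.randn(hidden, r) * 0.01  # trainable low-rank factor (LoRA zero-initializes this; pretend it has been trained)
x = torch.randn(1, hidden)

# Training-time view: the low-rank path runs alongside the frozen weight.
y_train = x @ W.T + (alpha / r) * (x @ A.T) @ B.T
# Inference-time view: the update is merged into the weight once.
y_merged = x @ (W + (alpha / r) * (B @ A)).T
assert torch.allclose(y_train, y_merged, atol=1e-5)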
5 |
+
|
6 |
+
Let's get started by installing the dependencies.
|
7 |
+
|
8 |
+
Note that this notebook builds on top of the official image classification example notebook.
|
9 |
+
|
10 |
+
Install dependencies
|
11 |
+
Here we're installing peft from source to ensure we have access to all of its bleeding-edge features.
|
12 |
+
|
13 |
+
!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q
|
14 |
+
Installing build dependencies ... done
|
15 |
+
Getting requirements to build wheel ... done
|
16 |
+
Preparing metadata (pyproject.toml) ... done
|
17 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.3/6.3 MB 53.1 MB/s eta 0:00:00
|
18 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.7/199.7 KB 24.5 MB/s eta 0:00:00
|
19 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 81.4/81.4 KB 11.3 MB/s eta 0:00:00
|
20 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 462.8/462.8 KB 46.9 MB/s eta 0:00:00
|
21 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 190.3/190.3 KB 23.1 MB/s eta 0:00:00
|
22 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.6/7.6 MB 102.9 MB/s eta 0:00:00
|
23 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 213.0/213.0 KB 25.4 MB/s eta 0:00:00
|
24 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 132.0/132.0 KB 15.2 MB/s eta 0:00:00
|
25 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 76.3/76.3 MB 23.0 MB/s eta 0:00:00
|
26 |
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 140.6/140.6 KB 20.0 MB/s eta 0:00:00
|
27 |
+
Building wheel for peft (pyproject.toml) ... done
|
28 |
+
Authentication
|
29 |
+
We will share our fine-tuned model at the end of training. So, to do that, we just authenticate using our 🤗 token. This token is available from here. If you don't have a 🤗 account already, we highly encourage you to create one; it's free!
|
30 |
+
|
31 |
+
from huggingface_hub import notebook_login
|
32 |
+
|
33 |
+
notebook_login()
|
34 |
+
Token is valid.
|
35 |
+
Your token has been saved in your configured git credential helpers (store).
|
36 |
+
Your token has been saved to /root/.cache/huggingface/token
|
37 |
+
Login successful
|
38 |
+
Check the library versions
|
39 |
+
import transformers
|
40 |
+
import accelerate
|
41 |
+
import peft
|
42 |
+
===================================BUG REPORT===================================
|
43 |
+
Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues
|
44 |
+
================================================================================
|
45 |
+
print(f"Transformers version: {transformers.__version__}")
|
46 |
+
print(f"Accelerate version: {accelerate.__version__}")
|
47 |
+
print(f"PEFT version: {peft.__version__}")
|
48 |
+
Transformers version: 4.26.0
|
49 |
+
Accelerate version: 0.16.0
|
50 |
+
PEFT version: 0.1.0.dev0
|
51 |
+
Select a model checkpoint to fine-tune
|
52 |
+
model_checkpoint = "google/vit-base-patch16-224-in21k" # pre-trained model from which to fine-tune
|
53 |
+
Load a dataset
|
54 |
+
We're only loading the first 5000 instances from the training set of the Food-101 dataset to keep this example runtime short.
|
55 |
+
|
56 |
+
from datasets import load_dataset
|
57 |
+
|
58 |
+
dataset = load_dataset("food101", split="train[:5000]")
|
59 |
+
Downloading builder script: 0%| | 0.00/6.21k [00:00<?, ?B/s]
|
60 |
+
Downloading metadata: 0%| | 0.00/5.56k [00:00<?, ?B/s]
|
61 |
+
Downloading readme: 0%| | 0.00/10.3k [00:00<?, ?B/s]
|
62 |
+
Downloading and preparing dataset food101/default to /root/.cache/huggingface/datasets/food101/default/0.0.0/7cebe41a80fb2da3f08fcbef769c8874073a86346f7fb96dc0847d4dfc318295...
|
63 |
+
Downloading data: 0%| | 0.00/5.00G [00:00<?, ?B/s]
|
64 |
+
WARNING:datasets.download.download_manager:Computing checksums of downloaded files. They can be used for integrity verification. You can disable this by passing ignore_verifications=True to load_dataset
|
65 |
+
Computing checksums: 100%|##########| 1/1 [00:14<00:00, 14.25s/it]
|
66 |
+
Downloading data files: 0%| | 0/2 [00:00<?, ?it/s]
|
67 |
+
Downloading data: 0%| | 0.00/1.47M [00:00<?, ?B/s]
|
68 |
+
Downloading data: 0%| | 0.00/489k [00:00<?, ?B/s]
|
69 |
+
Generating train split: 0%| | 0/75750 [00:00<?, ? examples/s]
|
70 |
+
Generating validation split: 0%| | 0/25250 [00:00<?, ? examples/s]
|
71 |
+
Dataset food101 downloaded and prepared to /root/.cache/huggingface/datasets/food101/default/0.0.0/7cebe41a80fb2da3f08fcbef769c8874073a86346f7fb96dc0847d4dfc318295. Subsequent calls will reuse this data.
|
72 |
+
Prepare datasets for training and evaluation
|
73 |
+
Prepare the label2id and id2label dictionaries. These will come in handy when performing inference and for the model's metadata.
|
74 |
+
labels = dataset.features["label"].names
|
75 |
+
label2id, id2label = dict(), dict()
|
76 |
+
for i, label in enumerate(labels):
|
77 |
+
label2id[label] = i
|
78 |
+
id2label[i] = label
|
79 |
+
|
80 |
+
id2label[2]
|
81 |
+
'baklava'
|
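As a quick check of the two dictionaries (Food-101 has 101 classes, and label2id and id2label are inverses of each other):

print(len(labels))            # 101
print(label2id[id2label[2]])  # 2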
82 |
+
We load the image processor of the model we're fine-tuning.
|
83 |
+
from transformers import AutoImageProcessor
|
84 |
+
|
85 |
+
image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
|
86 |
+
image_processor
|
87 |
+
Downloading (…)rocessor_config.json: 0%| | 0.00/160 [00:00<?, ?B/s]
|
88 |
+
Downloading (…)lve/main/config.json: 0%| | 0.00/502 [00:00<?, ?B/s]
|
89 |
+
ViTImageProcessor {
|
90 |
+
"do_normalize": true,
|
91 |
+
"do_rescale": true,
|
92 |
+
"do_resize": true,
|
93 |
+
"image_mean": [
|
94 |
+
0.5,
|
95 |
+
0.5,
|
96 |
+
0.5
|
97 |
+
],
|
98 |
+
"image_processor_type": "ViTImageProcessor",
|
99 |
+
"image_std": [
|
100 |
+
0.5,
|
101 |
+
0.5,
|
102 |
+
0.5
|
103 |
+
],
|
104 |
+
"resample": 2,
|
105 |
+
"rescale_factor": 0.00392156862745098,
|
106 |
+
"size": {
|
107 |
+
"height": 224,
|
108 |
+
"width": 224
|
109 |
+
}
|
110 |
+
}
|
111 |
+
As one might notice, the image_processor holds useful information such as the size to which the training and evaluation images should be resized and the statistics used to normalize the pixel values.
|
112 |
+
|
113 |
+
Using the image processor, we prepare transformation functions for the datasets. These functions include augmentation and pixel scaling.
|
114 |
+
from torchvision.transforms import (
|
115 |
+
CenterCrop,
|
116 |
+
Compose,
|
117 |
+
Normalize,
|
118 |
+
RandomHorizontalFlip,
|
119 |
+
RandomResizedCrop,
|
120 |
+
Resize,
|
121 |
+
ToTensor,
|
122 |
+
)
|
123 |
+
|
124 |
+
normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
|
125 |
+
train_transforms = Compose(
|
126 |
+
[
|
127 |
+
RandomResizedCrop(image_processor.size["height"]),
|
128 |
+
RandomHorizontalFlip(),
|
129 |
+
ToTensor(),
|
130 |
+
normalize,
|
131 |
+
]
|
132 |
+
)
|
133 |
+
|
134 |
+
val_transforms = Compose(
|
135 |
+
[
|
136 |
+
Resize(image_processor.size["height"]),
|
137 |
+
CenterCrop(image_processor.size["height"]),
|
138 |
+
ToTensor(),
|
139 |
+
normalize,
|
140 |
+
]
|
141 |
+
)
|
142 |
+
|
143 |
+
|
144 |
+
def preprocess_train(example_batch):
|
145 |
+
"""Apply train_transforms across a batch."""
|
146 |
+
example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]]
|
147 |
+
return example_batch
|
148 |
+
|
149 |
+
|
150 |
+
def preprocess_val(example_batch):
|
151 |
+
"""Apply val_transforms across a batch."""
|
152 |
+
example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]]
|
153 |
+
return example_batch
|
154 |
+
We split our mini dataset into training and validation.
|
155 |
+
# split up training into training + validation
|
156 |
+
splits = dataset.train_test_split(test_size=0.1)
|
157 |
+
train_ds = splits["train"]
|
158 |
+
val_ds = splits["test"]
|
159 |
+
We set the transformation functions on the datasets accordingly.
|
160 |
+
train_ds.set_transform(preprocess_train)
|
161 |
+
val_ds.set_transform(preprocess_val)
|
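As a quick sanity check (the transforms are applied lazily, on access), indexing an example should now yield a pixel_values tensor of the expected shape:

print(train_ds[0]["pixel_values"].shape)  # torch.Size([3, 224, 224])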
162 |
+
Load and prepare a model
|
163 |
+
In this section, we first load the model we want to fine-tune.
|
164 |
+
|
165 |
+
def print_trainable_parameters(model):
|
166 |
+
"""
|
167 |
+
Prints the number of trainable parameters in the model.
|
168 |
+
"""
|
169 |
+
trainable_params = 0
|
170 |
+
all_param = 0
|
171 |
+
for _, param in model.named_parameters():
|
172 |
+
all_param += param.numel()
|
173 |
+
if param.requires_grad:
|
174 |
+
trainable_params += param.numel()
|
175 |
+
print(
|
176 |
+
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
|
177 |
+
)
|
178 |
+
The get_peft_model() method that we will use in a moment wraps the original model to be fine-tuned as a PeftModel. So, it's important for us to initialize the original model correctly. As such, we initialize it by specifying label2id and id2label so that AutoModelForImageClassification can append a newly initialized classification head to the underlying model, adapted for our dataset. We can confirm this from the warning below:
|
179 |
+
|
180 |
+
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.weight', 'classifier.bias']
|
181 |
+
from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
|
182 |
+
|
183 |
+
model = AutoModelForImageClassification.from_pretrained(
|
184 |
+
model_checkpoint,
|
185 |
+
label2id=label2id,
|
186 |
+
id2label=id2label,
|
187 |
+
ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
|
188 |
+
)
|
189 |
+
print_trainable_parameters(model)
|
190 |
+
Downloading (…)"pytorch_model.bin";: 0%| | 0.00/346M [00:00<?, ?B/s]
|
191 |
+
Some weights of the model checkpoint at google/vit-base-patch16-224-in21k were not used when initializing ViTForImageClassification: ['pooler.dense.weight', 'pooler.dense.bias']
|
192 |
+
- This IS expected if you are initializing ViTForImageClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
|
193 |
+
- This IS NOT expected if you are initializing ViTForImageClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
|
194 |
+
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.bias', 'classifier.weight']
|
195 |
+
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
196 |
+
trainable params: 85876325 || all params: 85876325 || trainable%: 100.00
|
197 |
+
Also, take note of the total number of trainable parameters of the model: it's 100%! We'll compare this number to that of the LoRA model.
|
198 |
+
|
199 |
+
We now wrap model as a PeftModel so that the "update" matrices are added in the right places.
|
200 |
+
|
201 |
+
from peft import LoraConfig, get_peft_model
|
202 |
+
|
203 |
+
config = LoraConfig(
|
204 |
+
r=16,
|
205 |
+
lora_alpha=16,
|
206 |
+
target_modules=["query", "value"],
|
207 |
+
lora_dropout=0.1,
|
208 |
+
bias="none",
|
209 |
+
modules_to_save=["classifier"],
|
210 |
+
)
|
211 |
+
lora_model = get_peft_model(model, config)
|
212 |
+
print_trainable_parameters(lora_model)
|
213 |
+
trainable params: 667493 || all params: 86466149 || trainable%: 0.77
|
214 |
+
Let's unpack what's going on here.
|
215 |
+
|
216 |
+
In order for LoRA to take effect, we need to specify the target modules in LoraConfig so that get_peft_model() knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" query and value respectively, we specify them accordingly in the target_modules argument of LoraConfig.
|
217 |
+
|
218 |
+
We also specify modules_to_save. After we wrap our base model (model) with get_peft_model() along with the config, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. The frozen parameters, however, also include those of the randomly initialized classifier. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify modules_to_save. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like save_pretrained() and push_to_hub().
|
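As a quick sanity check, we can list the non-LoRA parameters that remain trainable after wrapping; this sketch assumes the classifier module's name contains "classifier", as it does for ViTForImageClassification:

for name, param in lora_model.named_parameters():
    if param.requires_grad and "lora" not in name:
        print(name)  # expected: only the classifier weight and bias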
219 |
+
|
220 |
+
Regarding the other parameters:
|
221 |
+
|
222 |
+
r: The rank (dimension) used by the LoRA update matrices.
|
223 |
+
alpha: Scaling factor.
|
224 |
+
bias: Specifies if the bias parameters should be trained. "none" denotes that none of the bias parameters will be trained.
|
225 |
+
r and alpha together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance the trade-off between end performance and compute efficiency.
|
226 |
+
|
227 |
+
We can also see how many parameters we're actually training. Since we're interested in performing parameter-efficient fine-tuning, we should expect to see far fewer trainable parameters in lora_model than in the original model, which is indeed the case here.
|
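For intuition, the 667,493 figure can be reproduced with back-of-the-envelope arithmetic, assuming ViT-Base (12 layers, hidden size 768) and our 101-class head:

r, hidden, num_layers, num_classes = 16, 768, 12, 101
lora_per_matrix = r * hidden + hidden * r        # A is (r, hidden), B is (hidden, r)
lora_total = num_layers * 2 * lora_per_matrix    # query and value in every attention block
classifier = hidden * num_classes + num_classes  # weight + bias of the newly added head
print(lora_total + classifier)                   # 667493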
228 |
+
|
229 |
+
Training arguments
|
230 |
+
We will leverage 🤗 Trainer for fine-tuning. It accepts several arguments which we wrap using TrainingArguments.
|
231 |
+
|
232 |
+
from transformers import TrainingArguments, Trainer
|
233 |
+
|
234 |
+
|
235 |
+
model_name = model_checkpoint.split("/")[-1]
|
236 |
+
batch_size = 128
|
237 |
+
|
238 |
+
args = TrainingArguments(
|
239 |
+
f"{model_name}-finetuned-lora-food101",
|
240 |
+
remove_unused_columns=False,
|
241 |
+
evaluation_strategy="epoch",
|
242 |
+
save_strategy="epoch",
|
243 |
+
learning_rate=5e-3,
|
244 |
+
per_device_train_batch_size=batch_size,
|
245 |
+
gradient_accumulation_steps=4,
|
246 |
+
per_device_eval_batch_size=batch_size,
|
247 |
+
fp16=True,
|
248 |
+
num_train_epochs=5,
|
249 |
+
logging_steps=10,
|
250 |
+
load_best_model_at_end=True,
|
251 |
+
metric_for_best_model="accuracy",
|
252 |
+
push_to_hub=True,
|
253 |
+
label_names=["labels"],
|
254 |
+
)
|
255 |
+
Some things to note here:
|
256 |
+
|
257 |
+
We're using a larger batch size since there is only a handful of parameters to train.
|
258 |
+
We're using a larger learning rate than usual (which would be around 1e-5, for example).
|
259 |
+
All of these things are a byproduct of the fact that we're training only a small number of parameters. This can potentially also reduce the need to conduct expensive hyperparameter tuning experiments.
|
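For reference, the effective batch size and the number of optimization steps implied by these arguments can be worked out directly; they match the Trainer log further below:

per_device_bs, grad_accum, epochs, num_train_examples = 128, 4, 5, 4500
effective_batch_size = per_device_bs * grad_accum                 # 512
steps_per_epoch = -(-num_train_examples // effective_batch_size)  # ceil(4500 / 512) = 9
print(effective_batch_size, steps_per_epoch * epochs)             # 512 45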
260 |
+
|
261 |
+
Prepare evaluation metric
|
262 |
+
import numpy as np
|
263 |
+
import evaluate
|
264 |
+
|
265 |
+
metric = evaluate.load("accuracy")
|
266 |
+
|
267 |
+
|
268 |
+
# the compute_metrics function takes a Named Tuple as input:
|
269 |
+
# predictions, which are the logits of the model as Numpy arrays,
|
270 |
+
# and label_ids, which are the ground-truth labels as Numpy arrays.
|
271 |
+
def compute_metrics(eval_pred):
|
272 |
+
"""Computes accuracy on a batch of predictions"""
|
273 |
+
predictions = np.argmax(eval_pred.predictions, axis=1)
|
274 |
+
return metric.compute(predictions=predictions, references=eval_pred.label_ids)
|
275 |
+
Downloading builder script: 0%| | 0.00/4.20k [00:00<?, ?B/s]
|
276 |
+
Collation function
|
277 |
+
This is used by Trainer to gather a batch of training and evaluation examples and prepare them in a format that is acceptable to the underlying model.
|
278 |
+
|
279 |
+
import torch
|
280 |
+
|
281 |
+
|
282 |
+
def collate_fn(examples):
|
283 |
+
pixel_values = torch.stack([example["pixel_values"] for example in examples])
|
284 |
+
labels = torch.tensor([example["label"] for example in examples])
|
285 |
+
return {"pixel_values": pixel_values, "labels": labels}
|
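As a quick usage example, collating two samples should produce batched tensors of the expected shapes:

batch = collate_fn([train_ds[0], train_ds[1]])
print(batch["pixel_values"].shape, batch["labels"].shape)  # torch.Size([2, 3, 224, 224]) torch.Size([2])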
286 |
+
Train and evaluate
|
287 |
+
trainer = Trainer(
|
288 |
+
model,
|
289 |
+
args,
|
290 |
+
train_dataset=train_ds,
|
291 |
+
eval_dataset=val_ds,
|
292 |
+
tokenizer=image_processor,
|
293 |
+
compute_metrics=compute_metrics,
|
294 |
+
data_collator=collate_fn,
|
295 |
+
)
|
296 |
+
train_results = trainer.train()
|
297 |
+
Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.
|
298 |
+
WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.
|
299 |
+
Download file pytorch_model.bin: 0%| | 8.00k/330M [00:00<?, ?B/s]
|
300 |
+
Download file runs/Feb07_02-43-38_319afa680fd7/1675737843.2328734/events.out.tfevents.1675737843.319afa680fd7.…
|
301 |
+
Download file runs/Feb07_02-50-30_319afa680fd7/events.out.tfevents.1675738246.319afa680fd7.10047.0: 100%|#####…
|
302 |
+
Download file runs/Feb07_03-56-51_319afa680fd7/1675742273.001745/events.out.tfevents.1675742273.319afa680fd7.2…
|
303 |
+
Download file training_args.bin: 100%|##########| 3.50k/3.50k [00:00<?, ?B/s]
|
304 |
+
Download file runs/Feb07_03-56-51_319afa680fd7/events.out.tfevents.1675742272.319afa680fd7.27769.0: 100%|#####…
|
305 |
+
Clean file runs/Feb07_02-43-38_319afa680fd7/1675737843.2328734/events.out.tfevents.1675737843.319afa680fd7.718…
|
306 |
+
Download file runs/Feb07_02-50-30_319afa680fd7/1675738246.1183074/events.out.tfevents.1675738246.319afa680fd7.…
|
307 |
+
Clean file runs/Feb07_02-50-30_319afa680fd7/events.out.tfevents.1675738246.319afa680fd7.10047.0: 10%|# …
|
308 |
+
Download file runs/Feb07_02-43-38_319afa680fd7/events.out.tfevents.1675737843.319afa680fd7.7189.0: 100%|######…
|
309 |
+
Clean file runs/Feb07_03-56-51_319afa680fd7/1675742273.001745/events.out.tfevents.1675742273.319afa680fd7.2776…
|
310 |
+
Clean file training_args.bin: 29%|##8 | 1.00k/3.50k [00:00<?, ?B/s]
|
311 |
+
Clean file runs/Feb07_03-56-51_319afa680fd7/events.out.tfevents.1675742272.319afa680fd7.27769.0: 9%|9 …
|
312 |
+
Clean file runs/Feb07_02-50-30_319afa680fd7/1675738246.1183074/events.out.tfevents.1675738246.319afa680fd7.100…
|
313 |
+
Clean file runs/Feb07_02-43-38_319afa680fd7/events.out.tfevents.1675737843.319afa680fd7.7189.0: 10%|# …
|
314 |
+
Download file runs/Feb07_02-50-30_319afa680fd7/events.out.tfevents.1675738403.319afa680fd7.10047.2: 100%|#####…
|
315 |
+
Clean file runs/Feb07_02-50-30_319afa680fd7/events.out.tfevents.1675738403.319afa680fd7.10047.2: 100%|########…
|
316 |
+
Clean file pytorch_model.bin: 0%| | 1.00k/330M [00:00<?, ?B/s]
|
317 |
+
Using cuda_amp half precision backend
|
318 |
+
/usr/local/lib/python3.8/dist-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning
|
319 |
+
warnings.warn(
|
320 |
+
***** Running training *****
|
321 |
+
Num examples = 4500
|
322 |
+
Num Epochs = 5
|
323 |
+
Instantaneous batch size per device = 128
|
324 |
+
Total train batch size (w. parallel, distributed & accumulation) = 512
|
325 |
+
Gradient Accumulation steps = 4
|
326 |
+
Total optimization steps = 45
|
327 |
+
Number of trainable parameters = 667493
|
328 |
+
[45/45 04:44, Epoch 5/5]
|
329 |
+
Epoch Training Loss Validation Loss Accuracy
|
330 |
+
1 No log 0.506871 0.896000
|
331 |
+
2 2.162700 0.189141 0.946000
|
332 |
+
3 0.345100 0.144759 0.960000
|
333 |
+
4 0.211600 0.150886 0.958000
|
334 |
+
5 0.171100 0.149751 0.958000
|
335 |
+
|
336 |
+
***** Running Evaluation *****
|
337 |
+
Num examples = 500
|
338 |
+
Batch size = 128
|
339 |
+
Saving model checkpoint to vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-9
|
340 |
+
Configuration saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-9/config.json
|
341 |
+
Model weights saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-9/pytorch_model.bin
|
342 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-9/preprocessor_config.json
|
343 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/preprocessor_config.json
|
344 |
+
***** Running Evaluation *****
|
345 |
+
Num examples = 500
|
346 |
+
Batch size = 128
|
347 |
+
Saving model checkpoint to vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-18
|
348 |
+
Configuration saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-18/config.json
|
349 |
+
Model weights saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-18/pytorch_model.bin
|
350 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-18/preprocessor_config.json
|
351 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/preprocessor_config.json
|
352 |
+
***** Running Evaluation *****
|
353 |
+
Num examples = 500
|
354 |
+
Batch size = 128
|
355 |
+
Saving model checkpoint to vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-27
|
356 |
+
Configuration saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-27/config.json
|
357 |
+
Model weights saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-27/pytorch_model.bin
|
358 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-27/preprocessor_config.json
|
359 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/preprocessor_config.json
|
360 |
+
***** Running Evaluation *****
|
361 |
+
Num examples = 500
|
362 |
+
Batch size = 128
|
363 |
+
Saving model checkpoint to vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-36
|
364 |
+
Configuration saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-36/config.json
|
365 |
+
Model weights saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-36/pytorch_model.bin
|
366 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-36/preprocessor_config.json
|
367 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/preprocessor_config.json
|
368 |
+
***** Running Evaluation *****
|
369 |
+
Num examples = 500
|
370 |
+
Batch size = 128
|
371 |
+
Saving model checkpoint to vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-45
|
372 |
+
Configuration saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-45/config.json
|
373 |
+
Model weights saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-45/pytorch_model.bin
|
374 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-45/preprocessor_config.json
|
375 |
+
Image processor saved in vit-base-patch16-224-in21k-finetuned-lora-food101/preprocessor_config.json
|
376 |
+
|
377 |
+
|
378 |
+
Training completed. Do not forget to share your model on huggingface.co/models =)
|
379 |
+
|
380 |
+
|
381 |
+
Loading best model from vit-base-patch16-224-in21k-finetuned-lora-food101/checkpoint-27 (score: 0.96).
|
382 |
+
In just a few minutes, we have a fine-tuned model with 96% validation accuracy. Also, note that we used a very small subset of the training dataset, which definitely impacts the results.
|
383 |
+
|
384 |
+
trainer.evaluate(val_ds)
|
385 |
+
***** Running Evaluation *****
|
386 |
+
Num examples = 500
|
387 |
+
Batch size = 128
|
388 |
+
[4/4 01:48]
|
389 |
+
{'eval_loss': 0.14475855231285095,
|
390 |
+
'eval_accuracy': 0.96,
|
391 |
+
'eval_runtime': 3.5725,
|
392 |
+
'eval_samples_per_second': 139.958,
|
393 |
+
'eval_steps_per_second': 1.12,
|
394 |
+
'epoch': 5.0}
|
395 |
+
Sharing your model and inference
|
396 |
+
Once the fine-tuning is done, we can share the LoRA parameters with the community like so:
|
397 |
+
|
398 |
+
repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101"
|
399 |
+
lora_model.push_to_hub(repo_name)
|
400 |
+
Uploading the following files to sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101: adapter_config.json,adapter_model.bin
|
401 |
+
Upload 1 LFS files: 0%| | 0/1 [00:00<?, ?it/s]
|
402 |
+
adapter_model.bin: 0%| | 0.00/2.69M [00:00<?, ?B/s]
|
403 |
+
CommitInfo(commit_url='https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101/commit/64e17d1cda300041cbc67428242a3136060772a3', commit_message='Upload model', commit_description='', oid='64e17d1cda300041cbc67428242a3136060772a3', pr_url=None, pr_revision=None, pr_num=None)
|
404 |
+
When we call push_to_hub() on the lora_model, only the LoRA parameters along with any modules specified in modules_to_save are saved. If we take a look at the trained LoRA parameters, we see that they total only 2.6 MB! This greatly helps with portability, especially when fine-tuning a very large model (such as BLOOM).
|
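The same applies when saving locally. A minimal sketch (the directory name is arbitrary): calling save_pretrained() on the PeftModel writes only the small adapter files rather than the full ~330 MB ViT checkpoint.

lora_model.save_pretrained("vit-lora-food101-adapter")
# The directory contains just adapter_config.json and the adapter weights (a few MB).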
405 |
+
|
406 |
+
Next, we see how to load the LoRA-updated parameters along with our base model for inference. When we wrap a base model with PeftModel, the modifications are done in place. To mitigate any concerns that might stem from these in-place modifications, we newly initialize our base model just like we did earlier and construct our inference model.
|
407 |
+
|
408 |
+
from peft import PeftConfig, PeftModel
|
409 |
+
|
410 |
+
|
411 |
+
config = PeftConfig.from_pretrained(repo_name)
|
412 |
+
model = AutoModelForImageClassification.from_pretrained(
|
413 |
+
config.base_model_name_or_path,
|
414 |
+
label2id=label2id,
|
415 |
+
id2label=id2label,
|
416 |
+
ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
|
417 |
+
)
|
418 |
+
# Load the Lora model
|
419 |
+
inference_model = PeftModel.from_pretrained(model, repo_name)
|
420 |
+
loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--google--vit-base-patch16-224-in21k/snapshots/1ba429d32753f33a0660b80ac6f43a3c80c18938/config.json
|
421 |
+
Model config ViTConfig {
|
422 |
+
"_name_or_path": "google/vit-base-patch16-224-in21k",
|
423 |
+
"architectures": [
|
424 |
+
"ViTModel"
|
425 |
+
],
|
426 |
+
"attention_probs_dropout_prob": 0.0,
|
427 |
+
"encoder_stride": 16,
|
428 |
+
"hidden_act": "gelu",
|
429 |
+
"hidden_dropout_prob": 0.0,
|
430 |
+
"hidden_size": 768,
|
431 |
+
"id2label": {
|
432 |
+
"0": "apple_pie",
|
433 |
+
"1": "baby_back_ribs",
|
434 |
+
"2": "baklava",
|
435 |
+
"3": "beef_carpaccio",
|
436 |
+
"4": "beef_tartare",
|
437 |
+
"5": "beet_salad",
|
438 |
+
"6": "beignets",
|
439 |
+
"7": "bibimbap",
|
440 |
+
"8": "bread_pudding",
|
441 |
+
"9": "breakfast_burrito",
|
442 |
+
"10": "bruschetta",
|
443 |
+
"11": "caesar_salad",
|
444 |
+
"12": "cannoli",
|
445 |
+
"13": "caprese_salad",
|
446 |
+
"14": "carrot_cake",
|
447 |
+
"15": "ceviche",
|
448 |
+
"16": "cheesecake",
|
449 |
+
"17": "cheese_plate",
|
450 |
+
"18": "chicken_curry",
|
451 |
+
"19": "chicken_quesadilla",
|
452 |
+
"20": "chicken_wings",
|
453 |
+
"21": "chocolate_cake",
|
454 |
+
"22": "chocolate_mousse",
|
455 |
+
"23": "churros",
|
456 |
+
"24": "clam_chowder",
|
457 |
+
"25": "club_sandwich",
|
458 |
+
"26": "crab_cakes",
|
459 |
+
"27": "creme_brulee",
|
460 |
+
"28": "croque_madame",
|
461 |
+
"29": "cup_cakes",
|
462 |
+
"30": "deviled_eggs",
|
463 |
+
"31": "donuts",
|
464 |
+
"32": "dumplings",
|
465 |
+
"33": "edamame",
|
466 |
+
"34": "eggs_benedict",
|
467 |
+
"35": "escargots",
|
468 |
+
"36": "falafel",
|
469 |
+
"37": "filet_mignon",
|
470 |
+
"38": "fish_and_chips",
|
471 |
+
"39": "foie_gras",
|
472 |
+
"40": "french_fries",
|
473 |
+
"41": "french_onion_soup",
|
474 |
+
"42": "french_toast",
|
475 |
+
"43": "fried_calamari",
|
476 |
+
"44": "fried_rice",
|
477 |
+
"45": "frozen_yogurt",
|
478 |
+
"46": "garlic_bread",
|
479 |
+
"47": "gnocchi",
|
480 |
+
"48": "greek_salad",
|
481 |
+
"49": "grilled_cheese_sandwich",
|
482 |
+
"50": "grilled_salmon",
|
483 |
+
"51": "guacamole",
|
484 |
+
"52": "gyoza",
|
485 |
+
"53": "hamburger",
|
486 |
+
"54": "hot_and_sour_soup",
|
487 |
+
"55": "hot_dog",
|
488 |
+
"56": "huevos_rancheros",
|
489 |
+
"57": "hummus",
|
490 |
+
"58": "ice_cream",
|
491 |
+
"59": "lasagna",
|
492 |
+
"60": "lobster_bisque",
|
493 |
+
"61": "lobster_roll_sandwich",
|
494 |
+
"62": "macaroni_and_cheese",
|
495 |
+
"63": "macarons",
|
496 |
+
"64": "miso_soup",
|
497 |
+
"65": "mussels",
|
498 |
+
"66": "nachos",
|
499 |
+
"67": "omelette",
|
500 |
+
"68": "onion_rings",
|
501 |
+
"69": "oysters",
|
502 |
+
"70": "pad_thai",
|
503 |
+
"71": "paella",
|
504 |
+
"72": "pancakes",
|
505 |
+
"73": "panna_cotta",
|
506 |
+
"74": "peking_duck",
|
507 |
+
"75": "pho",
|
508 |
+
"76": "pizza",
|
509 |
+
"77": "pork_chop",
|
510 |
+
"78": "poutine",
|
511 |
+
"79": "prime_rib",
|
512 |
+
"80": "pulled_pork_sandwich",
|
513 |
+
"81": "ramen",
|
514 |
+
"82": "ravioli",
|
515 |
+
"83": "red_velvet_cake",
|
516 |
+
"84": "risotto",
|
517 |
+
"85": "samosa",
|
518 |
+
"86": "sashimi",
|
519 |
+
"87": "scallops",
|
520 |
+
"88": "seaweed_salad",
|
521 |
+
"89": "shrimp_and_grits",
|
522 |
+
"90": "spaghetti_bolognese",
|
523 |
+
"91": "spaghetti_carbonara",
|
524 |
+
"92": "spring_rolls",
|
525 |
+
"93": "steak",
|
526 |
+
"94": "strawberry_shortcake",
|
527 |
+
"95": "sushi",
|
528 |
+
"96": "tacos",
|
529 |
+
"97": "takoyaki",
|
530 |
+
"98": "tiramisu",
|
531 |
+
"99": "tuna_tartare",
|
532 |
+
"100": "waffles"
|
533 |
+
},
|
534 |
+
"image_size": 224,
|
535 |
+
"initializer_range": 0.02,
|
536 |
+
"intermediate_size": 3072,
|
537 |
+
"label2id": {
|
538 |
+
"apple_pie": 0,
|
539 |
+
"baby_back_ribs": 1,
|
540 |
+
"baklava": 2,
|
541 |
+
"beef_carpaccio": 3,
|
542 |
+
"beef_tartare": 4,
|
543 |
+
"beet_salad": 5,
|
544 |
+
"beignets": 6,
|
545 |
+
"bibimbap": 7,
|
546 |
+
"bread_pudding": 8,
|
547 |
+
"breakfast_burrito": 9,
|
548 |
+
"bruschetta": 10,
|
549 |
+
"caesar_salad": 11,
|
550 |
+
"cannoli": 12,
|
551 |
+
"caprese_salad": 13,
|
552 |
+
"carrot_cake": 14,
|
553 |
+
"ceviche": 15,
|
554 |
+
"cheese_plate": 17,
|
555 |
+
"cheesecake": 16,
|
556 |
+
"chicken_curry": 18,
|
557 |
+
"chicken_quesadilla": 19,
|
558 |
+
"chicken_wings": 20,
|
559 |
+
"chocolate_cake": 21,
|
560 |
+
"chocolate_mousse": 22,
|
561 |
+
"churros": 23,
|
562 |
+
"clam_chowder": 24,
|
563 |
+
"club_sandwich": 25,
|
564 |
+
"crab_cakes": 26,
|
565 |
+
"creme_brulee": 27,
|
566 |
+
"croque_madame": 28,
|
567 |
+
"cup_cakes": 29,
|
568 |
+
"deviled_eggs": 30,
|
569 |
+
"donuts": 31,
|
570 |
+
"dumplings": 32,
|
571 |
+
"edamame": 33,
|
572 |
+
"eggs_benedict": 34,
|
573 |
+
"escargots": 35,
|
574 |
+
"falafel": 36,
|
575 |
+
"filet_mignon": 37,
|
576 |
+
"fish_and_chips": 38,
|
577 |
+
"foie_gras": 39,
|
578 |
+
"french_fries": 40,
|
579 |
+
"french_onion_soup": 41,
|
580 |
+
"french_toast": 42,
|
581 |
+
"fried_calamari": 43,
|
582 |
+
"fried_rice": 44,
|
583 |
+
"frozen_yogurt": 45,
|
584 |
+
"garlic_bread": 46,
|
585 |
+
"gnocchi": 47,
|
586 |
+
"greek_salad": 48,
|
587 |
+
"grilled_cheese_sandwich": 49,
|
588 |
+
"grilled_salmon": 50,
|
589 |
+
"guacamole": 51,
|
590 |
+
"gyoza": 52,
|
591 |
+
"hamburger": 53,
|
592 |
+
"hot_and_sour_soup": 54,
|
593 |
+
"hot_dog": 55,
|
594 |
+
"huevos_rancheros": 56,
|
595 |
+
"hummus": 57,
|
596 |
+
"ice_cream": 58,
|
597 |
+
"lasagna": 59,
|
598 |
+
"lobster_bisque": 60,
|
599 |
+
"lobster_roll_sandwich": 61,
|
600 |
+
"macaroni_and_cheese": 62,
|
601 |
+
"macarons": 63,
|
602 |
+
"miso_soup": 64,
|
603 |
+
"mussels": 65,
|
604 |
+
"nachos": 66,
|
605 |
+
"omelette": 67,
|
606 |
+
"onion_rings": 68,
|
607 |
+
"oysters": 69,
|
608 |
+
"pad_thai": 70,
|
609 |
+
"paella": 71,
|
610 |
+
"pancakes": 72,
|
611 |
+
"panna_cotta": 73,
|
612 |
+
"peking_duck": 74,
|
613 |
+
"pho": 75,
|
614 |
+
"pizza": 76,
|
615 |
+
"pork_chop": 77,
|
616 |
+
"poutine": 78,
|
617 |
+
"prime_rib": 79,
|
618 |
+
"pulled_pork_sandwich": 80,
|
619 |
+
"ramen": 81,
|
620 |
+
"ravioli": 82,
|
621 |
+
"red_velvet_cake": 83,
|
622 |
+
"risotto": 84,
|
623 |
+
"samosa": 85,
|
624 |
+
"sashimi": 86,
|
625 |
+
"scallops": 87,
|
626 |
+
"seaweed_salad": 88,
|
627 |
+
"shrimp_and_grits": 89,
|
628 |
+
"spaghetti_bolognese": 90,
|
629 |
+
"spaghetti_carbonara": 91,
|
630 |
+
"spring_rolls": 92,
|
631 |
+
"steak": 93,
|
632 |
+
"strawberry_shortcake": 94,
|
633 |
+
"sushi": 95,
|
634 |
+
"tacos": 96,
|
635 |
+
"takoyaki": 97,
|
636 |
+
"tiramisu": 98,
|
637 |
+
"tuna_tartare": 99,
|
638 |
+
"waffles": 100
|
639 |
+
},
|
640 |
+
"layer_norm_eps": 1e-12,
|
641 |
+
"model_type": "vit",
|
642 |
+
"num_attention_heads": 12,
|
643 |
+
"num_channels": 3,
|
644 |
+
"num_hidden_layers": 12,
|
645 |
+
"patch_size": 16,
|
646 |
+
"qkv_bias": true,
|
647 |
+
"transformers_version": "4.26.0"
|
648 |
+
}
|
649 |
+
|
650 |
+
loading weights file pytorch_model.bin from cache at /root/.cache/huggingface/hub/models--google--vit-base-patch16-224-in21k/snapshots/1ba429d32753f33a0660b80ac6f43a3c80c18938/pytorch_model.bin
|
651 |
+
Some weights of the model checkpoint at google/vit-base-patch16-224-in21k were not used when initializing ViTForImageClassification: ['pooler.dense.weight', 'pooler.dense.bias']
|
652 |
+
- This IS expected if you are initializing ViTForImageClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
|
653 |
+
- This IS NOT expected if you are initializing ViTForImageClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
|
654 |
+
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.bias', 'classifier.weight']
|
655 |
+
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
656 |
+
Downloading (…)"adapter_model.bin";: 0%| | 0.00/2.69M [00:00<?, ?B/s]
|
657 |
+
Don't worry about the warnings; they're harmless.
|
658 |
+
|
659 |
+
Let's now fetch a sample for inference.
|
660 |
+
|
661 |
+
from PIL import Image
|
662 |
+
import requests
|
663 |
+
|
664 |
+
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg"
|
665 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
666 |
+
image
|
667 |
+
|
668 |
+
We first instantiate an image_processor from the fine-tuned model repo.
|
669 |
+
|
670 |
+
image_processor = AutoImageProcessor.from_pretrained(repo_name)
|
671 |
+
loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--sayakpaul--vit-base-patch16-224-in21k-finetuned-lora-food101/snapshots/fa2503cc7d91e0dd69728c1dc66ed80d7bd3289b/preprocessor_config.json
|
672 |
+
Image processor ViTImageProcessor {
|
673 |
+
"do_normalize": true,
|
674 |
+
"do_rescale": true,
|
675 |
+
"do_resize": true,
|
676 |
+
"image_mean": [
|
677 |
+
0.5,
|
678 |
+
0.5,
|
679 |
+
0.5
|
680 |
+
],
|
681 |
+
"image_processor_type": "ViTImageProcessor",
|
682 |
+
"image_std": [
|
683 |
+
0.5,
|
684 |
+
0.5,
|
685 |
+
0.5
|
686 |
+
],
|
687 |
+
"resample": 2,
|
688 |
+
"rescale_factor": 0.00392156862745098,
|
689 |
+
"size": {
|
690 |
+
"height": 224,
|
691 |
+
"width": 224
|
692 |
+
}
|
693 |
+
}
|
694 |
+
|
695 |
+
We then prepare the sample for inference.
|
696 |
+
|
697 |
+
# prepare image for the model
|
698 |
+
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
|
699 |
+
print(encoding.pixel_values.shape)
|
700 |
+
torch.Size([1, 3, 224, 224])
|
701 |
+
And run inference!
|
702 |
+
|
703 |
+
import torch
|
704 |
+
|
705 |
+
# forward pass
|
706 |
+
with torch.no_grad():
|
707 |
+
outputs = inference_model(**encoding)
|
708 |
+
logits = outputs.logits
|
709 |
+
|
710 |
+
predicted_class_idx = logits.argmax(-1).item()
|
711 |
+
print("Predicted class:", inference_model.config.id2label[predicted_class_idx])
|
712 |
+
Predicted class: beignets
|