Upload folder using huggingface_hub

Files changed:
- .gitattributes +1 -0
- README.md +46 -0
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- added_tokens.json +12 -0
- merges.txt +0 -0
- runs/May01_22-53-54_r-dewvinci-joey-bot-abpg1at2-e211f-kicn8/events.out.tfevents.1746140089.r-dewvinci-joey-bot-abpg1at2-e211f-kicn8.81.0 +2 -2
- special_tokens_map.json +30 -0
- tokenizer.json +3 -0
- tokenizer_config.json +112 -0
- training_args.bin +3 -0
- training_params.json +49 -0
- vocab.json +0 -0
.gitattributes
CHANGED

```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED

---
tags:
- autotrain
- text-generation-inference
- text-generation
- peft
library_name: transformers
base_model: microsoft/Phi-4-mini-instruct
widget:
- messages:
  - role: user
    content: What is your favorite condiment?
license: other
---

# Model Trained Using AutoTrain

This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).

# Usage

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "PATH_TO_THIS_REPO"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    torch_dtype='auto'
).eval()

# Prompt content: "hi"
messages = [
    {"role": "user", "content": "hi"}
]

input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
output_ids = model.generate(input_ids.to('cuda'))
response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)

# Model response: "Hello! How can I assist you today?"
print(response)
```
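Note that this commit uploads a PEFT adapter rather than merged weights (see adapter_config.json and adapter_model.safetensors below), so the adapter can also be attached to the base model explicitly. A minimal sketch, assuming the peft library is installed; "PATH_TO_THIS_REPO" is the same placeholder used in the README above:

```python
# Minimal sketch: load the base model, then attach this repo's LoRA adapter.
# Assumes peft is installed; PATH_TO_THIS_REPO is a placeholder, as in the README.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-mini-instruct",
    device_map="auto",
    torch_dtype="auto",
)
model = PeftModel.from_pretrained(base, "PATH_TO_THIS_REPO").eval()
tokenizer = AutoTokenizer.from_pretrained("PATH_TO_THIS_REPO")
```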
adapter_config.json
ADDED

```json
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "microsoft/Phi-4-mini-instruct",
  "bias": "none",
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 16,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "qkv_proj",
    "gate_up_proj",
    "down_proj",
    "o_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
```
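For reference, the same configuration can be expressed with peft's LoraConfig; a sketch mirroring the fields above (rank 16, alpha 32, dropout 0.05, targeting the attention and MLP projections):

```python
# Sketch: the adapter_config.json above expressed as a peft LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["qkv_proj", "gate_up_proj", "down_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```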
adapter_model.safetensors
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:183a613d1c8931af67172efdc790f7ad1e73cc476b9d7d9bc3f2aa5cfb1ce99d
size 5008223128
```
added_tokens.json
ADDED

```json
{
  "<|/tool_call|>": 200026,
  "<|/tool|>": 200024,
  "<|assistant|>": 200019,
  "<|end|>": 200020,
  "<|system|>": 200022,
  "<|tag|>": 200028,
  "<|tool_call|>": 200025,
  "<|tool_response|>": 200027,
  "<|tool|>": 200023,
  "<|user|>": 200021
}
```
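These ids sit just above the base vocabulary (the <|endoftext|> token is 199999 per tokenizer_config.json below). A quick sketch of a sanity check, assuming the tokenizer is loaded from this repo:

```python
# Sketch: verify the added special tokens resolve to the ids listed above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("PATH_TO_THIS_REPO")  # placeholder path
assert tok.convert_tokens_to_ids("<|user|>") == 200021
assert tok.convert_tokens_to_ids("<|assistant|>") == 200019
assert tok.convert_tokens_to_ids("<|end|>") == 200020
```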
merges.txt
ADDED
The diff for this file is too large to render.
runs/May01_22-53-54_r-dewvinci-joey-bot-abpg1at2-e211f-kicn8/events.out.tfevents.1746140089.r-dewvinci-joey-bot-abpg1at2-e211f-kicn8.81.0
CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3cf30e18c30f2533ae0fa17ef41900635fd193884a57f8848694dd411b85a8e1
+size 9460
```
special_tokens_map.json
ADDED

```json
{
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenizer.json
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:382cc235b56c725945e149cc25f191da667c836655efd0857b004320e90e91ea
size 15524095
```
tokenizer_config.json
ADDED

```json
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "199999": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200018": {
      "content": "<|endofprompt|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200019": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200020": {
      "content": "<|end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200021": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200022": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200023": {
      "content": "<|tool|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200024": {
      "content": "<|/tool|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200025": {
      "content": "<|tool_call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200026": {
      "content": "<|/tool_call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200027": {
      "content": "<|tool_response|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200028": {
      "content": "<|tag|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|endoftext|>",
  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": {},
  "model_max_length": 2048,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
```
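Reading the chat_template above: each message renders as <|role|>content<|end|>, a system message carrying a non-null "tools" field additionally embeds the tools between <|tool|> and <|/tool|>, and add_generation_prompt appends <|assistant|>. A sketch of what this produces for a one-turn conversation, assuming the tokenizer is loaded from this repo:

```python
# Sketch: render a one-turn conversation through the chat_template above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("PATH_TO_THIS_REPO")  # placeholder path
messages = [{"role": "user", "content": "hi"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # expected from the template: <|user|>hi<|end|><|assistant|>
```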
training_args.bin
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:83e196fb1e6b756186c5bd9fbf13ed713d63c34b458643dbc56c4b0752139b94
size 5624
```
training_params.json
ADDED

```json
{
  "model": "microsoft/Phi-4-mini-instruct",
  "project_name": "autotrain-5hlcf-bzdsw",
  "data_path": "autotrain-5hlcf-bzdsw/autotrain-data",
  "train_split": "train",
  "valid_split": null,
  "add_eos_token": true,
  "block_size": 1024,
  "model_max_length": 2048,
  "padding": "right",
  "trainer": "sft",
  "use_flash_attention_2": false,
  "log": "tensorboard",
  "disable_gradient_checkpointing": false,
  "logging_steps": -1,
  "eval_strategy": "epoch",
  "save_total_limit": 1,
  "auto_find_batch_size": false,
  "mixed_precision": "fp16",
  "lr": 3e-05,
  "epochs": 3,
  "batch_size": 2,
  "warmup_ratio": 0.1,
  "gradient_accumulation": 4,
  "optimizer": "adamw_torch",
  "scheduler": "linear",
  "weight_decay": 0.0,
  "max_grad_norm": 1.0,
  "seed": 42,
  "chat_template": "tokenizer",
  "quantization": "int4",
  "target_modules": "all-linear",
  "merge_adapter": false,
  "peft": true,
  "lora_r": 16,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "model_ref": null,
  "dpo_beta": 0.1,
  "max_prompt_length": 128,
  "max_completion_length": null,
  "prompt_text_column": "autotrain_prompt",
  "text_column": "autotrain_text",
  "rejected_text_column": "autotrain_rejected_text",
  "push_to_hub": true,
  "username": "DewVinci",
  "unsloth": false,
  "distributed_backend": "ddp"
}
```
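For orientation, the optimizer-side settings above map roughly onto transformers TrainingArguments as sketched below. This is an approximation of what AutoTrain configures internally, not its exact wiring; note the effective batch size is 2 × 4 gradient-accumulation steps = 8 per device.

```python
# Rough sketch: the optimizer/schedule fields above as TrainingArguments.
# This approximates AutoTrain's internal setup; exact wiring may differ.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="autotrain-5hlcf-bzdsw",
    learning_rate=3e-5,
    num_train_epochs=3,
    per_device_train_batch_size=2,   # effective batch: 2 * 4 accumulation = 8
    gradient_accumulation_steps=4,
    warmup_ratio=0.1,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                       # "mixed_precision": "fp16"
    save_total_limit=1,
    report_to="tensorboard",
)
```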
vocab.json
ADDED
The diff for this file is too large to render.