davidberenstein1957 committed
Commit 1907e26 · verified · 1 Parent(s): 2cb5e68

Add files using upload-large-folder tool

Files changed (4)
  1. README.md +137 -0
  2. config.json +66 -0
  3. model.safetensors +3 -0
  4. smash_config.json +20 -0
README.md ADDED
@@ -0,0 +1,137 @@
---
library_name: transformers
tags:
- pruna-ai
---

# Model Card for PrunaAI/test-tiny-random-llama4-smashed

This model was created using the [pruna](https://github.com/PrunaAI/pruna) library. Pruna is a model optimization framework built for developers, enabling you to deliver more efficient models with minimal implementation overhead.

## Usage

First things first, you need to install the pruna library:

```bash
pip install pruna
```

You can then load this model using the following code:

```python
from pruna import PrunaModel

loaded_model = PrunaModel.from_hub("PrunaAI/test-tiny-random-llama4-smashed")
```

After loading the model, you can use the inference methods of the original model.

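For example, since the underlying architecture is a bare `Llama4TextModel` (no language-modeling head), a forward pass returns hidden states rather than logits. A minimal sketch, assuming `PrunaModel` forwards calls to the wrapped transformers model; the repository ships no tokenizer, so the token ids below are hypothetical values within the vocabulary:

```python
import torch

from pruna import PrunaModel

# Load the smashed model from the Hub.
model = PrunaModel.from_hub("PrunaAI/test-tiny-random-llama4-smashed")

# No tokenizer is included in this repo, so feed raw token ids
# (any values below vocab_size = 202048; 200000 is the BOS token).
input_ids = torch.tensor([[200000, 17, 42, 7]])

# If the model was loaded on GPU, move input_ids to the same device first.
with torch.no_grad():
    outputs = model(input_ids=input_ids)

# Llama4TextModel is a backbone: the output is hidden states.
print(outputs.last_hidden_state.shape)  # (1, 4, 16) given hidden_size = 16
```
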
## Smash Configuration

The compression configuration of the model is stored in the `smash_config.json` file.

```json
{
    "batcher": null,
    "cacher": null,
    "compiler": null,
    "pruner": null,
    "quantizer": null,
    "max_batch_size": 1,
    "device": "cuda",
    "save_fns": [],
    "load_fns": [
        "transformers"
    ],
    "reapply_after_load": {
        "pruner": null,
        "quantizer": null,
        "cacher": null,
        "compiler": null,
        "batcher": null
    }
}
```

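Every algorithm slot above is `null`, meaning no compression algorithm was actually applied, consistent with this being a tiny test model. To check this programmatically, a minimal standard-library sketch (the local path assumes you have downloaded the repository beforehand, e.g. with `huggingface_hub.snapshot_download`):

```python
import json

# Path assumes the repo was downloaded locally beforehand.
with open("smash_config.json") as f:
    smash_config = json.load(f)

# The algorithm slots record which optimizations were applied.
slots = ["batcher", "cacher", "compiler", "pruner", "quantizer"]
applied = {slot: smash_config[slot] for slot in slots}
print(applied)  # every slot is None here: no compression was applied
```
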
## Model Configuration

The configuration of the model is stored in the `config.json` file.

```json
{
    "config": {
        "architectures": [
            "Llama4TextModel"
        ],
        "attention_bias": false,
        "attention_chunk_size": 8192,
        "attention_dropout": 0.0,
        "attn_scale": 0.1,
        "attn_temperature_tuning": 4,
        "bos_token_id": 200000,
        "cache_implementation": "hybrid",
        "eos_token_id": [
            200001,
            200007,
            200008
        ],
        "floor_scale": 8192,
        "for_llm_compressor": false,
        "head_dim": 8,
        "hidden_act": "silu",
        "hidden_size": 16,
        "initializer_range": 0.02,
        "interleave_moe_layer_step": 1,
        "intermediate_size": 32,
        "intermediate_size_mlp": 64,
        "max_position_embeddings": 10485760,
        "model_type": "llama4_text",
        "moe_layers": [
            0,
            1,
            2,
            3,
            4
        ],
        "no_rope_layers": [
            1,
            1,
            1,
            0,
            1
        ],
        "num_attention_heads": 10,
        "num_experts_per_tok": 1,
        "num_hidden_layers": 5,
        "num_key_value_heads": 2,
        "num_local_experts": 4,
        "output_router_logits": false,
        "pad_token_id": 200018,
        "rms_norm_eps": 1e-05,
        "rope_scaling": {
            "factor": 8.0,
            "high_freq_factor": 4.0,
            "low_freq_factor": 1.0,
            "original_max_position_embeddings": 8192,
            "rope_type": "llama3"
        },
        "rope_theta": 500000.0,
        "router_aux_loss_coef": 0.001,
        "router_jitter_noise": 0.0,
        "tie_word_embeddings": false,
        "torch_dtype": "float32",
        "transformers_version": "4.51.3",
        "use_cache": true,
        "use_qk_norm": true,
        "vocab_size": 202048
    }
}
```

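The hyperparameters above describe a deliberately tiny network (hidden size 16, 5 layers, 4 local experts), which is what makes this repo useful as a test fixture. They can also be read without downloading the weights; a minimal sketch, assuming a transformers release that registers the `llama4_text` model type (4.51+):

```python
from transformers import AutoConfig

# Fetch only the configuration from the Hub; no weights are downloaded.
config = AutoConfig.from_pretrained("PrunaAI/test-tiny-random-llama4-smashed")

print(config.model_type)         # "llama4_text"
print(config.hidden_size)        # 16: tiny on purpose
print(config.num_hidden_layers)  # 5
print(config.num_local_experts)  # 4 experts per MoE layer
```
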
## 🌍 Join the Pruna AI community!

[![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
[![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
[![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
[![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.com/invite/rskEr4BZJx)
[![Reddit](https://img.shields.io/reddit/subreddit-subscribers/PrunaAI?style=social)](https://www.reddit.com/r/PrunaAI/)
config.json ADDED
@@ -0,0 +1,66 @@
{
    "architectures": [
        "Llama4TextModel"
    ],
    "attention_bias": false,
    "attention_chunk_size": 8192,
    "attention_dropout": 0.0,
    "attn_scale": 0.1,
    "attn_temperature_tuning": 4,
    "bos_token_id": 200000,
    "cache_implementation": "hybrid",
    "eos_token_id": [
        200001,
        200007,
        200008
    ],
    "floor_scale": 8192,
    "for_llm_compressor": false,
    "head_dim": 8,
    "hidden_act": "silu",
    "hidden_size": 16,
    "initializer_range": 0.02,
    "interleave_moe_layer_step": 1,
    "intermediate_size": 32,
    "intermediate_size_mlp": 64,
    "max_position_embeddings": 10485760,
    "model_type": "llama4_text",
    "moe_layers": [
        0,
        1,
        2,
        3,
        4
    ],
    "no_rope_layers": [
        1,
        1,
        1,
        0,
        1
    ],
    "num_attention_heads": 10,
    "num_experts_per_tok": 1,
    "num_hidden_layers": 5,
    "num_key_value_heads": 2,
    "num_local_experts": 4,
    "output_router_logits": false,
    "pad_token_id": 200018,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {
        "factor": 8.0,
        "high_freq_factor": 4.0,
        "low_freq_factor": 1.0,
        "original_max_position_embeddings": 8192,
        "rope_type": "llama3"
    },
    "rope_theta": 500000.0,
    "router_aux_loss_coef": 0.001,
    "router_jitter_noise": 0.0,
    "tie_word_embeddings": false,
    "torch_dtype": "float32",
    "transformers_version": "4.51.3",
    "use_cache": true,
    "use_qk_norm": true,
    "vocab_size": 202048
}
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8dd8a64d1ebcf93bec89f3d856962742de0f7d94931d1c2e456d3c30b291bd0e
size 13154832
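This is a Git LFS pointer: the `oid` and `size` identify the real ~13 MB weights blob, which the Hub resolves transparently on download. Once the actual file is present locally, its contents can be inspected without loading tensors into memory; a minimal sketch using the safetensors API (the local path is an assumption):

```python
from safetensors import safe_open

# Reads only the JSON header of the file; tensor data stays on disk.
with safe_open("model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())
```
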
smash_config.json ADDED
@@ -0,0 +1,20 @@
{
    "batcher": null,
    "cacher": null,
    "compiler": null,
    "pruner": null,
    "quantizer": null,
    "max_batch_size": 1,
    "device": "cuda",
    "save_fns": [],
    "load_fns": [
        "transformers"
    ],
    "reapply_after_load": {
        "pruner": null,
        "quantizer": null,
        "cacher": null,
        "compiler": null,
        "batcher": null
    }
}