Athipan01 committed
Commit f7d483b (verified)
1 Parent(s): 6c4c2f4

End of training

README.md CHANGED
```diff
@@ -1,36 +1,60 @@
 ---
-license: apache-2.0
+library_name: peft
+base_model: microsoft/codebert-base
 tags:
-- code
-- roberta
-- peft
-- lora
-- fine-tuned
-- emotion
-- natural-language-processing
-- code-generation
-- prompt-engineering
-- code-understanding
-- text-to-prompt
-- instruction-tuning
+- generated_from_trainer
 datasets:
-- CM/codexglue_code2text_php
-- CM/codexglue_code2text_python
-- CM/codexglue_code2text_javascript
-- microsoft/codexglue_method_generation
-language:
-- th
-- en
-metrics:
-- accuracy
-- bertscore
-- bleu
-base_model:
-- microsoft/codebert-base
-pipeline_tag: text2text-generation
-library_name: adapter-transformers
+- code_search_net
+model-index:
+- name: codebert-model
+  results: []
 ---
 
-# CodeBERT V2 (LoRA Fine-Tuned)
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
 
-Fine-tuned version of CodeBERT with LoRA on `dair-ai/emotion` dataset. Trained using Hugging Face Transformers and PEFT.
+# codebert-model
+
+This model is a fine-tuned version of [microsoft/codebert-base](https://huggingface.co/microsoft/codebert-base) on the code_search_net dataset.
+It achieves the following results on the evaluation set:
+- eval_loss: 0.8346
+- eval_model_preparation_time: 0.0057
+- eval_accuracy: {'accuracy': 0.21967491508976225}
+- eval_f1: {'f1': 0.0}
+- eval_runtime: 9384.6382
+- eval_samples_per_second: 0.878
+- eval_steps_per_second: 0.11
+- step: 0
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+- lr_scheduler_type: linear
+- num_epochs: 3
+
+### Framework versions
+
+- PEFT 0.15.2
+- Transformers 4.51.3
+- Pytorch 2.6.0+cu124
+- Datasets 3.6.0
+- Tokenizers 0.21.1
```
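The updated card leaves its usage sections blank, so here is a minimal, hedged sketch of running the adapter for classification. It assumes the adapter is published as `Athipan01/codebert-model` (the repo this commit belongs to) and that the head is binary; `num_labels` is not recorded anywhere in this diff, so adjust it to match the trained head.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

# Load the frozen base model with a classification head, then attach the
# LoRA adapter on top of it. num_labels=2 is an assumption.
base = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/codebert-base", num_labels=2
)
model = PeftModel.from_pretrained(base, "Athipan01/codebert-model")
tokenizer = AutoTokenizer.from_pretrained("microsoft/codebert-base")

inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1))
```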
adapter_config.json CHANGED
```diff
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "Athipan01/codebert-model",
+  "base_model_name_or_path": "microsoft/codebert-base",
   "bias": "none",
   "corda_config": null,
   "eva_config": null,
@@ -13,7 +13,7 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 16,
+  "lora_alpha": 32,
   "lora_bias": false,
   "lora_dropout": 0.1,
   "megatron_config": null,
@@ -27,8 +27,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "value",
-    "query"
+    "query",
+    "value"
   ],
   "task_type": "SEQ_CLS",
   "trainable_token_indices": null,
```
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16c0c47103351969fb000f9300207536eef5fdcc26d55c3b817dde8af0b5c081
-size 3567808
+oid sha256:64c1852e9de5ac6ebc9a8aa5faf0c85e3d8ce81aef69b36350dba25275eea1cc
+size 3555504
```
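The adapter file shrank slightly (3,567,808 → 3,555,504 bytes). To confirm what it actually contains, `safetensors` can list the stored tensors without loading them; the local path below is an assumption (fetch the file first, e.g. with `huggingface_hub.hf_hub_download`).

```python
from safetensors import safe_open

# Lazily open the adapter file and print every stored tensor with its shape.
# Expect lora_A/lora_B pairs on the "query"/"value" projections, plus the
# classifier head saved for the SEQ_CLS task.
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_slice(name).get_shape()))
```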
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:164d9c8a6e014f5d4ddb7e7fbfd90f0ff7765bf99b7467b521e1ff7ca8170cfc
-size 5304
+oid sha256:e5d2992ae7cf00ab35f42ff5caa407f1021c209acfca749548cdb8b0180e72f0
+size 5368
```
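`training_args.bin` is the serialized `TrainingArguments` object the Trainer saves alongside the model. Based on the hyperparameters listed in the model card above, it should correspond roughly to the sketch below; `output_dir` is an illustrative assumption.

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="codebert-model",  # assumption: the real value is not in this diff
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch",          # OptimizerNames.ADAMW_TORCH in the card
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```

The exact saved values can be recovered with `torch.load("training_args.bin", weights_only=False)`, which returns the original `TrainingArguments` instance.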