felcas93 committed on
Commit 4fb7846 · verified · 1 Parent(s): 96b1616

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .ipynb_checkpoints/training_summary_iam-checkpoint.json +19 -0
  2. README.md +62 -0
  3. adapter_config.json +43 -0
  4. adapter_model.safetensors +3 -0
  5. checkpoint-1000/README.md +209 -0
  6. checkpoint-1000/adapter_config.json +43 -0
  7. checkpoint-1000/adapter_model.safetensors +3 -0
  8. checkpoint-1000/merges.txt +0 -0
  9. checkpoint-1000/optimizer.pt +3 -0
  10. checkpoint-1000/rng_state.pth +3 -0
  11. checkpoint-1000/scheduler.pt +3 -0
  12. checkpoint-1000/special_tokens_map.json +63 -0
  13. checkpoint-1000/tokenizer.json +0 -0
  14. checkpoint-1000/tokenizer_config.json +357 -0
  15. checkpoint-1000/trainer_state.json +234 -0
  16. checkpoint-1000/training_args.bin +3 -0
  17. checkpoint-1000/vocab.json +0 -0
  18. checkpoint-1500/README.md +209 -0
  19. checkpoint-1500/adapter_config.json +43 -0
  20. checkpoint-1500/adapter_model.safetensors +3 -0
  21. checkpoint-1500/merges.txt +0 -0
  22. checkpoint-1500/optimizer.pt +3 -0
  23. checkpoint-1500/rng_state.pth +3 -0
  24. checkpoint-1500/scheduler.pt +3 -0
  25. checkpoint-1500/special_tokens_map.json +63 -0
  26. checkpoint-1500/tokenizer.json +0 -0
  27. checkpoint-1500/tokenizer_config.json +357 -0
  28. checkpoint-1500/trainer_state.json +334 -0
  29. checkpoint-1500/training_args.bin +3 -0
  30. checkpoint-1500/vocab.json +0 -0
  31. checkpoint-2000/README.md +209 -0
  32. checkpoint-2000/adapter_config.json +43 -0
  33. checkpoint-2000/adapter_model.safetensors +3 -0
  34. checkpoint-2000/merges.txt +0 -0
  35. checkpoint-2000/optimizer.pt +3 -0
  36. checkpoint-2000/rng_state.pth +3 -0
  37. checkpoint-2000/scheduler.pt +3 -0
  38. checkpoint-2000/special_tokens_map.json +63 -0
  39. checkpoint-2000/tokenizer.json +0 -0
  40. checkpoint-2000/tokenizer_config.json +357 -0
  41. checkpoint-2000/trainer_state.json +434 -0
  42. checkpoint-2000/training_args.bin +3 -0
  43. checkpoint-2000/vocab.json +0 -0
  44. checkpoint-2025/README.md +209 -0
  45. checkpoint-2025/adapter_config.json +43 -0
  46. checkpoint-2025/adapter_model.safetensors +3 -0
  47. checkpoint-2025/merges.txt +0 -0
  48. checkpoint-2025/optimizer.pt +3 -0
  49. checkpoint-2025/rng_state.pth +3 -0
  50. checkpoint-2025/scheduler.pt +3 -0
.ipynb_checkpoints/training_summary_iam-checkpoint.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "service": "IAM",
+ "epochs": 3.0,
+ "global_step": 2025,
+ "train_runtime_sec": 10042.064852237701,
+ "train_samples": 5400,
+ "eval_samples": 600,
+ "used_bitsandbytes": false,
+ "eval_metrics": {
+ "eval_loss": 0.06809257715940475,
+ "eval_runtime": 106.1734,
+ "eval_samples_per_second": 5.651,
+ "eval_steps_per_second": 0.706,
+ "eval_entropy": 0.06766340777277946,
+ "eval_num_tokens": 16535475.0,
+ "eval_mean_token_accuracy": 0.9819109582901001,
+ "epoch": 3.0
+ }
+ }
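
As a quick sanity check on the numbers above, throughput can be derived from the recorded runtime and sample counts (a minimal sketch, assuming a local copy of this file saved as `training_summary_iam.json`):

```python
# Derive throughput from the training summary above; the local filename is an assumption.
import json

with open("training_summary_iam.json") as f:
    summary = json.load(f)

steps_per_sec = summary["global_step"] / summary["train_runtime_sec"]
samples_per_sec = summary["train_samples"] * summary["epochs"] / summary["train_runtime_sec"]
print(f"{steps_per_sec:.3f} steps/s, {samples_per_sec:.2f} samples/s")  # ≈ 0.202 steps/s, ≈ 1.61 samples/s
```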
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ base_model: bigcode/starcoder2-7b
+ library_name: peft
+ model_name: starcoder2_7b_lora_iam
+ tags:
+ - base_model:adapter:bigcode/starcoder2-7b
+ - lora
+ - sft
+ - transformers
+ - trl
+ licence: license
+ pipeline_tag: text-generation
+ ---
+
+ # Model Card for starcoder2_7b_lora_iam
+
+ This model is a fine-tuned version of [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="None", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - PEFT 0.18.0
+ - TRL: 0.25.1
+ - Transformers: 4.57.3
+ - Pytorch: 2.8.0+cu128
+ - Datasets: 4.4.1
+ - Tokenizers: 0.22.1
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
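
Note: the auto-generated Quick start in the README above points `pipeline(...)` at `model="None"`, which will not load, and StarCoder2 is a base code model with no chat template, so the chat-style prompt is unlikely to behave as intended. A minimal working sketch instead loads the LoRA adapter on top of the base model with PEFT; the adapter repo id below is an assumption inferred from the committer name and the card's `model_name`:

```python
# A sketch of loading this adapter with transformers + peft; the adapter repo id
# is a hypothetical value, not confirmed by this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "bigcode/starcoder2-7b"
adapter_id = "felcas93/starcoder2_7b_lora_iam"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA weights

# StarCoder2 is a base code model: prompt with plain text, not chat messages.
prompt = "def create_iam_role(role_name: str):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```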
adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder2-7b",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
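
For readers reconstructing the setup: the config above corresponds to roughly the following PEFT `LoraConfig` (a reconstruction for illustration, not the author's training script). With r=16 on the four attention projections of StarCoder2-7B this works out to roughly 14.7M trainable parameters, which at fp32 is consistent with the ~58.8 MB `adapter_model.safetensors` below.

```python
# Approximate LoRA configuration implied by adapter_config.json (a reconstruction).
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                 # LoRA rank
    lora_alpha=32,        # scaling; effective scale alpha/r = 2
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```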
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f9a9fbadd0b35342e1993f10bde6b04ed43f8e16cbc6027b05d97c66c518dc6
+ size 58754616
checkpoint-1000/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: bigcode/starcoder2-7b
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:bigcode/starcoder2-7b
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder2-7b",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e3cb8d4fbaa7a095b55b5c135ac12b4522be09f2a6d9e78aa916a9986ea5dd4
+ size 58754616
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caae63283d10a16eea1cfb241a9775608cd96a6b92aeb1e6a2c8a4d61d76fff9
+ size 117660107
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:405bb81709f17b3ac4eb03c3d986bf79a4c2b03d08f1d49fe2f55aec79a559d8
+ size 14645
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b06201d9403a12d93447a0c39b7483b78fdd17e56d2db9cc8f99c3ba8cf744f1
+ size 1465
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,357 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<fim_prefix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<fim_middle>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<fim_suffix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<fim_pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<repo_name>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<file_sep>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<issue_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "8": {
+ "content": "<issue_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "9": {
+ "content": "<issue_closed>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "10": {
+ "content": "<jupyter_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "11": {
+ "content": "<jupyter_text>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "12": {
+ "content": "<jupyter_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "13": {
+ "content": "<jupyter_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "14": {
+ "content": "<jupyter_script>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "15": {
+ "content": "<empty_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "16": {
+ "content": "<code_to_intermediate>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "17": {
+ "content": "<intermediate_to_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "18": {
+ "content": "<pr>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "19": {
+ "content": "<pr_status>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "20": {
+ "content": "<pr_is_merged>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "21": {
+ "content": "<pr_base>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "22": {
+ "content": "<pr_file>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "23": {
+ "content": "<pr_base_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "24": {
+ "content": "<pr_diff>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "25": {
+ "content": "<pr_diff_hunk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "26": {
+ "content": "<pr_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "27": {
+ "content": "<pr_event_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28": {
+ "content": "<pr_review>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "29": {
+ "content": "<pr_review_state>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30": {
+ "content": "<pr_review_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "31": {
+ "content": "<pr_in_reply_to_review_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32": {
+ "content": "<pr_in_reply_to_comment_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "33": {
+ "content": "<pr_diff_hunk_comment_line>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "34": {
+ "content": "<NAME>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "35": {
+ "content": "<EMAIL>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "36": {
+ "content": "<KEY>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "37": {
+ "content": "<PASSWORD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,234 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4814814814814814,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "entropy": 1.805763495862484,
+ "epoch": 0.07407407407407407,
+ "grad_norm": 0.6806204319000244,
+ "learning_rate": 8.032786885245902e-05,
+ "loss": 1.5484,
+ "mean_token_accuracy": 0.6665007689595223,
+ "num_tokens": 408149.0,
+ "step": 50
+ },
+ {
+ "entropy": 0.41144788280129435,
+ "epoch": 0.14814814814814814,
+ "grad_norm": 0.38455039262771606,
+ "learning_rate": 9.990765991730485e-05,
+ "loss": 0.3321,
+ "mean_token_accuracy": 0.9129975068569184,
+ "num_tokens": 816230.0,
+ "step": 100
+ },
+ {
+ "entropy": 0.16323913749307395,
+ "epoch": 0.2222222222222222,
+ "grad_norm": 0.29704713821411133,
+ "learning_rate": 9.950545603782162e-05,
+ "loss": 0.1447,
+ "mean_token_accuracy": 0.9618216013908386,
+ "num_tokens": 1224471.0,
+ "step": 150
+ },
+ {
+ "entropy": 0.1175146003998816,
+ "epoch": 0.2962962962962963,
+ "grad_norm": 0.35487300157546997,
+ "learning_rate": 9.878674879048427e-05,
+ "loss": 0.1071,
+ "mean_token_accuracy": 0.9733556269109249,
+ "num_tokens": 1632497.0,
+ "step": 200
+ },
+ {
+ "entropy": 0.1009879010822624,
+ "epoch": 0.37037037037037035,
+ "grad_norm": 0.17419321835041046,
+ "learning_rate": 9.775613308830824e-05,
+ "loss": 0.0925,
+ "mean_token_accuracy": 0.9769376286864281,
+ "num_tokens": 2041392.0,
+ "step": 250
+ },
+ {
+ "entropy": 0.09154447751119733,
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.20543242990970612,
+ "learning_rate": 9.642019796948866e-05,
+ "loss": 0.0836,
+ "mean_token_accuracy": 0.9787026332318782,
+ "num_tokens": 2450311.0,
+ "step": 300
+ },
+ {
+ "entropy": 0.08632300381548702,
+ "epoch": 0.5185185185185185,
+ "grad_norm": 0.17172595858573914,
+ "learning_rate": 9.478748447168449e-05,
+ "loss": 0.0812,
+ "mean_token_accuracy": 0.9789653661847114,
+ "num_tokens": 2858744.0,
+ "step": 350
+ },
+ {
+ "entropy": 0.08412999271415174,
+ "epoch": 0.5925925925925926,
+ "grad_norm": 0.1447569578886032,
+ "learning_rate": 9.28684310265789e-05,
+ "loss": 0.0805,
+ "mean_token_accuracy": 0.9786932443082332,
+ "num_tokens": 3265542.0,
+ "step": 400
+ },
+ {
+ "entropy": 0.08065679710358381,
+ "epoch": 0.6666666666666666,
+ "grad_norm": 0.19630704820156097,
+ "learning_rate": 9.067530672382544e-05,
+ "loss": 0.0773,
+ "mean_token_accuracy": 0.9797722736001014,
+ "num_tokens": 3674162.0,
+ "step": 450
+ },
+ {
+ "entropy": 0.07874332463368773,
+ "epoch": 0.7407407407407407,
+ "grad_norm": 0.08524929732084274,
+ "learning_rate": 8.822213287104348e-05,
+ "loss": 0.0762,
+ "mean_token_accuracy": 0.9801681047677994,
+ "num_tokens": 4082734.0,
+ "step": 500
+ },
+ {
+ "entropy": 0.07778633100911975,
+ "epoch": 0.8148148148148148,
+ "grad_norm": 0.10848797112703323,
+ "learning_rate": 8.552459335135381e-05,
+ "loss": 0.0753,
+ "mean_token_accuracy": 0.9801766823232174,
+ "num_tokens": 4491115.0,
+ "step": 550
+ },
+ {
+ "entropy": 0.07791180345229805,
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.12547598779201508,
+ "learning_rate": 8.259993435156559e-05,
+ "loss": 0.0752,
+ "mean_token_accuracy": 0.9802092918753624,
+ "num_tokens": 4899794.0,
+ "step": 600
+ },
+ {
+ "entropy": 0.07790746555663645,
+ "epoch": 0.9629629629629629,
+ "grad_norm": 0.0992884486913681,
+ "learning_rate": 7.946685410208296e-05,
+ "loss": 0.0759,
+ "mean_token_accuracy": 0.979848040342331,
+ "num_tokens": 5307342.0,
+ "step": 650
+ },
+ {
+ "entropy": 0.07524958597496152,
+ "epoch": 1.037037037037037,
+ "grad_norm": 0.086652472615242,
+ "learning_rate": 7.614538333345735e-05,
+ "loss": 0.0731,
+ "mean_token_accuracy": 0.9808213406801224,
+ "num_tokens": 5716156.0,
+ "step": 700
+ },
+ {
+ "entropy": 0.07525249728001654,
+ "epoch": 1.1111111111111112,
+ "grad_norm": 0.13188883662223816,
+ "learning_rate": 7.265675721386285e-05,
+ "loss": 0.0728,
+ "mean_token_accuracy": 0.9810096868872642,
+ "num_tokens": 6123905.0,
+ "step": 750
+ },
+ {
+ "entropy": 0.07601501471363008,
+ "epoch": 1.1851851851851851,
+ "grad_norm": 0.0819055363535881,
+ "learning_rate": 6.902327958623736e-05,
+ "loss": 0.0736,
+ "mean_token_accuracy": 0.9805573572218418,
+ "num_tokens": 6532143.0,
+ "step": 800
+ },
+ {
+ "entropy": 0.07429057988338172,
+ "epoch": 1.2592592592592593,
+ "grad_norm": 0.09344803541898727,
+ "learning_rate": 6.526818037306228e-05,
+ "loss": 0.0727,
+ "mean_token_accuracy": 0.9813062380254268,
+ "num_tokens": 6940424.0,
+ "step": 850
+ },
+ {
+ "entropy": 0.07440330957062542,
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.11321987956762314,
+ "learning_rate": 6.14154670604355e-05,
+ "loss": 0.0725,
+ "mean_token_accuracy": 0.9811275874078274,
+ "num_tokens": 7348719.0,
+ "step": 900
+ },
+ {
+ "entropy": 0.07393304943107068,
+ "epoch": 1.4074074074074074,
+ "grad_norm": 0.087185338139534,
+ "learning_rate": 5.7489771210944564e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807305666804313,
+ "num_tokens": 7756710.0,
+ "step": 950
+ },
+ {
+ "entropy": 0.07433672657236456,
+ "epoch": 1.4814814814814814,
+ "grad_norm": 0.10077723860740662,
+ "learning_rate": 5.351619098663021e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807634821534157,
+ "num_tokens": 8165027.0,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 2025,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.4107496965918106e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
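
The `log_history` above is the most direct record of the run; a minimal sketch for reading out the loss curve, assuming a locally downloaded copy of this file:

```python
# Print the trajectory recorded in trainer_state.json; the local filename is assumed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f'step {entry["step"]:>4}  loss={entry["loss"]:.4f}  '
          f'acc={entry["mean_token_accuracy"]:.4f}  lr={entry["learning_rate"]:.2e}')
```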
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99474405bb9e12c9e6287ba75ece0944d511441013aa0f9574c0a7847af2b991
+ size 6225
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: bigcode/starcoder2-7b
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:bigcode/starcoder2-7b
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder2-7b",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51440351365e4163d078c6553c2ce5af93f0ee4fd98568324dea27cbfa4f81ad
+ size 58754616
checkpoint-1500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec49af2f6ddb12761771490bd6bb517536c4a3fb21021f57c9c104e2230df302
+ size 117660107
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:519d2868f3fe16de7c023a3db67a6535bad4a88579318ddd99606f1ecc8e7cec
+ size 14645
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b10d09adf0489af5dc688ca572941043ef06e06d7551388176d2e6574474a5a
+ size 1465
checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,357 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<fim_prefix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<fim_middle>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<fim_suffix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<fim_pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<repo_name>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<file_sep>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<issue_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "8": {
+ "content": "<issue_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "9": {
+ "content": "<issue_closed>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "10": {
+ "content": "<jupyter_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "11": {
+ "content": "<jupyter_text>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "12": {
+ "content": "<jupyter_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "13": {
+ "content": "<jupyter_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "14": {
+ "content": "<jupyter_script>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "15": {
+ "content": "<empty_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "16": {
+ "content": "<code_to_intermediate>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "17": {
+ "content": "<intermediate_to_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "18": {
+ "content": "<pr>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "19": {
+ "content": "<pr_status>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "20": {
+ "content": "<pr_is_merged>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "21": {
+ "content": "<pr_base>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "22": {
+ "content": "<pr_file>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "23": {
+ "content": "<pr_base_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "24": {
+ "content": "<pr_diff>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "25": {
+ "content": "<pr_diff_hunk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "26": {
+ "content": "<pr_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "27": {
+ "content": "<pr_event_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28": {
+ "content": "<pr_review>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "29": {
+ "content": "<pr_review_state>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30": {
+ "content": "<pr_review_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "31": {
+ "content": "<pr_in_reply_to_review_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32": {
+ "content": "<pr_in_reply_to_comment_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "33": {
+ "content": "<pr_diff_hunk_comment_line>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "34": {
+ "content": "<NAME>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "35": {
+ "content": "<EMAIL>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "36": {
+ "content": "<KEY>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "37": {
+ "content": "<PASSWORD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.2222222222222223,
+ "eval_steps": 500,
+ "global_step": 1500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "entropy": 1.805763495862484,
+ "epoch": 0.07407407407407407,
+ "grad_norm": 0.6806204319000244,
+ "learning_rate": 8.032786885245902e-05,
+ "loss": 1.5484,
+ "mean_token_accuracy": 0.6665007689595223,
+ "num_tokens": 408149.0,
+ "step": 50
+ },
+ {
+ "entropy": 0.41144788280129435,
+ "epoch": 0.14814814814814814,
+ "grad_norm": 0.38455039262771606,
+ "learning_rate": 9.990765991730485e-05,
+ "loss": 0.3321,
+ "mean_token_accuracy": 0.9129975068569184,
+ "num_tokens": 816230.0,
+ "step": 100
+ },
+ {
+ "entropy": 0.16323913749307395,
+ "epoch": 0.2222222222222222,
+ "grad_norm": 0.29704713821411133,
+ "learning_rate": 9.950545603782162e-05,
+ "loss": 0.1447,
+ "mean_token_accuracy": 0.9618216013908386,
+ "num_tokens": 1224471.0,
+ "step": 150
+ },
+ {
+ "entropy": 0.1175146003998816,
+ "epoch": 0.2962962962962963,
+ "grad_norm": 0.35487300157546997,
+ "learning_rate": 9.878674879048427e-05,
+ "loss": 0.1071,
+ "mean_token_accuracy": 0.9733556269109249,
+ "num_tokens": 1632497.0,
+ "step": 200
+ },
+ {
+ "entropy": 0.1009879010822624,
+ "epoch": 0.37037037037037035,
+ "grad_norm": 0.17419321835041046,
+ "learning_rate": 9.775613308830824e-05,
+ "loss": 0.0925,
+ "mean_token_accuracy": 0.9769376286864281,
+ "num_tokens": 2041392.0,
+ "step": 250
+ },
+ {
+ "entropy": 0.09154447751119733,
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.20543242990970612,
+ "learning_rate": 9.642019796948866e-05,
+ "loss": 0.0836,
+ "mean_token_accuracy": 0.9787026332318782,
+ "num_tokens": 2450311.0,
+ "step": 300
+ },
+ {
+ "entropy": 0.08632300381548702,
+ "epoch": 0.5185185185185185,
+ "grad_norm": 0.17172595858573914,
+ "learning_rate": 9.478748447168449e-05,
+ "loss": 0.0812,
+ "mean_token_accuracy": 0.9789653661847114,
+ "num_tokens": 2858744.0,
+ "step": 350
+ },
+ {
+ "entropy": 0.08412999271415174,
+ "epoch": 0.5925925925925926,
+ "grad_norm": 0.1447569578886032,
+ "learning_rate": 9.28684310265789e-05,
+ "loss": 0.0805,
+ "mean_token_accuracy": 0.9786932443082332,
+ "num_tokens": 3265542.0,
+ "step": 400
+ },
+ {
+ "entropy": 0.08065679710358381,
+ "epoch": 0.6666666666666666,
+ "grad_norm": 0.19630704820156097,
+ "learning_rate": 9.067530672382544e-05,
+ "loss": 0.0773,
+ "mean_token_accuracy": 0.9797722736001014,
+ "num_tokens": 3674162.0,
+ "step": 450
+ },
+ {
+ "entropy": 0.07874332463368773,
+ "epoch": 0.7407407407407407,
+ "grad_norm": 0.08524929732084274,
+ "learning_rate": 8.822213287104348e-05,
+ "loss": 0.0762,
+ "mean_token_accuracy": 0.9801681047677994,
+ "num_tokens": 4082734.0,
+ "step": 500
+ },
+ {
+ "entropy": 0.07778633100911975,
+ "epoch": 0.8148148148148148,
+ "grad_norm": 0.10848797112703323,
+ "learning_rate": 8.552459335135381e-05,
+ "loss": 0.0753,
+ "mean_token_accuracy": 0.9801766823232174,
+ "num_tokens": 4491115.0,
+ "step": 550
+ },
+ {
+ "entropy": 0.07791180345229805,
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.12547598779201508,
+ "learning_rate": 8.259993435156559e-05,
+ "loss": 0.0752,
+ "mean_token_accuracy": 0.9802092918753624,
+ "num_tokens": 4899794.0,
+ "step": 600
+ },
+ {
+ "entropy": 0.07790746555663645,
+ "epoch": 0.9629629629629629,
+ "grad_norm": 0.0992884486913681,
+ "learning_rate": 7.946685410208296e-05,
+ "loss": 0.0759,
+ "mean_token_accuracy": 0.979848040342331,
+ "num_tokens": 5307342.0,
+ "step": 650
+ },
+ {
+ "entropy": 0.07524958597496152,
+ "epoch": 1.037037037037037,
+ "grad_norm": 0.086652472615242,
+ "learning_rate": 7.614538333345735e-05,
+ "loss": 0.0731,
+ "mean_token_accuracy": 0.9808213406801224,
+ "num_tokens": 5716156.0,
+ "step": 700
+ },
+ {
+ "entropy": 0.07525249728001654,
+ "epoch": 1.1111111111111112,
+ "grad_norm": 0.13188883662223816,
+ "learning_rate": 7.265675721386285e-05,
+ "loss": 0.0728,
+ "mean_token_accuracy": 0.9810096868872642,
+ "num_tokens": 6123905.0,
+ "step": 750
+ },
+ {
+ "entropy": 0.07601501471363008,
+ "epoch": 1.1851851851851851,
+ "grad_norm": 0.0819055363535881,
+ "learning_rate": 6.902327958623736e-05,
+ "loss": 0.0736,
+ "mean_token_accuracy": 0.9805573572218418,
+ "num_tokens": 6532143.0,
+ "step": 800
+ },
+ {
+ "entropy": 0.07429057988338172,
+ "epoch": 1.2592592592592593,
+ "grad_norm": 0.09344803541898727,
+ "learning_rate": 6.526818037306228e-05,
+ "loss": 0.0727,
+ "mean_token_accuracy": 0.9813062380254268,
+ "num_tokens": 6940424.0,
+ "step": 850
+ },
+ {
+ "entropy": 0.07440330957062542,
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.11321987956762314,
+ "learning_rate": 6.14154670604355e-05,
+ "loss": 0.0725,
+ "mean_token_accuracy": 0.9811275874078274,
+ "num_tokens": 7348719.0,
+ "step": 900
+ },
+ {
+ "entropy": 0.07393304943107068,
+ "epoch": 1.4074074074074074,
+ "grad_norm": 0.087185338139534,
+ "learning_rate": 5.7489771210944564e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807305666804313,
+ "num_tokens": 7756710.0,
+ "step": 950
+ },
+ {
+ "entropy": 0.07433672657236456,
+ "epoch": 1.4814814814814814,
+ "grad_norm": 0.10077723860740662,
+ "learning_rate": 5.351619098663021e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807634821534157,
+ "num_tokens": 8165027.0,
+ "step": 1000
+ },
+ {
+ "entropy": 0.07262381819076837,
+ "epoch": 1.5555555555555556,
+ "grad_norm": 0.05774468928575516,
+ "learning_rate": 4.952013068883795e-05,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9813279174268246,
+ "num_tokens": 8573428.0,
+ "step": 1050
+ },
+ {
+ "entropy": 0.07313465082086623,
+ "epoch": 1.6296296296296298,
+ "grad_norm": 0.156134694814682,
+ "learning_rate": 4.5527138340828776e-05,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9811334984004497,
+ "num_tokens": 8981661.0,
+ "step": 1100
+ },
+ {
+ "entropy": 0.07226949028670787,
+ "epoch": 1.7037037037037037,
+ "grad_norm": 0.060666773468256,
+ "learning_rate": 4.156274235153189e-05,
+ "loss": 0.071,
+ "mean_token_accuracy": 0.9813579262793064,
+ "num_tokens": 9390026.0,
+ "step": 1150
+ },
+ {
+ "entropy": 0.07272680706344545,
+ "epoch": 1.7777777777777777,
+ "grad_norm": 0.10639354586601257,
+ "learning_rate": 3.765228830469794e-05,
+ "loss": 0.0711,
+ "mean_token_accuracy": 0.981099860817194,
+ "num_tokens": 9798433.0,
+ "step": 1200
+ },
+ {
+ "entropy": 0.07212639102712273,
+ "epoch": 1.8518518518518519,
+ "grad_norm": 0.06637933105230331,
+ "learning_rate": 3.3820776916908857e-05,
+ "loss": 0.0711,
+ "mean_token_accuracy": 0.9812221045792103,
+ "num_tokens": 10206854.0,
+ "step": 1250
+ },
+ {
+ "entropy": 0.07320461552590132,
+ "epoch": 1.925925925925926,
+ "grad_norm": 0.10055620223283768,
+ "learning_rate": 3.0092704200428058e-05,
+ "loss": 0.0719,
+ "mean_token_accuracy": 0.9807316599786282,
+ "num_tokens": 10615186.0,
+ "step": 1300
+ },
+ {
+ "entropy": 0.07290558220818638,
+ "epoch": 2.0,
+ "grad_norm": 0.0677887499332428,
+ "learning_rate": 2.649190485277792e-05,
+ "loss": 0.0718,
+ "mean_token_accuracy": 0.9811730526387692,
+ "num_tokens": 11023650.0,
+ "step": 1350
+ },
+ {
+ "entropy": 0.07251124914735556,
+ "epoch": 2.074074074074074,
+ "grad_norm": 0.11045810580253601,
+ "learning_rate": 2.3041399874302905e-05,
+ "loss": 0.0709,
+ "mean_token_accuracy": 0.9809805656969547,
+ "num_tokens": 11430802.0,
+ "step": 1400
+ },
+ {
+ "entropy": 0.07181317125447094,
+ "epoch": 2.148148148148148,
+ "grad_norm": 0.07149960100650787,
+ "learning_rate": 1.976324938794482e-05,
+ "loss": 0.0708,
+ "mean_token_accuracy": 0.9813783176243305,
+ "num_tokens": 11839809.0,
+ "step": 1450
+ },
+ {
+ "entropy": 0.0717768538929522,
+ "epoch": 2.2222222222222223,
+ "grad_norm": 0.05869750306010246,
+ "learning_rate": 1.667841160219835e-05,
+ "loss": 0.0707,
+ "mean_token_accuracy": 0.9814658354222775,
+ "num_tokens": 12248612.0,
+ "step": 1500
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 2025,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.1165721390352794e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
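
`trainer_state.json` is plain JSON, so the per-step log above (loss, entropy, mean token accuracy, and learning rate at every 50 optimizer steps) can be inspected directly. A minimal sketch, assuming the checkpoint directory has been downloaded locally; the path below is illustrative:

```python
import json

# Illustrative local path to this checkpoint's state file.
with open("checkpoint-1500/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry corresponds to one logging event (logging_steps = 50).
for entry in state["log_history"]:
    print(
        f"step {entry['step']:>4}  "
        f"loss {entry['loss']:.4f}  "
        f"acc {entry['mean_token_accuracy']:.4f}  "
        f"lr {entry['learning_rate']:.2e}"
    )
```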
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99474405bb9e12c9e6287ba75ece0944d511441013aa0f9574c0a7847af2b991
+ size 6225
checkpoint-1500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: bigcode/starcoder2-7b
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:bigcode/starcoder2-7b
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
checkpoint-2000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder2-7b",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
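
The adapter config above describes a rank-16 LoRA (alpha 32, dropout 0.05) applied to the q/k/v/o attention projections of `bigcode/starcoder2-7b`. A minimal loading sketch with `transformers` and `peft`; the local checkpoint path, dtype, and device placement are assumptions, not values recorded in the config:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

adapter_dir = "checkpoint-2000"  # illustrative local path to this checkpoint

tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
base = AutoModelForCausalLM.from_pretrained(
    "bigcode/starcoder2-7b",
    torch_dtype=torch.bfloat16,  # assumed; pick what your hardware supports
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_dir)  # attaches the LoRA weights
model.eval()
```

For deployment, `model.merge_and_unload()` can fold the adapter into the base weights, at the cost of no longer being able to detach it.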
checkpoint-2000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53788087d95ab47ebd4d9d954a183a37247ae189fa7fe014e4d2b77153e50819
+ size 58754616
checkpoint-2000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a6b49f0449304a1a162741c5817dd937ed429729770ff1c516073c81d70ef17
+ size 117660107
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:334838e136c1a65b2f6261b884ef1f869ed891021ed06ed0a4c6c7f127147cfa
+ size 14645
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:591d55d90adc2dbaeabd46529428d7ba9aa1ba92348998be2f0a794eeb555bea
+ size 1465
checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
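
The special-tokens map preserves StarCoder2's fill-in-the-middle markers (`<fim_prefix>`, `<fim_suffix>`, `<fim_middle>`), so the checkpoint can still be prompted in the standard prefix-suffix-middle layout. A sketch reusing the `model` and `tokenizer` from the loading example above; the code fragment being completed is purely illustrative:

```python
prefix = "def add(a, b):\n    "
suffix = "\n    return result\n"
prompt = f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32, pad_token_id=tokenizer.eos_token_id)

# Decode only the newly generated tokens: the model's guess for the middle span.
middle = tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(middle)
```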
checkpoint-2000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,357 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<fim_prefix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<fim_middle>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<fim_suffix>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<fim_pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<repo_name>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<file_sep>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<issue_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "8": {
+ "content": "<issue_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "9": {
+ "content": "<issue_closed>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "10": {
+ "content": "<jupyter_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "11": {
+ "content": "<jupyter_text>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "12": {
+ "content": "<jupyter_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "13": {
+ "content": "<jupyter_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "14": {
+ "content": "<jupyter_script>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "15": {
+ "content": "<empty_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "16": {
+ "content": "<code_to_intermediate>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "17": {
+ "content": "<intermediate_to_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "18": {
+ "content": "<pr>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "19": {
+ "content": "<pr_status>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "20": {
+ "content": "<pr_is_merged>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "21": {
+ "content": "<pr_base>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "22": {
+ "content": "<pr_file>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "23": {
+ "content": "<pr_base_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "24": {
+ "content": "<pr_diff>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "25": {
+ "content": "<pr_diff_hunk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "26": {
+ "content": "<pr_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "27": {
+ "content": "<pr_event_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "28": {
+ "content": "<pr_review>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "29": {
+ "content": "<pr_review_state>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30": {
+ "content": "<pr_review_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "31": {
+ "content": "<pr_in_reply_to_review_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32": {
+ "content": "<pr_in_reply_to_comment_id>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "33": {
+ "content": "<pr_diff_hunk_comment_line>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "34": {
+ "content": "<NAME>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "35": {
+ "content": "<EMAIL>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "36": {
+ "content": "<KEY>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "37": {
+ "content": "<PASSWORD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<fim_prefix>",
+ "<fim_middle>",
+ "<fim_suffix>",
+ "<fim_pad>",
+ "<repo_name>",
+ "<file_sep>",
+ "<issue_start>",
+ "<issue_comment>",
+ "<issue_closed>",
+ "<jupyter_start>",
+ "<jupyter_text>",
+ "<jupyter_code>",
+ "<jupyter_output>",
+ "<jupyter_script>",
+ "<empty_output>",
+ "<code_to_intermediate>",
+ "<intermediate_to_code>",
+ "<pr>",
+ "<pr_status>",
+ "<pr_is_merged>",
+ "<pr_base>",
+ "<pr_file>",
+ "<pr_base_code>",
+ "<pr_diff>",
+ "<pr_diff_hunk>",
+ "<pr_comment>",
+ "<pr_event_id>",
+ "<pr_review>",
+ "<pr_review_state>",
+ "<pr_review_comment>",
+ "<pr_in_reply_to_review_id>",
+ "<pr_in_reply_to_comment_id>",
+ "<pr_diff_hunk_comment_line>",
+ "<NAME>",
+ "<EMAIL>",
+ "<KEY>",
+ "<PASSWORD>"
+ ],
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
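
The tokenizer config declares a `GPT2Tokenizer` with a 49,152-entry vocabulary whose first 38 ids (0–37) are the special tokens listed above. A quick sanity check that they round-trip as single tokens; again a sketch against an illustrative local path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-2000")  # illustrative path
assert tok.vocab_size == 49152

for token in ["<|endoftext|>", "<fim_prefix>", "<repo_name>", "<file_sep>"]:
    print(token, "->", tok.convert_tokens_to_ids(token))  # each maps to one id
```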
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,434 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.962962962962963,
+ "eval_steps": 500,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "entropy": 1.805763495862484,
+ "epoch": 0.07407407407407407,
+ "grad_norm": 0.6806204319000244,
+ "learning_rate": 8.032786885245902e-05,
+ "loss": 1.5484,
+ "mean_token_accuracy": 0.6665007689595223,
+ "num_tokens": 408149.0,
+ "step": 50
+ },
+ {
+ "entropy": 0.41144788280129435,
+ "epoch": 0.14814814814814814,
+ "grad_norm": 0.38455039262771606,
+ "learning_rate": 9.990765991730485e-05,
+ "loss": 0.3321,
+ "mean_token_accuracy": 0.9129975068569184,
+ "num_tokens": 816230.0,
+ "step": 100
+ },
+ {
+ "entropy": 0.16323913749307395,
+ "epoch": 0.2222222222222222,
+ "grad_norm": 0.29704713821411133,
+ "learning_rate": 9.950545603782162e-05,
+ "loss": 0.1447,
+ "mean_token_accuracy": 0.9618216013908386,
+ "num_tokens": 1224471.0,
+ "step": 150
+ },
+ {
+ "entropy": 0.1175146003998816,
+ "epoch": 0.2962962962962963,
+ "grad_norm": 0.35487300157546997,
+ "learning_rate": 9.878674879048427e-05,
+ "loss": 0.1071,
+ "mean_token_accuracy": 0.9733556269109249,
+ "num_tokens": 1632497.0,
+ "step": 200
+ },
+ {
+ "entropy": 0.1009879010822624,
+ "epoch": 0.37037037037037035,
+ "grad_norm": 0.17419321835041046,
+ "learning_rate": 9.775613308830824e-05,
+ "loss": 0.0925,
+ "mean_token_accuracy": 0.9769376286864281,
+ "num_tokens": 2041392.0,
+ "step": 250
+ },
+ {
+ "entropy": 0.09154447751119733,
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.20543242990970612,
+ "learning_rate": 9.642019796948866e-05,
+ "loss": 0.0836,
+ "mean_token_accuracy": 0.9787026332318782,
+ "num_tokens": 2450311.0,
+ "step": 300
+ },
+ {
+ "entropy": 0.08632300381548702,
+ "epoch": 0.5185185185185185,
+ "grad_norm": 0.17172595858573914,
+ "learning_rate": 9.478748447168449e-05,
+ "loss": 0.0812,
+ "mean_token_accuracy": 0.9789653661847114,
+ "num_tokens": 2858744.0,
+ "step": 350
+ },
+ {
+ "entropy": 0.08412999271415174,
+ "epoch": 0.5925925925925926,
+ "grad_norm": 0.1447569578886032,
+ "learning_rate": 9.28684310265789e-05,
+ "loss": 0.0805,
+ "mean_token_accuracy": 0.9786932443082332,
+ "num_tokens": 3265542.0,
+ "step": 400
+ },
+ {
+ "entropy": 0.08065679710358381,
+ "epoch": 0.6666666666666666,
+ "grad_norm": 0.19630704820156097,
+ "learning_rate": 9.067530672382544e-05,
+ "loss": 0.0773,
+ "mean_token_accuracy": 0.9797722736001014,
+ "num_tokens": 3674162.0,
+ "step": 450
+ },
+ {
+ "entropy": 0.07874332463368773,
+ "epoch": 0.7407407407407407,
+ "grad_norm": 0.08524929732084274,
+ "learning_rate": 8.822213287104348e-05,
+ "loss": 0.0762,
+ "mean_token_accuracy": 0.9801681047677994,
+ "num_tokens": 4082734.0,
+ "step": 500
+ },
+ {
+ "entropy": 0.07778633100911975,
+ "epoch": 0.8148148148148148,
+ "grad_norm": 0.10848797112703323,
+ "learning_rate": 8.552459335135381e-05,
+ "loss": 0.0753,
+ "mean_token_accuracy": 0.9801766823232174,
+ "num_tokens": 4491115.0,
+ "step": 550
+ },
+ {
+ "entropy": 0.07791180345229805,
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.12547598779201508,
+ "learning_rate": 8.259993435156559e-05,
+ "loss": 0.0752,
+ "mean_token_accuracy": 0.9802092918753624,
+ "num_tokens": 4899794.0,
+ "step": 600
+ },
+ {
+ "entropy": 0.07790746555663645,
+ "epoch": 0.9629629629629629,
+ "grad_norm": 0.0992884486913681,
+ "learning_rate": 7.946685410208296e-05,
+ "loss": 0.0759,
+ "mean_token_accuracy": 0.979848040342331,
+ "num_tokens": 5307342.0,
+ "step": 650
+ },
+ {
+ "entropy": 0.07524958597496152,
+ "epoch": 1.037037037037037,
+ "grad_norm": 0.086652472615242,
+ "learning_rate": 7.614538333345735e-05,
+ "loss": 0.0731,
+ "mean_token_accuracy": 0.9808213406801224,
+ "num_tokens": 5716156.0,
+ "step": 700
+ },
+ {
+ "entropy": 0.07525249728001654,
+ "epoch": 1.1111111111111112,
+ "grad_norm": 0.13188883662223816,
+ "learning_rate": 7.265675721386285e-05,
+ "loss": 0.0728,
+ "mean_token_accuracy": 0.9810096868872642,
+ "num_tokens": 6123905.0,
+ "step": 750
+ },
+ {
+ "entropy": 0.07601501471363008,
+ "epoch": 1.1851851851851851,
+ "grad_norm": 0.0819055363535881,
+ "learning_rate": 6.902327958623736e-05,
+ "loss": 0.0736,
+ "mean_token_accuracy": 0.9805573572218418,
+ "num_tokens": 6532143.0,
+ "step": 800
+ },
+ {
+ "entropy": 0.07429057988338172,
+ "epoch": 1.2592592592592593,
+ "grad_norm": 0.09344803541898727,
+ "learning_rate": 6.526818037306228e-05,
+ "loss": 0.0727,
+ "mean_token_accuracy": 0.9813062380254268,
+ "num_tokens": 6940424.0,
+ "step": 850
+ },
+ {
+ "entropy": 0.07440330957062542,
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.11321987956762314,
+ "learning_rate": 6.14154670604355e-05,
+ "loss": 0.0725,
+ "mean_token_accuracy": 0.9811275874078274,
+ "num_tokens": 7348719.0,
+ "step": 900
+ },
+ {
+ "entropy": 0.07393304943107068,
+ "epoch": 1.4074074074074074,
+ "grad_norm": 0.087185338139534,
+ "learning_rate": 5.7489771210944564e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807305666804313,
+ "num_tokens": 7756710.0,
+ "step": 950
+ },
+ {
+ "entropy": 0.07433672657236456,
+ "epoch": 1.4814814814814814,
+ "grad_norm": 0.10077723860740662,
+ "learning_rate": 5.351619098663021e-05,
+ "loss": 0.0726,
+ "mean_token_accuracy": 0.9807634821534157,
+ "num_tokens": 8165027.0,
+ "step": 1000
+ },
+ {
+ "entropy": 0.07262381819076837,
+ "epoch": 1.5555555555555556,
+ "grad_norm": 0.05774468928575516,
+ "learning_rate": 4.952013068883795e-05,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9813279174268246,
+ "num_tokens": 8573428.0,
+ "step": 1050
+ },
+ {
+ "entropy": 0.07313465082086623,
+ "epoch": 1.6296296296296298,
+ "grad_norm": 0.156134694814682,
+ "learning_rate": 4.5527138340828776e-05,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9811334984004497,
+ "num_tokens": 8981661.0,
+ "step": 1100
+ },
+ {
+ "entropy": 0.07226949028670787,
+ "epoch": 1.7037037037037037,
+ "grad_norm": 0.060666773468256,
+ "learning_rate": 4.156274235153189e-05,
+ "loss": 0.071,
+ "mean_token_accuracy": 0.9813579262793064,
+ "num_tokens": 9390026.0,
+ "step": 1150
+ },
+ {
+ "entropy": 0.07272680706344545,
+ "epoch": 1.7777777777777777,
+ "grad_norm": 0.10639354586601257,
+ "learning_rate": 3.765228830469794e-05,
+ "loss": 0.0711,
+ "mean_token_accuracy": 0.981099860817194,
+ "num_tokens": 9798433.0,
+ "step": 1200
+ },
+ {
+ "entropy": 0.07212639102712273,
+ "epoch": 1.8518518518518519,
+ "grad_norm": 0.06637933105230331,
+ "learning_rate": 3.3820776916908857e-05,
+ "loss": 0.0711,
+ "mean_token_accuracy": 0.9812221045792103,
+ "num_tokens": 10206854.0,
+ "step": 1250
+ },
+ {
+ "entropy": 0.07320461552590132,
+ "epoch": 1.925925925925926,
+ "grad_norm": 0.10055620223283768,
+ "learning_rate": 3.0092704200428058e-05,
+ "loss": 0.0719,
+ "mean_token_accuracy": 0.9807316599786282,
+ "num_tokens": 10615186.0,
+ "step": 1300
+ },
+ {
+ "entropy": 0.07290558220818638,
+ "epoch": 2.0,
+ "grad_norm": 0.0677887499332428,
+ "learning_rate": 2.649190485277792e-05,
+ "loss": 0.0718,
+ "mean_token_accuracy": 0.9811730526387692,
+ "num_tokens": 11023650.0,
+ "step": 1350
+ },
+ {
+ "entropy": 0.07251124914735556,
+ "epoch": 2.074074074074074,
+ "grad_norm": 0.11045810580253601,
+ "learning_rate": 2.3041399874302905e-05,
+ "loss": 0.0709,
+ "mean_token_accuracy": 0.9809805656969547,
+ "num_tokens": 11430802.0,
+ "step": 1400
+ },
+ {
+ "entropy": 0.07181317125447094,
+ "epoch": 2.148148148148148,
+ "grad_norm": 0.07149960100650787,
+ "learning_rate": 1.976324938794482e-05,
+ "loss": 0.0708,
+ "mean_token_accuracy": 0.9813783176243305,
+ "num_tokens": 11839809.0,
+ "step": 1450
+ },
+ {
+ "entropy": 0.0717768538929522,
+ "epoch": 2.2222222222222223,
+ "grad_norm": 0.05869750306010246,
+ "learning_rate": 1.667841160219835e-05,
+ "loss": 0.0707,
+ "mean_token_accuracy": 0.9814658354222775,
+ "num_tokens": 12248612.0,
+ "step": 1500
+ },
+ {
+ "entropy": 0.0715598820708692,
+ "epoch": 2.2962962962962963,
+ "grad_norm": 0.08496281504631042,
+ "learning_rate": 1.3806608818939203e-05,
+ "loss": 0.0705,
+ "mean_token_accuracy": 0.9812778060138225,
+ "num_tokens": 12657077.0,
+ "step": 1550
+ },
+ {
+ "entropy": 0.07179237512871622,
+ "epoch": 2.3703703703703702,
+ "grad_norm": 0.06790705770254135,
+ "learning_rate": 1.1166201342777438e-05,
+ "loss": 0.0709,
+ "mean_token_accuracy": 0.9812556092441082,
+ "num_tokens": 13064930.0,
+ "step": 1600
+ },
+ {
+ "entropy": 0.0720329173374921,
+ "epoch": 2.4444444444444446,
+ "grad_norm": 0.06569824367761612,
+ "learning_rate": 8.774070098071668e-06,
+ "loss": 0.0711,
+ "mean_token_accuracy": 0.9811322076618672,
+ "num_tokens": 13472953.0,
+ "step": 1650
+ },
+ {
+ "entropy": 0.07238605052232742,
+ "epoch": 2.5185185185185186,
+ "grad_norm": 0.05740037187933922,
+ "learning_rate": 6.645508704069003e-06,
+ "loss": 0.0712,
+ "mean_token_accuracy": 0.9810698322951794,
+ "num_tokens": 13881339.0,
+ "step": 1700
+ },
+ {
+ "entropy": 0.07082363245077432,
+ "epoch": 2.5925925925925926,
+ "grad_norm": 0.06362631171941757,
+ "learning_rate": 4.794125698167262e-06,
+ "loss": 0.0703,
+ "mean_token_accuracy": 0.9813959409296512,
+ "num_tokens": 14289567.0,
+ "step": 1750
+ },
+ {
+ "entropy": 0.07266209500841797,
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.05623815581202507,
+ "learning_rate": 3.231757532415458e-06,
+ "loss": 0.0714,
+ "mean_token_accuracy": 0.9809911704063415,
+ "num_tokens": 14697205.0,
+ "step": 1800
+ },
+ {
+ "entropy": 0.07201593144796789,
+ "epoch": 2.7407407407407405,
+ "grad_norm": 0.05804692208766937,
+ "learning_rate": 1.9683928994924385e-06,
+ "loss": 0.071,
+ "mean_token_accuracy": 0.9810844567418099,
+ "num_tokens": 15105573.0,
+ "step": 1850
+ },
+ {
+ "entropy": 0.07237200179137289,
+ "epoch": 2.814814814814815,
+ "grad_norm": 0.07103168219327927,
+ "learning_rate": 1.0121088719706296e-06,
+ "loss": 0.0715,
+ "mean_token_accuracy": 0.9812616856396198,
+ "num_tokens": 15514180.0,
+ "step": 1900
+ },
+ {
+ "entropy": 0.07117950812913477,
+ "epoch": 2.888888888888889,
+ "grad_norm": 0.062420960515737534,
+ "learning_rate": 3.6901926314575894e-07,
+ "loss": 0.0703,
+ "mean_token_accuracy": 0.981419977247715,
+ "num_tokens": 15922315.0,
+ "step": 1950
+ },
+ {
+ "entropy": 0.07066867646761238,
+ "epoch": 2.962962962962963,
+ "grad_norm": 0.05154326930642128,
+ "learning_rate": 4.323553957759629e-08,
+ "loss": 0.07,
+ "mean_token_accuracy": 0.9817947860062123,
+ "num_tokens": 16331178.0,
+ "step": 2000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 2025,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6.821968917982372e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
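
The `learning_rate` column in this log is consistent with a linear warmup to a 1e-4 peak followed by cosine decay toward zero over the 2025 total steps, with roughly 61 warmup steps (warmup ratio ≈ 0.03). Those values are inferred from the logged numbers, not read out of `training_args.bin`, so treat this reconstruction as a sketch:

```python
import math

PEAK_LR = 1e-4   # inferred: logged lr peaks just under 1e-4 near step 100
WARMUP = 61      # inferred: ~3% of the 2025 total steps
TOTAL = 2025

def lr_at(step: int) -> float:
    if step < WARMUP:
        return PEAK_LR * step / WARMUP  # linear warmup
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

for step in (100, 1000, 2000):
    print(step, f"{lr_at(step):.3e}")  # tracks the logged values closely
```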
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99474405bb9e12c9e6287ba75ece0944d511441013aa0f9574c0a7847af2b991
+ size 6225
checkpoint-2000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2025/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: bigcode/starcoder2-7b
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:bigcode/starcoder2-7b
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
checkpoint-2025/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "bigcode/starcoder2-7b",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
checkpoint-2025/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f9a9fbadd0b35342e1993f10bde6b04ed43f8e16cbc6027b05d97c66c518dc6
+ size 58754616
checkpoint-2025/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2025/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:310e3145759ebef6398ac4a4c3b94099f6f222a086adee3c58729b9361c6f49e
+ size 117660107
checkpoint-2025/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a039ed66a9d266986e1f0e98befe7f1d8ca1c98ad6639ae7472d051bd9603c69
+ size 14645
checkpoint-2025/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24e77c94e08fe995eb38321a73baf050cd174cc938ef4133784c892be1db9c05
+ size 1465