Upload 6 files
8bit_AAAAAAA/adapter_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+    "base_model_name_or_path": "models/decapoda-research_llama-7b-hf",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "lora_alpha": 64,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 32,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": "CAUSAL_LM"
+}
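This config defines a rank-32 LoRA (scaled by lora_alpha 64, i.e. a factor of 2) on the q_proj and v_proj attention projections of the LLaMA-7B base model. Below is a minimal sketch of attaching it with the peft library; the model path and folder name come from the config above, while loading the base in 8-bit (suggested by the folder name) is an assumption.

```python
# Minimal sketch: attach the 8bit_AAAAAAA LoRA adapter to its base model
# with the `peft` library. Paths are taken from adapter_config.json above;
# 8-bit loading is an assumption based on the folder name.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "models/decapoda-research_llama-7b-hf",
    load_in_8bit=True,   # assumed: adapter was trained against an 8-bit base
    device_map="auto",
)
# Reads adapter_config.json and adapter_model.bin from the folder.
model = PeftModel.from_pretrained(base, "8bit_AAAAAAA")
model.eval()
```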
8bit_AAAAAAA/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2565fd5bb0d2a38ed6837210192832ea25fee643a6cd926f26fea8bd6936ba29
+size 67154893
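adapter_model.bin is tracked with Git LFS, so the diff records only the three-line pointer (spec version, SHA-256 object id, byte size) rather than the ~67 MB of weights. A small sketch, assuming a local checkout, of checking a fetched file against that pointer:

```python
# Sketch: verify a downloaded adapter_model.bin against the Git LFS pointer
# above. The local path is hypothetical; oid and size come from the pointer.
import hashlib
from pathlib import Path

path = Path("8bit_AAAAAAA/adapter_model.bin")  # hypothetical local checkout
expected_oid = "2565fd5bb0d2a38ed6837210192832ea25fee643a6cd926f26fea8bd6936ba29"
expected_size = 67154893

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch: file may still be an LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.bin matches its LFS pointer")
```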
8bit_AAAAAAA/training_parameters.json
ADDED
@@ -0,0 +1 @@
+{"lora_name": "8bit_AAAAAAA", "always_override": false, "save_steps": 0.0, "micro_batch_size": 4, "batch_size": 128, "epochs": 3.0, "learning_rate": "9e-4", "lr_scheduler_type": "linear", "lora_rank": 32, "lora_alpha": 64, "lora_dropout": 0.05, "cutoff_len": 256, "dataset": "None", "eval_dataset": "None", "format": "None", "eval_steps": 100.0, "raw_text_file": "AAAAAAA", "overlap_len": 128, "newline_favor_len": 128, "higher_rank_limit": false, "warmup_steps": 100.0, "optimizer": "adamw_torch"}
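These parameters appear to come from text-generation-webui's LoRA training tab: raw-text training on file AAAAAAA in 256-token chunks overlapping by 128 tokens, for 3 epochs at a 9e-4 learning rate with linear decay. Two derived values are worth spelling out, as in the sketch below; the formulas are the standard conventions, and the file path assumes a local checkout.

```python
# Sketch: derive effective training settings from training_parameters.json.
# Gradient-accumulation and LoRA-scaling formulas are the usual conventions;
# the path assumes a local checkout.
import json

with open("8bit_AAAAAAA/training_parameters.json") as f:
    params = json.load(f)

# The effective batch of 128 is reached by accumulating micro-batches of 4.
grad_accum_steps = params["batch_size"] // params["micro_batch_size"]  # 128 // 4 = 32

# LoRA updates are scaled by alpha / r.
lora_scaling = params["lora_alpha"] / params["lora_rank"]  # 64 / 32 = 2.0

print(f"gradient accumulation steps: {grad_accum_steps}")
print(f"LoRA scaling (alpha/r):      {lora_scaling}")
```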
8bit_BBBBBBB/adapter_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+    "base_model_name_or_path": "models/decapoda-research_llama-7b-hf",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "lora_alpha": 64,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 32,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": "CAUSAL_LM"
+}
8bit_BBBBBBB/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efe64f0664c6de9c049fc8efcd52899cc38484ce5389593b05c5881dfb15038b
+size 67154893
8bit_BBBBBBB/training_parameters.json
ADDED
@@ -0,0 +1 @@
+{"lora_name": "8bit_BBBBBBB", "always_override": false, "save_steps": 0.0, "micro_batch_size": 4, "batch_size": 128, "epochs": 3.0, "learning_rate": "9e-4", "lr_scheduler_type": "linear", "lora_rank": 32, "lora_alpha": 64, "lora_dropout": 0.05, "cutoff_len": 256, "dataset": "None", "eval_dataset": "None", "format": "None", "eval_steps": 100.0, "raw_text_file": "BBBBBBB", "overlap_len": 128, "newline_favor_len": 128, "higher_rank_limit": false, "warmup_steps": 100.0, "optimizer": "adamw_torch"}
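The 8bit_BBBBBBB run uses identical hyperparameters and differs from 8bit_AAAAAAA only in its raw-text corpus (raw_text_file BBBBBBB) and therefore in the learned weights. Since both adapters share the same base model, they can be hosted side by side and swapped at inference time; a sketch using peft's multi-adapter API, with paths assuming a local checkout:

```python
# Sketch: host both adapters on one base model and switch between them
# via peft's multi-adapter API. Paths assume a local checkout; the
# adapter names chosen here are arbitrary labels.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "models/decapoda-research_llama-7b-hf",
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "8bit_AAAAAAA", adapter_name="AAAAAAA")
model.load_adapter("8bit_BBBBBBB", adapter_name="BBBBBBB")

model.set_adapter("BBBBBBB")  # route generation through the second adapter
```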