kaiokendev committed
Commit • 222ada1
1 Parent(s): a4a3590
Add 7b
Browse files
- 13b/ggml/cutoff-1024/ggml-adapter-model.bin +3 -0
- 13b/ggml/{ggml-adapter-model.bin → cutoff-2048/ggml-adapter-model.bin} +0 -0
- 13b/gpu/cutoff-1024/adapter_config.json +17 -0
- 13b/gpu/cutoff-1024/adapter_model.bin +3 -0
- 13b/gpu/{adapter_config.json → cutoff-2048/adapter_config.json} +0 -0
- 13b/gpu/{adapter_model.bin → cutoff-2048/adapter_model.bin} +0 -0
- 30b/gpu/{adapter_config.json → cutoff-1024/adapter_config.json} +0 -0
- 30b/gpu/{adapter_model.bin → cutoff-1024/adapter_model.bin} +0 -0
- 7b/ggml/cutoff-1024/ggml-adapter-model.bin +3 -0
- 7b/gpu/cutoff-1024/adapter_config.json +17 -0
- 7b/gpu/cutoff-1024/adapter_model.bin +3 -0
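The commit adds the 7b adapters alongside the existing 13b and 30b ones, with each size split into cutoff-1024 and cutoff-2048 subdirectories. A minimal sketch of pulling just one of those subtrees with the huggingface_hub library; the repo_id is a placeholder, since the commit page does not show the repository name:

# A sketch only: repo_id is a placeholder, not taken from this page.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="kaiokendev/lora-repo",            # placeholder repository id
    allow_patterns=["7b/gpu/cutoff-1024/*"],   # fetch only the newly added 7b GPU adapter
)
print(local_dir)  # local path containing adapter_config.json and adapter_model.bin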
13b/ggml/cutoff-1024/ggml-adapter-model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cf85704816bee293d33f1ba954b39439c3391d14949dfaf8005b801533b6cd4
+size 26224672
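The three added lines are a Git LFS pointer rather than the adapter weights themselves: version names the pointer spec, oid is the SHA-256 of the actual binary, and size is its byte count. A minimal sketch of checking a downloaded blob against such a pointer; the local blob path is hypothetical:

# A sketch only, not part of the repository: verify a downloaded blob
# against the oid/size recorded in its Git LFS pointer.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line is "key value", e.g. "size 26224672".
    return dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())

def verify_blob(pointer_text: str, blob_path: str) -> bool:
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].removeprefix("sha256:")
    data = Path(blob_path).read_bytes()
    return (len(data) == int(fields["size"])
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Pointer taken from the diff above; the blob path is a hypothetical local file.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:6cf85704816bee293d33f1ba954b39439c3391d14949dfaf8005b801533b6cd4
size 26224672"""
print(verify_blob(pointer, "ggml-adapter-model.bin"))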
13b/ggml/{ggml-adapter-model.bin → cutoff-2048/ggml-adapter-model.bin}
RENAMED
File without changes
13b/gpu/cutoff-1024/adapter_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "base_model_name_or_path": "",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
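The file above is a standard PEFT LoRA adapter configuration: rank r=8, lora_alpha=16, dropout 0.05, applied to the q_proj and v_proj attention projections of a causal LM. A minimal sketch of attaching such an adapter with the peft library; the base model id and local adapter directory are placeholders, since base_model_name_or_path is left empty in the config:

# A minimal sketch, assuming the transformers and peft libraries; the base model
# name and the local adapter directory are placeholders, since the config above
# leaves "base_model_name_or_path" empty.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "huggyllama/llama-13b"   # placeholder 13b base model
adapter_dir = "13b/gpu/cutoff-1024"      # directory holding adapter_config.json + adapter_model.bin

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(base_model_id)

# Wrap the base model with the LoRA adapter (r=8, alpha=16, targeting q_proj/v_proj).
model = PeftModel.from_pretrained(model, adapter_dir)
model.eval()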
13b/gpu/cutoff-1024/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3a29f208807d836ed18f5b358eb6badcf1825a4bd34c3ea138362aa1712d219
+size 26271757
13b/gpu/{adapter_config.json → cutoff-2048/adapter_config.json}
RENAMED
File without changes

13b/gpu/{adapter_model.bin → cutoff-2048/adapter_model.bin}
RENAMED
File without changes

30b/gpu/{adapter_config.json → cutoff-1024/adapter_config.json}
RENAMED
File without changes

30b/gpu/{adapter_model.bin → cutoff-1024/adapter_model.bin}
RENAMED
File without changes
7b/ggml/cutoff-1024/ggml-adapter-model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2456a6b67aa0c70d59f7cd6116ab4ac4b633cbd77c24866e722d39b0da67d17a
+size 16785440
7b/gpu/cutoff-1024/adapter_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "base_model_name_or_path": "",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
7b/gpu/cutoff-1024/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:065cdc917c651bcc398e8b121a018deeedc268c7ba84f09cc9480d5c0475879e
+size 16822989