zodiache committed f6ab5a9 (1 parent: a6ee28c)

Training in progress, step 20

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e6314d8d45ea35aac74acff8f0c053c56e195ba86900e7f63d4520d8db5ce1b
-size 16794456
+oid sha256:1126c4efa58ffbee8ea433b3c23005cc340bf75c30a898844512db56998bd27d
+size 13648688
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:228d035b0410913f8c36ea1b9c8346c519fcc8b806eda865c2231ee1aefbfee1
-size 1843075
+oid sha256:74ea892bac4c9d087f562f5a3def3c168da03697dc76928dd68d48302ea09c51
+size 1795611
tokenizer_config.json CHANGED
@@ -35,15 +35,16 @@
       "special": true
     }
   },
+  "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "legacy": false,
+  "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "padding_side": "right",
   "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c9461a484ce5b5a9b4847fea1f531fac50a21022109a0c4300811ae5a039115
+oid sha256:b65c6f5fa786309a3dee18ac93a50626ed47e6591ce5d28988a1dc27989df2c3
 size 5304