IbrahimTarek committed
Commit dc74e1c
1 Parent(s): b21306c

IbrahimTarek/Boiler_gemma7b

README.md CHANGED
@@ -3,7 +3,7 @@ license: gemma
 library_name: peft
 tags:
 - generated_from_trainer
-base_model: google/gemma-2b-it
+base_model: google/gemma-7b-it
 model-index:
 - name: your-model
   results: []
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # your-model
 
-This model is a fine-tuned version of [google/gemma-2b-it](https://huggingface.co/google/gemma-2b-it) on an unknown dataset.
+This model is a fine-tuned version of [google/gemma-7b-it](https://huggingface.co/google/gemma-7b-it) on an unknown dataset.
 
 ## Model description
 
@@ -42,7 +42,7 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.05
-- num_epochs: 30
+- num_epochs: 10
 - mixed_precision_training: Native AMP
 
 ### Training results
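The hyperparameter hunk above maps directly onto `transformers.TrainingArguments`. A minimal sketch of the equivalent setup, assuming only the values visible in this diff; the learning rate and batch size fall outside the hunk, so those values below are placeholders, not what this commit actually used:

```python
from transformers import TrainingArguments

# Sketch of TrainingArguments matching the hyperparameters in the README hunk.
training_args = TrainingArguments(
    output_dir="your-model",
    num_train_epochs=10,            # changed from 30 in this commit
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    adam_beta1=0.9,                 # Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,              # epsilon=1e-08
    fp16=True,                      # "Native AMP" mixed-precision training
    learning_rate=2e-4,             # placeholder: not shown in this diff
    per_device_train_batch_size=4,  # placeholder: not shown in this diff
)
```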
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "google/gemma-2b-it",
+  "base_model_name_or_path": "google/gemma-7b-it",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a597a66a3be8df1f81f0940a6792a1ceb7398095372233f4126f5fb6f0d9b72
-size 3695848
+oid sha256:89fb9766a1291baad715f3917915bb961d2f6d1bbdbd92eb5356c9c8a7813362
+size 12859872
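Only the Git LFS pointer changes here: the adapter grew from roughly 3.7 MB to 12.9 MB, consistent with retraining against the larger 7B base. A sketch for fetching the real weights behind the pointer (`hf_hub_download` resolves LFS files transparently):

```python
import os
from huggingface_hub import hf_hub_download

# The file in git is only a 3-line LFS pointer; hf_hub_download fetches the
# actual safetensors blob it points at.
path = hf_hub_download(
    repo_id="IbrahimTarek/Boiler_gemma7b",
    filename="adapter_model.safetensors",
)
print(os.path.getsize(path))  # should match the pointer's size field: 12859872
```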
runs/Apr14_09-54-15_88d58899358b/events.out.tfevents.1713088471.88d58899358b.1884.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b9d2559e947ab479a1018c55175db69b7c420d9dc4dd5cfac769af53da22cb3
+size 5982
runs/Apr14_09-55-27_88d58899358b/events.out.tfevents.1713088534.88d58899358b.1884.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23b457af18413bbb8c0f9a2420ca6b56217e2043e128fab55bab5849f691eeab
+size 266640
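The two added files are TensorBoard event logs from this training run. To inspect the recorded scalars without launching TensorBoard, a sketch using the event-file reader that ships with the `tensorboard` package; the directory path assumes the run files have been downloaded locally, and the tag name is an assumption (Trainer typically logs under `train/loss`):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the directory containing the events.out.tfevents.* file.
acc = EventAccumulator("runs/Apr14_09-55-27_88d58899358b")
acc.Reload()

print(acc.Tags()["scalars"])             # list the scalar tags that were logged
for event in acc.Scalars("train/loss"):  # tag name is an assumption
    print(event.step, event.value)
```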
tokenizer_config.json CHANGED
@@ -58,7 +58,6 @@
   "bos_token": "<bos>",
   "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "device": "cuda",
   "eos_token": "<eos>",
   "legacy": null,
   "model_max_length": 1000000000000000019884624838656,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b86a6c0829b995d267b5ff9ed8b5f8a3064cd678a2b29861c400374880e37efe
+oid sha256:4c7b8a4d3e62da2cd0ab4f446fdd4b4ecae487ec0082b7f576cb84ccc8dff969
 size 4984
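`training_args.bin` is a pickled `TrainingArguments` object saved by Trainer; only its hash changes here (the size stays 4984 bytes). A sketch for recovering the exact hyperparameters behind the README, assuming you trust the repo enough to unpickle it:

```python
import torch
from huggingface_hub import hf_hub_download

# Trainer saves TrainingArguments with torch.save; torch.load unpickles it.
# weights_only=False is needed on recent torch versions because this is a
# full Python object, not a tensor checkpoint; only do this for trusted repos.
path = hf_hub_download("IbrahimTarek/Boiler_gemma7b", "training_args.bin")
args = torch.load(path, weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.lr_scheduler_type)
```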