MaziyarPanahi committed on
Commit
c4a0941
1 Parent(s): e8266ac

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - finetuned
4
+ - quantized
5
+ - 4-bit
6
+ - AWQ
7
+ - text-generation
8
+ - mixtral
9
+ model_name: Mixtral-8x22B-Instruct-v0.1-AWQ
10
+ base_model: mistralai/Mixtral-8x22B-Instruct-v0.1
11
+ inference: false
12
+ model_creator: mistralai
13
+ pipeline_tag: text-generation
14
+ quantized_by: MaziyarPanahi
15
+ ---
16
+ # Description
17
+ [MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-AWQ](https://huggingface.co/MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-AWQ) is a quantized (AWQ) version of [mistralai/Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1).
18
+
19
+ ## How to use
20
+ ### Install the necessary packages
21
+
22
+ ```
23
+ pip install --upgrade accelerate autoawq transformers
24
+ ```
25
+
26
+ ### Example Python code
27
+
28
+
29
+ ```python
30
+ from transformers import AutoTokenizer, AutoModelForCausalLM
31
+
32
+ model_id = "MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-AWQ"
33
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
34
+ model = AutoModelForCausalLM.from_pretrained(model_id).to(0)
35
+
36
+ text = "User:\nHello can you provide me with top-3 cool places to visit in Paris?\n\nAssistant:\n"
37
+ inputs = tokenizer(text, return_tensors="pt").to(0)
38
+
39
+ out = model.generate(**inputs, max_new_tokens=300)
40
+ print(tokenizer.decode(out[0], skip_special_tokens=True))
41
+ ```
42
+
43
+ Results:
44
+ ```
45
+ User:
46
+ Hello can you provide me with top-3 cool places to visit in Paris?
47
+
48
+ Assistant:
49
+ Absolutely, here are my top-3 recommendations for must-see places in Paris:
50
+
51
+ 1. The Eiffel Tower: An icon of Paris, this wrought-iron lattice tower is a global cultural icon of France and is among the most recognizable structures in the world. Climbing up to the top offers breathtaking views of the city.
52
+
53
+ 2. The Louvre Museum: Home to thousands of works of art, the Louvre is the world's largest art museum and a historic monument in Paris. Must-see pieces include the Mona Lisa, the Winged Victory of Samothrace, and the Venus de Milo.
54
+
55
+ 3. Notre-Dame Cathedral: This cathedral is a masterpiece of French Gothic architecture and is famous for its intricate stone carvings, beautiful stained glass, and its iconic twin towers. Be sure to spend some time exploring its history and learning about the fascinating restoration efforts post the 2019 fire.
56
+
57
+ I hope you find these recommendations helpful and that they make for an enjoyable and memorable trip to Paris. Safe travels!
58
+ ```
config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/home/maziyar/.cache/huggingface/hub/models--mistralai--Mixtral-8x22B-Instruct-v0.1/snapshots/514d47be2925f7b5d845bbb34a29d5c73ccf53d8",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 6144,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 16384,
13
+ "max_position_embeddings": 65536,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 48,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "quantization_config": {
22
+ "bits": 4,
23
+ "group_size": 128,
24
+ "modules_to_not_convert": [
25
+ "gate"
26
+ ],
27
+ "quant_method": "awq",
28
+ "version": "gemm",
29
+ "zero_point": true
30
+ },
31
+ "rms_norm_eps": 1e-05,
32
+ "rope_theta": 1000000.0,
33
+ "router_aux_loss_coef": 0.001,
34
+ "sliding_window": null,
35
+ "tie_word_embeddings": false,
36
+ "torch_dtype": "float16",
37
+ "transformers_version": "4.38.2",
38
+ "use_cache": true,
39
+ "vocab_size": 32768
40
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "do_sample": true,
5
+ "eos_token_id": 2,
6
+ "transformers_version": "4.38.2"
7
+ }
model-00001-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39b3d5123fdd10549e05345589da529fe3289aaa89bdedd9fda0f346bc85270e
3
+ size 4979210376
model-00002-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8975d56c263b844b251b78b7e9e6d31a7a000ee8d2eb6292d1738c1860b1de86
3
+ size 4994966792
model-00003-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1ad0647dfa2edda601269fd88756421542e248f1aa20397111d44c880912dc1
3
+ size 4994966904
model-00004-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a28a3abf90fde50f250188a43364cc5500afca9b2283060fdb05407ed8a6835f
3
+ size 4994967128
model-00005-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72f13c3119f6c2352747048f9596b8e0dbb11cc4578ba220f4c6a5f8513f6242
3
+ size 4999807128
model-00006-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b1786bfbe62a4ea1866da3507245664a1acfea14b31a196c1d1154bba06f791
3
+ size 4996540120
model-00007-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:758073c1000379218375c00d9ede32ed6c029662e96ba5f78e656e65488e0807
3
+ size 4994967128
model-00008-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36048e0a2d701ced330ac6bddeaca10472e1a502d5990d21718f2d424b275fc5
3
+ size 4994967128
model-00009-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd5f9dae0bd579b0523521e55630ca074d13b9c9de6be533fa04e9e3f78bd0bb
3
+ size 4994967128
model-00010-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9527ead46bffe530ad605d701b5442c3615a95aa5ed75392fe3c7141f42eb0e3
3
+ size 4994967128
model-00011-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10a3b3a9e3ec10c95f455186fb04cf6c86318e5dd62f10b1758f8b528611fa24
3
+ size 4999807128
model-00012-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:229f6b4f4448a8bd1be5308423ae82b273a9e8ee35d7cc0e4792964d1e10875b
3
+ size 4996540120
model-00013-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b559a2f305f4b1c57b40447f34a77c6741856fe6903e8dfcb8298ab1f5a4b27
3
+ size 4994967128
model-00014-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1720f6f0911204d4da80526ee782bad2b8a8890b0ed11603f2aecda771836ce0
3
+ size 4994967128
model-00015-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd33801de8d34b5ef5dcd2d3548c5ca4ebb4a6295b47f679514de041efefd458
3
+ size 3736943864
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": true,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "3": {
30
+ "content": "[INST]",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "4": {
38
+ "content": "[/INST]",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "5": {
46
+ "content": "[TOOL_CALLS]",
47
+ "lstrip": false,
48
+ "normalized": true,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "6": {
54
+ "content": "[AVAILABLE_TOOLS]",
55
+ "lstrip": false,
56
+ "normalized": true,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "7": {
62
+ "content": "[/AVAILABLE_TOOLS]",
63
+ "lstrip": false,
64
+ "normalized": true,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "8": {
70
+ "content": "[TOOL_RESULTS]",
71
+ "lstrip": false,
72
+ "normalized": true,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "9": {
78
+ "content": "[/TOOL_RESULTS]",
79
+ "lstrip": false,
80
+ "normalized": true,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ }
85
+ },
86
+ "additional_special_tokens": [],
87
+ "bos_token": "<s>",
88
+ "chat_template": "{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
89
+ "clean_up_tokenization_spaces": false,
90
+ "eos_token": "</s>",
91
+ "legacy": true,
92
+ "model_max_length": 1000000000000000019884624838656,
93
+ "pad_token": null,
94
+ "sp_model_kwargs": {},
95
+ "spaces_between_special_tokens": false,
96
+ "tokenizer_class": "LlamaTokenizer",
97
+ "unk_token": "<unk>",
98
+ "use_default_system_prompt": false
99
+ }