prince-canuma committed
Commit 0a33070
Parent: 73a1d48

Upload folder using huggingface_hub

Files changed (2)
  1. README.md +9 -7
  2. config.json +0 -155
README.md CHANGED
@@ -7,15 +7,17 @@ tags:
  - multimodal
  - qwen
  - mlx
- pipeline_tag: image-text-to-text
  ---
 
  # mlx-community/nanoLLaVA
 
- This project is a WIP and will be announced soon; follow me on X for updates: [Prince Canuma](https://twitter.com/Prince_Canuma).
-
- This model was converted to MLX format from [`qnguyen3/nanoLLaVA`]() using mlx-vlm (WIP) version **0.0.0**.
-
- Model added by [Prince Canuma](https://twitter.com/Prince_Canuma).
+ This model was converted to MLX format from [`qnguyen3/nanoLLaVA`]() using mlx-vlm version **0.0.0**.
+ Refer to the [original model card](https://huggingface.co/qnguyen3/nanoLLaVA) for more details on the model.
+ ## Use with mlx
 
- Refer to the [original model card](https://huggingface.co/qnguyen3/nanoLLaVA) for more details on the model.
+ ```bash
+ pip install -U mlx-vlm
+ ```
 
+ ```bash
+ python -m mlx_vlm.generate --model mlx-community/nanoLLaVA --max-tokens 100 --temp 0.0
+ ```
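For readers who want the Python API rather than the CLI shown in the new README, here is a minimal sketch of the equivalent call. The `load`/`generate` helpers and their argument names are assumptions based on mlx-vlm's usage examples, not part of this commit; exact signatures vary between mlx-vlm versions, so check the version you have installed.

```python
# Minimal sketch: Python equivalent of the CLI example above.
# ASSUMPTION: mlx-vlm exposes load() and generate() at the top level;
# keyword argument names may differ across mlx-vlm versions.
from mlx_vlm import load, generate

model, processor = load("mlx-community/nanoLLaVA")

output = generate(
    model,
    processor,
    prompt="Describe this image.",  # hypothetical prompt
    image="cat.jpg",                # hypothetical local image path
    max_tokens=100,
    temp=0.0,
)
print(output)
```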
config.json CHANGED
@@ -29,88 +29,6 @@
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 4096,
- "text_config": {
- "vocab_size": 151936,
- "max_position_embeddings": 32768,
- "hidden_size": 1024,
- "intermediate_size": 2816,
- "num_hidden_layers": 24,
- "num_attention_heads": 16,
- "use_sliding_window": false,
- "sliding_window": 4096,
- "max_window_layers": 21,
- "num_key_value_heads": 16,
- "hidden_act": "silu",
- "initializer_range": 0.02,
- "rms_norm_eps": 1e-06,
- "use_cache": false,
- "rope_theta": 1000000.0,
- "attention_dropout": 0.0,
- "return_dict": true,
- "output_hidden_states": false,
- "output_attentions": false,
- "torchscript": false,
- "torch_dtype": "bfloat16",
- "use_bfloat16": false,
- "tf_legacy_loss": false,
- "pruned_heads": {},
- "tie_word_embeddings": false,
- "chunk_size_feed_forward": 0,
- "is_encoder_decoder": false,
- "is_decoder": false,
- "cross_attention_hidden_size": null,
- "add_cross_attention": false,
- "tie_encoder_decoder": false,
- "max_length": 20,
- "min_length": 0,
- "do_sample": false,
- "early_stopping": false,
- "num_beams": 1,
- "num_beam_groups": 1,
- "diversity_penalty": 0.0,
- "temperature": 1.0,
- "top_k": 50,
- "top_p": 1.0,
- "typical_p": 1.0,
- "repetition_penalty": 1.0,
- "length_penalty": 1.0,
- "no_repeat_ngram_size": 0,
- "encoder_no_repeat_ngram_size": 0,
- "bad_words_ids": null,
- "num_return_sequences": 1,
- "output_scores": false,
- "return_dict_in_generate": false,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "remove_invalid_values": false,
- "exponential_decay_length_penalty": null,
- "suppress_tokens": null,
- "begin_suppress_tokens": null,
- "architectures": [
- "Qwen2ForCausalLM"
- ],
- "finetuning_task": null,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
- "tokenizer_class": null,
- "prefix": null,
- "bos_token_id": null,
- "pad_token_id": null,
- "eos_token_id": 151645,
- "sep_token_id": null,
- "decoder_start_token_id": null,
- "task_specific_params": null,
- "problem_type": null,
- "_name_or_path": "vilm/Quyen-SE-v0.1",
- "transformers_version": "4.39.3",
- "model_type": "qwen2"
- },
  "tie_word_embeddings": false,
  "tokenizer_model_max_length": 4096,
  "tokenizer_padding_side": "right",
@@ -120,78 +38,5 @@
  "use_cache": false,
  "use_mm_proj": true,
  "use_sliding_window": false,
- "vision_config": {
- "return_dict": true,
- "output_hidden_states": false,
- "output_attentions": false,
- "torchscript": false,
- "torch_dtype": null,
- "use_bfloat16": false,
- "tf_legacy_loss": false,
- "pruned_heads": {},
- "tie_word_embeddings": true,
- "chunk_size_feed_forward": 0,
- "is_encoder_decoder": false,
- "is_decoder": false,
- "cross_attention_hidden_size": null,
- "add_cross_attention": false,
- "tie_encoder_decoder": false,
- "max_length": 20,
- "min_length": 0,
- "do_sample": false,
- "early_stopping": false,
- "num_beams": 1,
- "num_beam_groups": 1,
- "diversity_penalty": 0.0,
- "temperature": 1.0,
- "top_k": 50,
- "top_p": 1.0,
- "typical_p": 1.0,
- "repetition_penalty": 1.0,
- "length_penalty": 1.0,
- "no_repeat_ngram_size": 0,
- "encoder_no_repeat_ngram_size": 0,
- "bad_words_ids": null,
- "num_return_sequences": 1,
- "output_scores": false,
- "return_dict_in_generate": false,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "remove_invalid_values": false,
- "exponential_decay_length_penalty": null,
- "suppress_tokens": null,
- "begin_suppress_tokens": null,
- "architectures": null,
- "finetuning_task": null,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
- "tokenizer_class": null,
- "prefix": null,
- "bos_token_id": null,
- "pad_token_id": null,
- "eos_token_id": null,
- "sep_token_id": null,
- "decoder_start_token_id": null,
- "task_specific_params": null,
- "problem_type": null,
- "_name_or_path": "",
- "model_type": "siglip_vision_model",
- "hidden_size": 1152,
- "intermediate_size": 4304,
- "num_hidden_layers": 27,
- "num_attention_heads": 16,
- "num_channels": 3,
- "patch_size": 14,
- "image_size": 384,
- "attention_dropout": 0.0,
- "layer_norm_eps": 1e-06,
- "hidden_act": "gelu_pytorch_tanh"
- },
  "vocab_size": 151936
  }
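In short, this commit drops the nested `text_config` and `vision_config` blocks from `config.json`; judging from the context lines in the diff, several of the removed text settings (e.g. `vocab_size`, `rms_norm_eps`, `rope_theta`, `sliding_window`) are already duplicated at the top level. A quick sanity check on a local copy of the updated file, as a minimal sketch:

```python
import json

# Inspect the flattened config produced by this commit: the nested
# "text_config" / "vision_config" blocks are removed, while top-level
# keys such as vocab_size and rms_norm_eps remain (values per the diff).
with open("config.json") as f:  # path to a local copy of the updated file
    cfg = json.load(f)

assert "text_config" not in cfg and "vision_config" not in cfg
print(cfg["vocab_size"])      # 151936
print(cfg["rms_norm_eps"])    # 1e-06
print(cfg["sliding_window"])  # 4096
```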
 