LanguageBind committed
Commit 0b21822
1 Parent(s): 075fd32

Upload 6 files

config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "Qwen/Qwen-1_8B",
+   "architectures": [
+     "LlavaQWenForCausalLM"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "Qwen/Qwen-1_8B--configuration_qwen.QWenConfig",
+     "AutoModelForCausalLM": "Qwen/Qwen-1_8B--modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": true,
+   "emb_dropout_prob": 0.0,
+   "fp16": false,
+   "fp32": false,
+   "freeze_mm_mlp_adapter": false,
+   "hidden_size": 2048,
+   "image_aspect_ratio": "pad",
+   "image_projector_type": "mlp2x_gelu",
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 8192,
+   "mm_hidden_size": 1024,
+   "mm_image_tower": "openai/clip-vit-large-patch14-336",
+   "mm_projector_lr": null,
+   "mm_use_im_patch_token": false,
+   "mm_use_im_start_end": false,
+   "mm_video_tower": null,
+   "mm_vision_select_feature": "patch",
+   "mm_vision_select_layer": -2,
+   "model_type": "llava_qwen",
+   "no_bias": true,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "onnx_safe": null,
+   "pad_token_id": 151646,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 8192,
+   "softmax_in_fp32": false,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "QWenTokenizer",
+   "tokenizer_padding_side": "right",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.2",
+   "tune_mm_mlp_adapter": false,
+   "use_cache": true,
+   "use_cache_kernel": false,
+   "use_cache_quantization": false,
+   "use_dynamic_ntk": true,
+   "use_flash_attn": true,
+   "use_logn_attn": true,
+   "use_mm_proj": true,
+   "video_global_proj": false,
+   "video_projector_type": "linear",
+   "video_spatial_proj": false,
+   "video_temproal_proj": false,
+   "vocab_size": 151936
+ }
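
This config declares a custom "llava_qwen" model type on top of Qwen-1_8B, with the listed LlavaQWenForCausalLM class coming from the project's own codebase rather than stock transformers (the auto_map entries only cover the base Qwen remote code). A minimal sketch of reading the config with transformers, assuming a local clone of this repo at a placeholder path; it is not a full loading recipe for the multimodal model:

import torch
from transformers import AutoConfig

repo_path = "path/to/this/checkpoint"  # placeholder for a local clone of this repo

# trust_remote_code pulls configuration_qwen.QWenConfig via the auto_map entry;
# the extra multimodal fields are kept as plain attributes on the config object.
config = AutoConfig.from_pretrained(repo_path, trust_remote_code=True)
print(config.model_type)       # "llava_qwen"
print(config.mm_image_tower)   # "openai/clip-vit-large-patch14-336"
print(config.hidden_size)      # 2048, with torch_dtype bfloat16 per "bf16": true

# The full vision-language model (LlavaQWenForCausalLM) is instantiated by the
# project's own code, not by a stock transformers Auto class, so it is not shown here.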
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "chat_format": "raw",
+   "do_sample": true,
+   "eos_token_id": 151643,
+   "max_new_tokens": 512,
+   "pad_token_id": 151643,
+   "stop_words_ids": [
+     [
+       151643
+     ]
+   ],
+   "top_k": 0,
+   "top_p": 0.8,
+   "transformers_version": "4.36.2"
+ }
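
These decoding defaults (nucleus sampling with top_p=0.8, up to 512 new tokens, token 151643 as EOS/pad/stop) can be read back with the standard GenerationConfig API. A small sketch under the same placeholder-path assumption as above:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(gen_cfg.do_sample, gen_cfg.top_p, gen_cfg.max_new_tokens)  # True 0.8 512

# With an already loaded model and tokenized inputs (not shown here), these
# defaults would be applied via:
# output_ids = model.generate(**inputs, generation_config=gen_cfg)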
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:549abd3e41e2fc5c36fa079771c8f1f6cf4a4801a8a05d4e1f1d736e30318a72
+ size 4293347888
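
The checked-in file is only a Git LFS pointer: it records the SHA-256 of the weights and their size (about 4.3 GB), and the real safetensors blob is fetched by LFS on clone or download. A sketch that verifies a downloaded copy against the pointer, assuming the resolved file sits in the current directory:

import hashlib, os

path = "model.safetensors"  # the resolved weights file, not the 3-line pointer
expected_sha256 = "549abd3e41e2fc5c36fa079771c8f1f6cf4a4801a8a05d4e1f1d736e30318a72"
expected_size = 4293347888

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_sha256
print("model.safetensors matches the LFS pointer")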
qwen.tiktoken ADDED
The diff for this file is too large to render.
 
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|extra_0|>",
+   "unk_token": {
+     "content": "<|extra_0|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "Qwen/Qwen-1_8B--tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 2048,
+   "pad_token": "<|extra_0|>",
+   "padding_side": "right",
+   "tokenizer_class": "QWenTokenizer",
+   "unk_token": "<|extra_0|>",
+   "use_fast": false
+ }
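
Together with special_tokens_map.json, this wires the tokenizer to the Qwen-1_8B remote QWenTokenizer over qwen.tiktoken, with <|endoftext|> as EOS and <|extra_0|> serving as both pad and unk, padded on the right. A minimal loading sketch, again assuming the files live in a local checkout at a placeholder path:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "path/to/this/checkpoint",   # placeholder for a local clone of this repo
    trust_remote_code=True,      # pulls tokenization_qwen.QWenTokenizer via auto_map
    use_fast=False,              # the config pins the slow QWenTokenizer
)
print(tokenizer.eos_token, tokenizer.pad_token)  # <|endoftext|> <|extra_0|>
print(tokenizer.padding_side)                    # "right"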