michaelfeil commited on
Commit
66c1a34
1 Parent(s): 4d118d5

Upload sentence-transformers/all-MiniLM-L6-v2 ctranslate fp16 weights

Browse files
Files changed (3) hide show
  1. README.md +8 -11
  2. config.json +27 -5
  3. model.bin +2 -2
README.md CHANGED
@@ -38,20 +38,17 @@ Speedup inference while reducing memory by 2x-4x using int8 inference in C++ on
38
 
39
  quantized version of [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
40
  ```bash
41
- pip install hf-hub-ctranslate2>=2.10.0 ctranslate2>=3.16.0
42
  ```
43
 
44
  ```python
45
  # from transformers import AutoTokenizer
46
  model_name = "michaelfeil/ct2fast-all-MiniLM-L6-v2"
47
 
48
- from hf_hub_ctranslate2 import EncoderCT2fromHfHub
49
- model = EncoderCT2fromHfHub(
50
- # load in int8 on CUDA
51
- model_name_or_path=model_name,
52
- device="cuda",
53
- compute_type="float16",
54
- # tokenizer=AutoTokenizer.from_pretrained("{ORG}/{NAME}")
55
  )
56
  embeddings = model.encode(
57
  ["I like soccer", "I like tennis", "The eiffel tower is in Paris"],
@@ -65,13 +62,13 @@ scores = (embeddings @ embeddings.T) * 100
65
  ```
66
 
67
  Checkpoint compatible to [ctranslate2>=3.16.0](https://github.com/OpenNMT/CTranslate2)
68
- and [hf-hub-ctranslate2>=2.10.0](https://github.com/michaelfeil/hf-hub-ctranslate2)
69
  - `compute_type=int8_float16` for `device="cuda"`
70
  - `compute_type=int8` for `device="cpu"`
71
 
72
- Converted on 2023-06-16 using
73
  ```
74
- ct2-transformers-converter --model sentence-transformers/all-MiniLM-L6-v2 --output_dir ~/tmp-ct2fast-all-MiniLM-L6-v2 --force --copy_files config_sentence_transformers.json tokenizer.json modules.json README.md tokenizer_config.json sentence_bert_config.json data_config.json vocab.txt special_tokens_map.json .gitattributes --quantization float16 --trust_remote_code
75
  ```
76
 
77
  # Licence and other remarks:
 
38
 
39
  quantized version of [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
40
  ```bash
41
+ pip install "hf-hub-ctranslate2>=3.0.0" "ctranslate2>=3.16.0"
42
  ```
43
 
44
  ```python
45
  # from transformers import AutoTokenizer
46
  model_name = "michaelfeil/ct2fast-all-MiniLM-L6-v2"
47
 
48
+ from hf_hub_ctranslate2 import CT2SentenceTransformer
49
+ model = CT2SentenceTransformer(
50
+ model_name, compute_type="int8_float16", device="cuda",
51
+ repo_contains_ct2=True
 
 
 
52
  )
53
  embeddings = model.encode(
54
  ["I like soccer", "I like tennis", "The eiffel tower is in Paris"],
 
62
  ```
63
 
64
  Checkpoint compatible to [ctranslate2>=3.16.0](https://github.com/OpenNMT/CTranslate2)
65
+ and [hf-hub-ctranslate2>=3.0.0](https://github.com/michaelfeil/hf-hub-ctranslate2)
66
  - `compute_type=int8_float16` for `device="cuda"`
67
  - `compute_type=int8` for `device="cpu"`
68
 
69
+ Converted on 2023-06-18 using
70
  ```
71
+ ct2-transformers-converter --model sentence-transformers/all-MiniLM-L6-v2 --output_dir ~/tmp-ct2fast-all-MiniLM-L6-v2 --force --copy_files tokenizer.json sentence_bert_config.json data_config.json README.md modules.json special_tokens_map.json vocab.txt config_sentence_transformers.json tokenizer_config.json .gitattributes --trust_remote_code
72
  ```
73
 
74
  # Licence and other remarks:
config.json CHANGED
@@ -1,6 +1,28 @@
1
  {
2
- "bos_token": "<s>",
3
- "eos_token": "</s>",
4
- "layer_norm_epsilon": 1e-12,
5
- "unk_token": "[UNK]"
6
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  {
2
+ "_name_or_path": "nreimers/MiniLM-L6-H384-uncased",
3
+ "architectures": [
4
+ "BertModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "gradient_checkpointing": false,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 384,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 1536,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 12,
17
+ "num_hidden_layers": 6,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "transformers_version": "4.8.2",
21
+ "type_vocab_size": 2,
22
+ "use_cache": true,
23
+ "vocab_size": 30522,
24
+ "bos_token": "<s>",
25
+ "eos_token": "</s>",
26
+ "layer_norm_epsilon": 1e-12,
27
+ "unk_token": "[UNK]"
28
+ }
model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8e02198a1a1480129f35fede1751d0406a43e5ea8e7abb618ac58285e974cd6e
3
- size 45430860
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2abb237beb39bae980a7537a16a1fe5a0f0be2184be1d9f39f755b731a582adc
3
+ size 90857292