saraleivam committed

Commit 9a77067
Parent(s): 0afb1a8

Add new SentenceTransformer model.

Browse files:
- .gitattributes: +2 -0
- 1_Pooling/config.json: +10 -0
- README.md: +390 -0
- config.json: +26 -0
- config_sentence_transformers.json: +10 -0
- model.safetensors: +3 -0
- modules.json: +14 -0
- sentence_bert_config.json: +4 -0
- special_tokens_map.json: +51 -0
- tokenizer.json: +3 -0
- tokenizer_config.json: +64 -0
- unigram.json: +3 -0
.gitattributes CHANGED

@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+unigram.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED

@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 384,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
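The pooling configuration above enables mean pooling only: sentence embeddings are the average of the transformer's token embeddings over non-padding positions. A minimal sketch of that operation, for illustration only (not the module's actual implementation):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over non-padding positions.

    token_embeddings: (batch, seq_len, 384) output of the transformer module
    attention_mask:   (batch, seq_len), 1 for real tokens and 0 for padding
    """
    mask = attention_mask.unsqueeze(-1).float()     # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)   # sum over real tokens only
    counts = mask.sum(dim=1).clamp(min=1e-9)        # avoid division by zero
    return summed / counts                          # (batch, 384) sentence embeddings
```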
README.md ADDED

@@ -0,0 +1,390 @@
---
base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
datasets: []
language: []
library_name: sentence-transformers
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:521
- loss:MultipleNegativesRankingLoss
widget:
- source_sentence: 'Marketing digital: estrategias para redes sociales y SEO.'
  sentences:
  - AI developer with reinforcement learning skills.
  - Ingeniero civil con experiencia en diseño de estructuras.
  - Especialista en marketing digital con experiencia en campañas de Google Ads y Facebook Ads.
- source_sentence: AI for speech recognition and synthesis.
  sentences:
  - Ingeniero de machine learning con habilidades en PyTorch
  - AI developer with speech recognition skills.
  - Teacher with classroom management skills.
- source_sentence: Advanced CSS and responsive design.
  sentences:
  - Sort, query, and structure data in Pandas, the Python library. Describe how to model and interpret data using Python. Create basic data visualizations with Python libraries
  - Engineer with circuit design experience.
  - Front-end developer with advanced CSS and responsive web design skills.
- source_sentence: PostgreSQL Database Administration Course.
  sentences:
  - Nutritionist with clinical dietetics skills.
  - Community manager with experience in managing social networks and creating viral content.
  - Database administrator with PostgreSQL experience.
- source_sentence: Búsqueda, reconocimiento y captación de potenciales clientes nuevos en el sector público.Exploración de tendencias y competidores en el mercado, ajustando estrategias de comercialización.Elaborar y presentar propuestas personalizadas resaltando las ventajas de los servicios en la nube.Negociar condiciones, términos y precios con posibles clientes para garantizar la concreción de acuerdos de venta.Ofrecer asistencia posterior a la venta, resolver problemas y asegurar la satisfacción del cliente.Fomentar relaciones con clientes ya existentes, comprendiendo sus necesidades a largo plazo.Detectar oportunidades adicionales en cuentas existentes mediante la presentación de nuevas soluciones y servicios que beneficien a los clientes.
  sentences:
  - Demonstrate mastery of skills and knowledge acquired in the IBM Full Stack Software Developer Professional Certificate.. Apply understanding of common technologies related to full-stack, front-end, and back-end application development.. Explain concepts in cloud computing, web development, HTML, CSS, JavaScript, GitHub, Python and Django programming, microservices, and containers.. Analyze and troubleshoot issues in software design, development, deployment, and operations.
  - Digital Marketing, Media Production, Social Media, Marketing
  - Orador público con habilidades en presentaciones efectivas y comunicación en público
---

# SentenceTransformer based on sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) on a 521-sample triplet dataset. It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) <!-- at revision bf3bf13ab40c3157080a7ab344c831b9ad18b5eb -->
- **Maximum Sequence Length:** 128 tokens
- **Output Dimensionality:** 384 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Dataset:**
    - dataset
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("saraleivam/GURU-trained-model1")
# Run inference
sentences = [
    'Búsqueda, reconocimiento y captación de potenciales clientes nuevos en el sector público.Exploración de tendencias y competidores en el mercado, ajustando estrategias de comercialización.Elaborar y presentar propuestas personalizadas resaltando las ventajas de los servicios en la nube.Negociar condiciones, términos y precios con posibles clientes para garantizar la concreción de acuerdos de venta.Ofrecer asistencia posterior a la venta, resolver problemas y asegurar la satisfacción del cliente.Fomentar relaciones con clientes ya existentes, comprendiendo sus necesidades a largo plazo.Detectar oportunidades adicionales en cuentas existentes mediante la presentación de nuevas soluciones y servicios que beneficien a los clientes.',
    'Digital Marketing, Media Production, Social Media, Marketing',
    'Demonstrate mastery of skills and knowledge acquired in the IBM Full Stack Software Developer Professional Certificate.. Apply understanding of common technologies related to full-stack, front-end, and back-end application development.. Explain concepts in cloud computing, web development, HTML, CSS, JavaScript, GitHub, Python and Django programming, microservices, and containers.. Analyze and troubleshoot issues in software design, development, deployment, and operations.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 384]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
+
<!--
|
125 |
+
### Direct Usage (Transformers)
|
126 |
+
|
127 |
+
<details><summary>Click to see the direct usage in Transformers</summary>
|
128 |
+
|
129 |
+
</details>
|
130 |
+
-->
|
131 |
+
|
132 |
+
<!--
|
133 |
+
### Downstream Usage (Sentence Transformers)
|
134 |
+
|
135 |
+
You can finetune this model on your own dataset.
|
136 |
+
|
137 |
+
<details><summary>Click to expand</summary>
|
138 |
+
|
139 |
+
</details>
|
140 |
+
-->
|
141 |
+
|
142 |
+
<!--
|
143 |
+
### Out-of-Scope Use
|
144 |
+
|
145 |
+
*List how the model may foreseeably be misused and address what users ought not to do with the model.*
|
146 |
+
-->
|
147 |
+
|
148 |
+
<!--
|
149 |
+
## Bias, Risks and Limitations
|
150 |
+
|
151 |
+
*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
|
152 |
+
-->
|
153 |
+
|
154 |
+
<!--
|
155 |
+
### Recommendations
|
156 |
+
|
157 |
+
*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
|
158 |
+
-->
|
159 |
+
|
160 |
+
## Training Details
|
161 |
+
|
162 |
+
### Training Dataset
|
163 |
+
|
164 |
+
#### dataset
|
165 |
+
|
166 |
+
* Dataset: dataset
|
167 |
+
* Size: 521 training samples
|
168 |
+
* Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code>
|
169 |
+
* Approximate statistics based on the first 1000 samples:
|
170 |
+
| | anchor | positive | negative |
|
171 |
+
|:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
|
172 |
+
| type | string | string | string |
|
173 |
+
| details | <ul><li>min: 6 tokens</li><li>mean: 18.76 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 20.19 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 15.48 tokens</li><li>max: 128 tokens</li></ul> |
|
174 |
+
* Samples:
|
175 |
+
| anchor | positive | negative |
|
176 |
+
|:-------------------------------------------------------------|:-----------------------------------------------------------------------------|:-----------------------------------------------------------|
|
177 |
+
| <code>Introduction to Docker and containerization.</code> | <code>DevOps engineer with Docker and container orchestration skills.</code> | <code>Biologist with field research experience.</code> |
|
178 |
+
| <code>Curso de desarrollo de aplicaciones con Vue.js.</code> | <code>Desarrollador web con habilidades en Vue.js.</code> | <code>Médico con habilidades en cardiología.</code> |
|
179 |
+
| <code>Desarrollo de videojuegos con Godot</code> | <code>Desarrollador de videojuegos con experiencia en Godot</code> | <code>Profesor de arte con experiencia en escultura</code> |
|
180 |
+
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
|
181 |
+
```json
|
182 |
+
{
|
183 |
+
"scale": 20.0,
|
184 |
+
"similarity_fct": "cos_sim"
|
185 |
+
}
|
186 |
+
```
|
187 |
+
|
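For context, the (anchor, positive, negative) columns and loss parameters above map onto the following setup in sentence-transformers 3.x. This is a hedged sketch of how such a triplet dataset and loss can be constructed, not the author's actual training script; the example row is taken from the samples table:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, util
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

# Triplet dataset with the same column names as the model card (one illustrative row).
train_dataset = Dataset.from_dict({
    "anchor":   ["Introduction to Docker and containerization."],
    "positive": ["DevOps engineer with Docker and container orchestration skills."],
    "negative": ["Biologist with field research experience."],
})

# MultipleNegativesRankingLoss pulls each anchor toward its positive and pushes it away
# from the explicit negative plus all other in-batch positives, scored with scaled cosine similarity.
loss = MultipleNegativesRankingLoss(model, scale=20.0, similarity_fct=util.cos_sim)
```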
### Evaluation Dataset

#### dataset

* Dataset: dataset
* Size: 131 evaluation samples
* Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code>
* Approximate statistics based on the first 1000 samples:
  |         | anchor | positive | negative |
  |:--------|:-------|:---------|:---------|
  | type    | string | string   | string   |
  | details | <ul><li>min: 5 tokens</li><li>mean: 18.05 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 19.0 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 14.86 tokens</li><li>max: 109 tokens</li></ul> |
* Samples:
  | anchor | positive | negative |
  |:-------|:---------|:---------|
  | <code>Swift Mobile Application Development.</code>         | <code>iOS developer with experience in Swift and Xcode.</code>        | <code>Psychologist with trauma therapy experience.</code>               |
  | <code>Diseño de interfaces de usuario con Figma</code>     | <code>Diseñador UX/UI con habilidades en Figma y prototipado</code>   | <code>Ingeniero eléctrico con experiencia en sistemas de energía</code> |
  | <code>Principles of natural language understanding.</code> | <code>NLP engineer with natural language understanding skills.</code> | <code>Chef with traditional cuisine skills.</code>                      |
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```

### Training Hyperparameters

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 8
- `per_device_eval_batch_size`: 8
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 3.0
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: proportional

</details>
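The non-default values in the list above (3 epochs, batch size 8, learning rate 5e-05, seed 42) correspond roughly to a run like the following. This is a hedged sketch using the sentence-transformers 3.x trainer, with `train_dataset` and `eval_dataset` standing in for the 521/131-sample triplet splits (built as in the earlier dataset sketch); it is not the author's actual script:

```python
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
loss = MultipleNegativesRankingLoss(model)

# Hyperparameters taken from the list above; everything else stays at its default.
args = SentenceTransformerTrainingArguments(
    output_dir="GURU-trained-model1",   # illustrative output path
    num_train_epochs=3,
    per_device_train_batch_size=8,
    learning_rate=5e-5,
    seed=42,
)

# `train_dataset` / `eval_dataset` are assumed to be datasets with anchor/positive/negative columns.
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=loss,
)
trainer.train()
```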
### Training Logs
| Epoch | Step | dataset loss |
|:-----:|:----:|:------------:|
| 3.0   | 198  | 0.0467       |


### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.0.1
- Transformers: 4.41.2
- PyTorch: 2.3.0+cu121
- Accelerate: 0.31.0
- Datasets: 2.20.0
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json ADDED

@@ -0,0 +1,26 @@
{
  "_name_or_path": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.41.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 250037
}
config_sentence_transformers.json ADDED

@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "3.0.1",
    "transformers": "4.41.2",
    "pytorch": "2.3.0+cu121"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": null
}
model.safetensors ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e8c10a438edfaafb0ed00ae6bed991b1c920b95b6c4b5ff2c319df4e52492daa
size 470637416
modules.json ADDED

@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
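modules.json wires the two building blocks together: module 0 is the transformer encoder and module 1 is the mean-pooling layer configured in 1_Pooling/config.json. A minimal sketch of assembling an equivalent pipeline by hand, as an illustration of the module layout rather than how this checkpoint was produced:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the transformer encoder (128-token max sequence length, per sentence_bert_config.json).
word_embedding_model = models.Transformer(
    "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
    max_seq_length=128,
)

# Module 1: mean pooling over the 384-dimensional token embeddings.
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),
    pooling_mode="mean",
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])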
sentence_bert_config.json ADDED

@@ -0,0 +1,4 @@
{
  "max_seq_length": 128,
  "do_lower_case": false
}
special_tokens_map.json ADDED

@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
size 17082987
tokenizer_config.json ADDED

@@ -0,0 +1,64 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": true,
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "max_length": 128,
  "model_max_length": 128,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "</s>",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "<unk>"
}
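Since "model_max_length" is 128 and "truncation_side" is "right", inputs longer than 128 tokens are cut from the end when encoded. A small illustrative sketch of loading the tokenizer from this repo and checking that behavior (the sample text is arbitrary):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("saraleivam/GURU-trained-model1")

text = "Curso de desarrollo de aplicaciones con Vue.js. " * 50  # deliberately long input
encoded = tokenizer(text, truncation=True, max_length=tokenizer.model_max_length)

# No more than 128 token ids survive after right-side truncation.
print(len(encoded["input_ids"]))
```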
unigram.json ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
size 14763260