FremyCompany committed
Commit 35891ec
1 Parent(s): 413bd0f
Update README.md
README.md CHANGED
@@ -73,9 +73,9 @@ pip install -U sentence-transformers
 Then you can use the model like this:
 ```python
 from sentence_transformers import SentenceTransformer
-sentences = ["
+sentences = ["wond door kattenscrab", "kattenkrabziekte", "bartonellosis"]
 
-model = SentenceTransformer('FremyCompany/BioLORD-2023-M')
+model = SentenceTransformer('FremyCompany/BioLORD-2023-M-Dutch-InContext-v1')
 embeddings = model.encode(sentences)
 print(embeddings)
 ```
@@ -94,10 +94,10 @@ def mean_pooling(model_output, attention_mask):
     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
 
 # Sentences we want sentence embeddings for
-sentences = ["
+sentences = ["wond door kattenscrab", "kattenkrabziekte", "bartonellosis"]
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('FremyCompany/BioLORD-2023-M')
+tokenizer = AutoTokenizer.from_pretrained('FremyCompany/BioLORD-2023-M-Dutch-InContext-v1')
 model = AutoModel.from_pretrained('FremyCompany/BioLORD-2023-M')
 
 # Tokenize sentences
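For context beyond the diff: the second README snippet is cut off at the `# Tokenize sentences` comment, so the remaining steps are not visible in the hunk above. Below is a minimal sketch of how such a `transformers`-based pipeline is typically completed (tokenization, forward pass, mean pooling, normalization). The body of `mean_pooling` before its `return` line and everything after the tokenization comment are assumptions based on the common sentence-embedding template, not taken from this commit; the sketch also loads `AutoModel` from the new `BioLORD-2023-M-Dutch-InContext-v1` id, whereas the unchanged context line in the diff still points at `BioLORD-2023-M`.

```python
# Hedged sketch: completes the transformers-based example from the diff above.
# The return line of mean_pooling, the sentences, and the tokenizer id come from
# the README; the remaining steps follow the usual sentence-embedding recipe.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

def mean_pooling(model_output, attention_mask):
    # Average the token embeddings, ignoring padding positions.
    token_embeddings = model_output[0]
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ["wond door kattenscrab", "kattenkrabziekte", "bartonellosis"]

# Load model from HuggingFace Hub (assumption: both parts from the new repo id)
tokenizer = AutoTokenizer.from_pretrained('FremyCompany/BioLORD-2023-M-Dutch-InContext-v1')
model = AutoModel.from_pretrained('FremyCompany/BioLORD-2023-M-Dutch-InContext-v1')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings without tracking gradients
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool over tokens and L2-normalize to obtain one vector per sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
print(sentence_embeddings)
```

Either pipeline yields one vector per input string, so the three example strings can then be compared directly, for example with cosine similarity.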