Update README.md
README.md CHANGED: the model id `yam-peleg/Hebrew-Gemma-11B` is updated to `yam-peleg/Hebrew-Gemma-11B-V2` in all three usage snippets and in the notice.
````diff
@@ -42,8 +42,8 @@ First make sure to `pip install -U transformers`, then copy the snippet from the
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
-model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt")
````
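For context, the first snippet after this change would read roughly as below. The `generate` and `print` lines sit outside the hunk (the later hunk headers quote `print(tokenizer.decode(outputs[0]))` as context), and `max_new_tokens=64` is an illustrative assumption, not part of the diff:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt")

# The diff hunk ends before the README's own generate/print lines;
# max_new_tokens is an assumed value for illustration.
outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```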
````diff
@@ -57,8 +57,8 @@ print(tokenizer.decode(outputs[0]))
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
-model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2", device_map="auto")
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
````
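Applied to the second snippet, the updated GPU variant might look like the sketch below. `device_map="auto"` relies on the `accelerate` package being installed; the generation arguments are again illustrative assumptions:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
# device_map="auto" places the model across available devices (requires `accelerate`).
model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2", device_map="auto")

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=64)  # assumed token budget
print(tokenizer.decode(outputs[0]))
```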
````diff
@@ -72,8 +72,8 @@ print(tokenizer.decode(outputs[0]))
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
-model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
````
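Likewise, the updated 4-bit variant would come out roughly as below; loading with `load_in_4bit=True` needs the `bitsandbytes` package and a CUDA GPU, and the generation arguments remain illustrative:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-V2")
# load_in_4bit quantizes the weights at load time (requires `bitsandbytes` and CUDA).
model = AutoModelForCausalLM.from_pretrained(
    "yam-peleg/Hebrew-Gemma-11B-V2",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=64)  # assumed token budget
print(tokenizer.decode(outputs[0]))
```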
````diff
@@ -89,7 +89,7 @@ print(tokenizer.decode(outputs[0])
 
 ### Notice
 
-Hebrew-Gemma-11B is a pretrained base model and therefore does not have any moderation mechanisms.
+Hebrew-Gemma-11B-V2 is a pretrained base model and therefore does not have any moderation mechanisms.
 
 
 ### Authors
````