yam-peleg committed on
Commit
0d4a056
•
1 Parent(s): d896dc2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -6
README.md CHANGED
@@ -36,8 +36,8 @@ First make sure to `pip install -U transformers`, then copy the snippet from the
36
  ```python
37
  from transformers import AutoTokenizer, AutoModelForCausalLM
38
 
39
- tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
40
- model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
41
 
42
  input_text = "שלום! מה שלומך היום?"
43
  input_ids = tokenizer(input_text, return_tensors="pt")
@@ -51,8 +51,8 @@ print(tokenizer.decode(outputs[0]))
51
  ```python
52
  from transformers import AutoTokenizer, AutoModelForCausalLM
53
 
54
- tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
55
- model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", device_map="auto")
56
 
57
  input_text = "שלום! מה שלומך היום?"
58
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -66,8 +66,8 @@ print(tokenizer.decode(outputs[0]))
66
  ```python
67
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
68
 
69
- tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
70
- model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
71
 
72
  input_text = "שלום! מה שלומך היום?"
73
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
36
  ```python
37
  from transformers import AutoTokenizer, AutoModelForCausalLM
38
 
39
+ tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct")
40
+ model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct")
41
 
42
  input_text = "שלום! מה שלומך היום?"
43
  input_ids = tokenizer(input_text, return_tensors="pt")
 
51
  ```python
52
  from transformers import AutoTokenizer, AutoModelForCausalLM
53
 
54
+ tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct")
55
+ model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct", device_map="auto")
56
 
57
  input_text = "שלום! מה שלומך היום?"
58
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
66
  ```python
67
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
68
 
69
+ tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct")
70
+ model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B-Instruct", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
71
 
72
  input_text = "שלום! מה שלומך היום?"
73
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")