yam-peleg committed
Commit 4ee652c
1 Parent(s): d29f566

Update README.md

Files changed (1):
  1. README.md +9 -9
README.md CHANGED

@@ -30,11 +30,11 @@ First make sure to `pip install -U transformers`, then copy the snippet from the
 
 ### Running on CPU
 
-```
+```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt")
@@ -45,11 +45,11 @@ print(tokenizer.decode(outputs[0]))
 
 ### Running on GPU
 
-```
+```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", device_map="auto")
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -60,11 +60,11 @@ print(tokenizer.decode(outputs[0]))
 
 ### Running with 4-Bit precision
 
-```
+```python
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
+tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
+model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B", quantization_config = BitsAndBytesConfig(load_in_4bit=True))
 
 input_text = "שלום! מה שלומך היום?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
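
Each hunk ends at the tokenizer call, but the context line carried in the later hunk headers (`print(tokenizer.decode(outputs[0]))`) shows how every snippet in the README continues. For reference, a minimal end-to-end sketch of the CPU snippet after this change; the `generate` call sits outside the diff context, so its arguments here (`max_new_tokens`) are an illustrative assumption:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
model = AutoModelForCausalLM.from_pretrained("yam-peleg/Hebrew-Gemma-11B")

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt")

# The generation step falls outside the diff hunks; max_new_tokens is
# an illustrative choice, not taken from the committed README.
outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```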
 
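The GPU snippet's `device_map="auto"` requires the `accelerate` package (`pip install accelerate`). A common companion not shown in the commit is loading the weights in half precision, which roughly halves memory use; a sketch assuming a bfloat16-capable GPU, with `torch_dtype` as an addition rather than part of the committed README:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")

# device_map="auto" places the weights on the available GPU(s);
# bfloat16 is an illustrative choice (the default is float32).
model = AutoModelForCausalLM.from_pretrained(
    "yam-peleg/Hebrew-Gemma-11B",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```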
 
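The 4-bit snippet additionally requires `bitsandbytes` (`pip install bitsandbytes accelerate`). Beyond `load_in_4bit`, `BitsAndBytesConfig` exposes knobs such as the quantization data type and the compute dtype; the settings below are illustrative, not part of this commit:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# NF4 quantization with bfloat16 compute: illustrative settings,
# not taken from the committed README.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained("yam-peleg/Hebrew-Gemma-11B")
model = AutoModelForCausalLM.from_pretrained(
    "yam-peleg/Hebrew-Gemma-11B",
    quantization_config=quant_config,
)

input_text = "שלום! מה שלומך היום?"  # "Hello! How are you today?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```

At 4 bits per weight, the 11B parameters alone shrink from roughly 22 GB in float16 to roughly 5.5 GB, which is the usual motivation for this configuration.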