Update README.md
README.md CHANGED
@@ -34,7 +34,7 @@ import torch
 from transformers import pipeline
 
 generate_text = pipeline(
-    model="
+    model="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2",
     torch_dtype=torch.float16,
     trust_remote_code=True,
     use_fast=False,
@@ -73,12 +73,12 @@ from h2oai_pipeline import H2OTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(
-    "
+    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2",
     use_fast=False,
     padding_side="left"
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "
+    "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2",
     torch_dtype=torch.float16,
     device_map={"": "cuda:0"}
 )
@@ -103,7 +103,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "
+model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2"  # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
 # You can find an example prompt in the experiment logs.
 prompt = "<|prompt|>How are you?</s><|answer|>"
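
For reference, a minimal sketch of how the first hunk's snippet reads once the model name this commit fills in is applied. The example question and the generation arguments are illustrative assumptions, not part of the README:

```python
import torch
from transformers import pipeline

# Build the pipeline with the model name added in this commit.
generate_text = pipeline(
    model="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2",
    torch_dtype=torch.float16,
    trust_remote_code=True,  # loads the repo's custom H2OTextGenerationPipeline
    use_fast=False,
)

# Assumption: the custom pipeline applies the <|prompt|>...</s><|answer|>
# template itself, so a plain question can be passed in.
res = generate_text("How are you?", max_new_tokens=256)
print(res[0]["generated_text"])
```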
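
The third hunk belongs to the "construct the pipeline from the loaded model and tokenizer yourself" section. A hedged sketch of how that path presumably continues, using only standard `transformers` calls; the generation settings are assumptions, not taken from this commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2"  # either local folder or huggingface model name
# Important: The prompt needs to be in the same format the model was trained with.
prompt = "<|prompt|>How are you?</s><|answer|>"

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map={"": "cuda:0"},
)

# Tokenize the pre-formatted prompt and generate a completion.
inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
tokens = model.generate(**inputs, max_new_tokens=256)  # max_new_tokens is an illustrative assumption
print(tokenizer.decode(tokens[0], skip_special_tokens=True))
```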