ayoubkirouane
committed on
Commit
•
659fa5f
1
Parent(s):
bcf5b52
Update README.md
Browse files
README.md
CHANGED
@@ -86,13 +86,11 @@ model = PeftModel.from_pretrained(model, "TuningAI/Llama2_13B_startup_Assistant"
|
|
86 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf", trust_remote_code=True)
|
87 |
tokenizer.pad_token = tokenizer.eos_token
|
88 |
tokenizer.padding_side = "right"
|
|
|
89 |
while 1:
|
90 |
input_text = input(">>>")
|
91 |
prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n {input_text}. [/INST]"
|
92 |
-
|
93 |
-
num_prompt_tokens = len(tokenizer(prompt)['input_ids'])
|
94 |
-
max_length = num_prompt_tokens + num_new_tokens
|
95 |
-
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=max_length)
|
96 |
result = pipe(prompt)
|
97 |
print(result[0]['generated_text'].replace(prompt, ''))
|
98 |
```
|
|
|
86 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf", trust_remote_code=True)
|
87 |
tokenizer.pad_token = tokenizer.eos_token
|
88 |
tokenizer.padding_side = "right"
|
89 |
+
system_message = "Given a user's startup-related question in English, you will generate a thoughtful answer in English."
|
90 |
while 1:
|
91 |
input_text = input(">>>")
|
92 |
prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n {input_text}. [/INST]"
|
93 |
+
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
|
|
|
|
|
|
|
94 |
result = pipe(prompt)
|
95 |
print(result[0]['generated_text'].replace(prompt, ''))
|
96 |
```
|