Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ bnb_config = BitsAndBytesConfig(
 # Load model and tokenizer
 model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, device_map="cuda", quantization_config=bnb_config)
 tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
-tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '
+tokenizer.chat_template = "{% for message in messages %}{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'ζι\n' }}{% endif %}"
 
 # Define the response function
 @spaces.GPU
@@ -46,12 +46,12 @@ def respond(
         top_p=top_p
     )
 
-    print("response: ", tokenizer.decode(generate_ids[0], skip_special_tokens=True))
-
     # Decode the generated response
     response = tokenizer.decode(generate_ids[0], skip_special_tokens=True)
    response = response.split(f"θεΈ\n{message}\nζι\n")[1]
 
+    print("response: ", tokenizer.decode(generate_ids[0], skip_special_tokens=True))
+
     return response
 
 # Default prompt for the chatbot
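
For context on the first hunk: the commit completes the Jinja chat template so that, when add_generation_prompt is true, the rendered prompt ends with the assistant role marker (which appears mojibaked as 'ζι' in this view). Below is a minimal sketch of how such a template renders, using jinja2 directly instead of the model's tokenizer; the two-turn conversation and the standalone jinja2 usage are illustrative assumptions, and the role strings are copied verbatim from the diff.

from jinja2 import Template

# Template copied from the added line of the diff (role names appear mojibaked
# on this page; they stand in for the user/assistant markers the Space uses).
chat_template = (
    "{% for message in messages %}"
    "{{'' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ 'ζι\n' }}{% endif %}"
)

# Hypothetical two-turn conversation.
messages = [
    {"role": "θεΈ", "content": "Hello."},
    {"role": "ζι", "content": "Hi, nice to meet you."},
]

prompt = Template(chat_template).render(messages=messages, add_generation_prompt=True)
print(prompt)
# Each turn renders as "<role>\n<content>\n", and the trailing "ζι\n" cues the
# model to write the assistant's next turn. With the tokenizer loaded, the
# equivalent call would be:
#   tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)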
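
For the second hunk: the debug print now runs after the reply has been extracted, and the reply itself is obtained by splitting the decoded sequence on the prompt's final user turn plus the assistant marker. A small sketch of that extraction on a toy string; the variable names mirror the Space's respond() function, and the toy decoded text is an assumption.

# Toy stand-in for tokenizer.decode(generate_ids[0], skip_special_tokens=True):
# the decoded sequence echoes the prompt, then continues with the new reply.
message = "How are you today?"
decoded = f"θεΈ\n{message}\nζι\nI'm doing well, thanks for asking!"

# Same extraction as in the diff: keep everything after the last user turn
# plus the assistant marker.
response = decoded.split(f"θεΈ\n{message}\nζι\n")[1]
print(response)  # I'm doing well, thanks for asking!

# Note: split(...)[1] raises IndexError if the marker is absent from the
# output; str.partition() degrades more gracefully in that case.
_, _, safe_response = decoded.partition(f"θεΈ\n{message}\nζι\n")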