tanyuzhou committed on
Commit
e3ff901
β€’
1 Parent(s): 2c7e5d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -13,7 +13,7 @@ bnb_config = BitsAndBytesConfig(
13
  # Load model and tokenizer
14
  model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, device_map="cuda", quantization_config=bnb_config)
15
  tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
16
- # tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + '\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>ζ˜Ÿι‡Ž\n' }}{% endif %}" # Be careful that this model used custom chat template.
17
 
18
  # Define the response function
19
  @spaces.GPU
@@ -50,7 +50,7 @@ def respond(
50
 
51
  # Decode the generated response
52
  response = tokenizer.decode(generate_ids[0], skip_special_tokens=True)
53
- response = response.split(f"θ€εΈˆ\n{message}\nζ˜Ÿι‡Ž\n")[1]
54
 
55
  return response
56
 
 
13
  # Load model and tokenizer
14
  model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, device_map="cuda", quantization_config=bnb_config)
15
  tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
16
+ tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>ζ˜Ÿι‡Ž:\n' }}{% endif %}" # Be careful that this model used custom chat template.
17
 
18
  # Define the response function
19
  @spaces.GPU
 
50
 
51
  # Decode the generated response
52
  response = tokenizer.decode(generate_ids[0], skip_special_tokens=True)
53
+ response = response.split(f"θ€εΈˆ:\n{message}\nζ˜Ÿι‡Ž:\n")[1]
54
 
55
  return response
56