tanyuzhou committed
Commit bf7a3d3
1 Parent(s): 2cbb979

Update app.py

Files changed (1): app.py (+1, -15)
app.py CHANGED
@@ -16,23 +16,9 @@ def respond(
     temperature,
     top_p,
 ):
-    quantization_config = BitsAndBytesConfig(
-        bnb_4bit_compute_dtype="float16",
-        bnb_4bit_quant_storage="uint8",
-        bnb_4bit_quant_type="nf4",
-        bnb_4bit_use_double_quant=True,
-        llm_int8_enable_fp32_cpu_offload=False,
-        llm_int8_has_fp16_weight=False,
-        llm_int8_skip_modules=None,
-        llm_int8_threshold=6.0,
-        load_in_4bit=True,
-        load_in_8bit=False,
-        quant_method="bitsandbytes"
-    )
-
     # Load model and tokenizer
     model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True)
-    tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True, quantization_config=quantization_config)
+    tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
     tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>ζ˜Ÿι‡Ž:\n' }}{% endif %}"  # Be careful: this model uses a custom chat template.
 
     # Construct the messages for the chat
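Note on the removal: the quantization_config was being passed to AutoTokenizer.from_pretrained, which does not quantize anything; it belongs on the model load. If 4-bit loading were still wanted, a minimal sketch under that assumption (simplified values, not part of this commit) would look like:

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumption: 4-bit NF4 quantization applied to the model, not the tokenizer.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "Rorical/0-roleplay",
    return_dict=True,
    trust_remote_code=True,
    quantization_config=quantization_config,
)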
 
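The custom chat template itself is unchanged. To sanity-check the prompt it renders, something like the following could be used (the message content here is illustrative, not from the commit):

# Illustrative: inspect the rendered prompt produced by the custom template.
messages = [{"role": "user", "content": "hello"}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
# Expected, roughly: <|im_start|>user:\nhello<|im_end|>\n<|im_start|>ζ˜Ÿι‡Ž:\n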