Hansimov committed
Commit 3b00a19
1 Parent(s): f1a82b8

:zap: [Enhance] Upgrade gemma-7b to 1.1

constants/models.py CHANGED
@@ -3,7 +3,7 @@ MODEL_MAP = {
     "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
     "openchat-3.5": "openchat/openchat-3.5-0106",
-    "gemma-7b": "google/gemma-7b-it",
+    "gemma-7b": "google/gemma-1.1-7b-it",
     "command-r-plus": "CohereForAI/c4ai-command-r-plus",
     "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
 }
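
For reference, a minimal sketch (not part of this commit) of how the alias map is typically consumed; the helper name get_model_fullname below is hypothetical:

# Hypothetical helper: resolve a short model alias to its Hugging Face repo id,
# falling back to the "default" entry for unknown aliases.
from constants.models import MODEL_MAP

def get_model_fullname(model: str) -> str:
    return MODEL_MAP.get(model, MODEL_MAP["default"])

# With this commit, the "gemma-7b" alias resolves to the 1.1 instruction-tuned repo:
assert get_model_fullname("gemma-7b") == "google/gemma-1.1-7b-it"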
messagers/message_composer.py CHANGED
@@ -125,7 +125,7 @@ class MessageComposer:
                 )
             self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
             self.merged_str = "\n".join(self.merged_str_list)
-        # https://huggingface.co/google/gemma-7b-it#chat-template
+        # https://huggingface.co/google/gemma-1.1-7b-it#chat-template
         elif self.model in ["gemma-7b"]:
             self.messages = self.concat_messages_by_role(messages)
             self.merged_str_list = []
@@ -147,11 +147,11 @@
                         f"{self.start_of_turn}user\n{content}{self.end_of_turn}"
                     )
             self.merged_str_list.append(f"{self.start_of_turn}model\n")
-            self.merged_str = "\n".join(self.merged_str_list)
+            self.merged_str = "<bos>" + "\n".join(self.merged_str_list)
         # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
         # https://huggingface.co/openchat/openchat-3.5-0106
         # elif self.model in ["openchat-3.5", "nous-mixtral-8x7b"]:
-        elif self.model in ["openchat-3.5", "command-r-plus"]:
+        elif self.model in ["openchat-3.5", "command-r-plus", "gemma-7b"]:
             tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
             self.merged_str = tokenizer.apply_chat_template(
                 messages, tokenize=False, add_generation_prompt=True
@@ -167,9 +167,9 @@
 if __name__ == "__main__":
     # model = "mixtral-8x7b"
     # model = "nous-mixtral-8x7b"
-    # model = "gemma-7b"
+    model = "gemma-7b"
     # model = "openchat-3.5"
-    model = "command-r-plus"
+    # model = "command-r-plus"
     composer = MessageComposer(model)
     messages = [
         {
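
For comparison with the manual gemma branch (which now prepends <bos>), a minimal sketch of what gemma-1.1's own chat template produces via the apply_chat_template path that this commit also adds "gemma-7b" to. Not part of the commit; it assumes transformers is installed and that the gated google/gemma-1.1-7b-it repo is accessible.

from transformers import AutoTokenizer

messages = [
    {"role": "user", "content": "What is the capital of France?"},
]

# Build the prompt string with the model's own chat template and
# append the generation prompt, as in the new elif branch above.
tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-7b-it")
merged_str = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(merged_str)
# Expected shape (per the gemma-1.1 chat template):
# <bos><start_of_turn>user
# What is the capital of France?<end_of_turn>
# <start_of_turn>model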