---
license: apache-2.0
datasets:
  - argilla/distilabel-intel-orca-dpo-pairs
language:
  - bg
  - ca
  - cs
  - da
  - de
  - en
  - es
  - fr
  - hr
  - hu
  - it
  - nl
  - pl
  - pt
  - ro
  - ru
  - sl
  - sr
  - sv
  - uk
library_name: transformers
widget:
  - text: |
      <bos><start_of_turn>system
      You are a helpful AI assistant.<end_of_turn>
      <start_of_turn>user
      What is the meaning of life in the current time?<end_of_turn>
      <start_of_turn>model
---

Reference data and model:

  datasets:
    - source: https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs
    - Gemma ChatML format: https://huggingface.co/datasets/NickyNicky/distilabel-intel-orca-dpo-pairs_gemma_chatml

  model:
    - base: google/gemma-2b-it
      https://huggingface.co/google/gemma-2b-it
    - fine-tuned: https://huggingface.co/NickyNicky/gemma-2b-it_oasst2_chatML_Cluster_2_V1

  Epochs: 1

  Future experts: 4

  Eval model: link coming soon
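
To inspect the preference pairs the model was aligned on, the dataset loads directly with `datasets` (a quick-look sketch; the `train` split name is an assumption):

```python
from datasets import load_dataset

# Quick look at the DPO preference pairs used for fine-tuning
ds = load_dataset("argilla/distilabel-intel-orca-dpo-pairs", split="train")
print(ds)     # column names and row count
print(ds[0])  # one chosen/rejected record
```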

Training loss (train/loss): 0.0664

Reward accuracies (train/rewards/accuracies): 0.9642857313156128
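
These metric names (`train/loss`, `train/rewards/accuracies`) match what trl's `DPOTrainer` logs during DPO fine-tuning. A minimal sketch of such a run; every hyperparameter below is illustrative, not the recipe actually used:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "google/gemma-2b-it"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

# Assumption: the dataset is mapped to prompt/chosen/rejected columns beforehand
train_ds = load_dataset("NickyNicky/distilabel-intel-orca-dpo-pairs_gemma_chatml", split="train")

trainer = DPOTrainer(
    model,
    ref_model=None,   # trl creates an implicit frozen reference copy
    beta=0.1,         # illustrative DPO temperature
    args=TrainingArguments(output_dir="out",
                           num_train_epochs=1,
                           per_device_train_batch_size=1,
                           report_to="wandb"),
    train_dataset=train_ds,
    tokenizer=tokenizer,
    max_length=2048,
    max_prompt_length=1024,
)
trainer.train()  # logs train/loss and train/rewards/accuracies
```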

Install dependencies:

```python
!python -m pip install --upgrade pip
!pip install "torch>=2.1.1" -U
# !pip install torchaudio==2.2.0
!pip install -q datasets trl peft bitsandbytes sentencepiece wandb
!pip install -q accelerate safetensors deepspeed
!pip install -q scipy ninja -U
!pip install -q -U transformers==4.38.0
```

Version

```python
import torch
torch.__version__
# OUTPUT: '2.2.0+cu121'
```

How to use


```python
import torch

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    StoppingCriteria,
    StoppingCriteriaList,
)
```

```python
model_id = 'NickyNicky/gemma-2b-it_chatML_distilabel-intel-orca-dpo-pairs_v1'

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",  # requires the flash-attn package and a supported GPU
    # load_in_4bit=True,
    # low_cpu_mem_usage=True,
)

max_length = 2155
print("max_length", max_length)

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    # use_fast=False,
    max_length=max_length,
)
```
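
If the bf16 weights don't fit on the GPU, the commented `load_in_4bit` option can be spelled out as an explicit quantization config instead. A minimal sketch; the NF4 settings here are illustrative choices:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
import torch

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # illustrative quantization type
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16
)

# model_id as defined above
model_4bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=bnb_config,
)
```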


```python
class ListOfTokensStoppingCriteria(StoppingCriteria):
    """
    Stopping criterion based on a list of specific stop tokens.
    """
    def __init__(self, tokenizer, stop_tokens):
        self.tokenizer = tokenizer
        # Encode each stop token and store its token IDs in a list
        self.stop_token_ids_list = [tokenizer.encode(stop_token, add_special_tokens=False) for stop_token in stop_tokens]

    def __call__(self, input_ids, scores, **kwargs):
        # Check whether the most recently generated tokens match any stop-token sequence
        for stop_token_ids in self.stop_token_ids_list:
            len_stop_tokens = len(stop_token_ids)
            if len(input_ids[0]) >= len_stop_tokens:
                if input_ids[0, -len_stop_tokens:].tolist() == stop_token_ids:
                    return True
        return False

# Use the custom stopping criterion
stop_tokens = ["<end_of_turn>"]  # list of stop tokens

# Initialize the stopping criterion with the tokenizer and the stop-token list
stopping_criteria = ListOfTokensStoppingCriteria(tokenizer, stop_tokens)

# Add the criterion to a StoppingCriteriaList
stopping_criteria_list = StoppingCriteriaList([stopping_criteria])
```
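
If `<end_of_turn>` encodes to a single token for this tokenizer (an assumption worth verifying), the custom class can be skipped by passing the ID directly as an extra end-of-sequence token:

```python
# Assumption: "<end_of_turn>" is a single token in the Gemma vocabulary
end_of_turn_id = tokenizer.convert_tokens_to_ids("<end_of_turn>")

# generate() then stops on it without a custom StoppingCriteria:
# model.generate(..., eos_token_id=[tokenizer.eos_token_id, end_of_turn_id])
```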



```python
# EXAMPLE 1 (Spanish prompt: "Tell me the different types of recycling that usually exist in European cities")
txt = """<bos><start_of_turn>system
You are a helpful AI assistant.<end_of_turn>
<start_of_turn>user
Me dices los diferentes tipos de reciclaje que suelen existir en las ciudades europeas<end_of_turn>
<start_of_turn>model
"""

# EXAMPLE 2 (overwrites txt from example 1; use one of the two)
txt = """<bos><start_of_turn>system
You are a helpful AI assistant.<end_of_turn>
<start_of_turn>user
What is the meaning of life in the current time?<end_of_turn>
<start_of_turn>model
"""
```


```python
inputs = tokenizer.encode(txt,
                          return_tensors="pt",
                          add_special_tokens=False).to("cuda:0")

max_new_tokens = 1000
generation_config = GenerationConfig(
    max_new_tokens=max_new_tokens,
    temperature=0.1,
    # top_p=0.9,
    top_k=50,
    repetition_penalty=1.1,
    do_sample=True,
)

outputs = model.generate(input_ids=inputs,
                         generation_config=generation_config,
                         stopping_criteria=stopping_criteria_list)

# Set skip_special_tokens=True to drop the chat markers from the decoded text
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```
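
At 1000 new tokens generation can take a while, so streaming the output as it is produced is often more pleasant. A sketch using transformers' `TextIteratorStreamer`:

```python
from threading import Thread
from transformers import TextIteratorStreamer

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so run it in a background thread and consume the stream here
thread = Thread(target=model.generate,
                kwargs=dict(input_ids=inputs,
                            generation_config=generation_config,
                            stopping_criteria=stopping_criteria_list,
                            streamer=streamer))
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()
```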