---
license: apache-2.0
library_name: transformers
datasets:
  - berkeley-nest/Nectar
base_model: openchat/openchat-3.5-0106
model-index:
  - name: openchat-nectar-0.14
    results:
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: AI2 Reasoning Challenge (25-Shot)
          type: ai2_arc
          config: ARC-Challenge
          split: test
          args:
            num_few_shot: 25
        metrics:
          - type: acc_norm
            value: 65.61
            name: normalized accuracy
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: HellaSwag (10-Shot)
          type: hellaswag
          split: validation
          args:
            num_few_shot: 10
        metrics:
          - type: acc_norm
            value: 83.02
            name: normalized accuracy
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: MMLU (5-Shot)
          type: cais/mmlu
          config: all
          split: test
          args:
            num_few_shot: 5
        metrics:
          - type: acc
            value: 64.58
            name: accuracy
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: TruthfulQA (0-shot)
          type: truthful_qa
          config: multiple_choice
          split: validation
          args:
            num_few_shot: 0
        metrics:
          - type: mc2
            value: 50.09
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: Winogrande (5-shot)
          type: winogrande
          config: winogrande_xl
          split: validation
          args:
            num_few_shot: 5
        metrics:
          - type: acc
            value: 82
            name: accuracy
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
      - task:
          type: text-generation
          name: Text Generation
        dataset:
          name: GSM8k (5-shot)
          type: gsm8k
          config: main
          split: test
          args:
            num_few_shot: 5
        metrics:
          - type: acc
            value: 69.22
            name: accuracy
        source:
          url: >-
            https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14
          name: Open LLM Leaderboard
---

Training hyperparameters:

```python
max_steps = 200
learning_rate = 1e-6
warmup_ratio = 0.1
dpo_beta = 0.4
use_rslora = True
use_loftq = False
lora_rank = 128
lora_alpha = 256
load_separate_reference_model = False
optim = "paged_lion_32bit"
```

# Open LLM Leaderboard Evaluation Results

Detailed results can be found [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andysalerno/openchat-nectar-0.14).
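These scores are also embedded in the `model-index` block of the card metadata above, so they can be read programmatically. A small illustrative snippet using `huggingface_hub`:

```python
from huggingface_hub import ModelCard

# Parses the model-index frontmatter shown at the top of this card.
card = ModelCard.load("andysalerno/openchat-nectar-0.14")
for r in card.data.eval_results:
    print(f"{r.dataset_name}: {r.metric_value} ({r.metric_type})")
```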

| Metric                            | Value |
|-----------------------------------|------:|
| Avg.                              | 69.09 |
| AI2 Reasoning Challenge (25-Shot) | 65.61 |
| HellaSwag (10-Shot)               | 83.02 |
| MMLU (5-Shot)                     | 64.58 |
| TruthfulQA (0-shot)               | 50.09 |
| Winogrande (5-shot)               | 82.00 |
| GSM8k (5-shot)                    | 69.22 |
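The leaderboard's `Avg.` is the unweighted mean of the six benchmark scores, which the table reproduces:

```python
scores = [65.61, 83.02, 64.58, 50.09, 82.00, 69.22]
print(round(sum(scores) / len(scores), 2))  # 69.09
```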