import numpy as np
import pandas as pd
from pandas import DataFrame
from huggingface_hub import add_collection_item, delete_collection_item, get_collection, update_collection_item
# HfHubHTTPError is part of the public API; avoid importing the private _errors module.
from huggingface_hub.utils import HfHubHTTPError
from src.display.utils import AutoEvalColumn, ModelType, NUMERIC_INTERVALS
from src.envs import H4_TOKEN, PATH_TO_COLLECTION
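# H4_TOKEN must be a Hub token with write access to the target collection;
# PATH_TO_COLLECTION is the collection slug (both come from src.envs).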
# Size intervals for the collection. The previously hardcoded buckets are
# kept below for reference:
#
#     intervals = {
#         "1B": pd.Interval(0, 1.5, closed="right"),
#         "3B": pd.Interval(2.5, 3.5, closed="neither"),
#         "7B": pd.Interval(6, 8, closed="neither"),
#         "13B": pd.Interval(10, 14, closed="neither"),
#         "30B": pd.Interval(25, 35, closed="neither"),
#         "65B": pd.Interval(60, 70, closed="neither"),
#     }
#
# We now reuse the leaderboard's own buckets, dropping the "?" bucket that
# collects models without a known parameter count.
intervals = {k: v for k, v in NUMERIC_INTERVALS.items() if "?" not in k}
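# Each value is a pd.Interval, so a scalar membership test such as
# `6.7 in pd.Interval(6, 8, closed="neither")` is True; update_collections
# below instead wraps each interval in a pd.IntervalIndex to get a
# vectorized .contains() check over the whole params column.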
def update_collections(df: DataFrame):
    """Updates the Open LLM Leaderboard model collection with the latest best
    models for each size category and type.
    """
    collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)

    # Filter out quantized models, keeping only half-precision checkpoints.
    # Doing this before deriving params_column keeps every boolean mask built
    # from it index-aligned with the filtered df.
    df = df[df[AutoEvalColumn.precision.name].isin(["bfloat16", "float16"])]
    params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
    cur_best_models = []
    cur_best_scores = []

    # Running score floors: a candidate is only added when it beats the best
    # score already recorded for its type in a smaller size bucket.
    scores_per_type = {"pretrained": 0, "other": 0, "language": 0}
    types_to_consider = [
        ("pretrained", [ModelType.PT]),
        ("other", [ModelType.LA, ModelType.FT, ModelType.chat]),
    ]
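    # Assumed shape of the ModelType members (defined in src.display.utils):
    # each .value carries a display name (e.g. "pretrained") and a short
    # emoji symbol string; only the symbol is used for filtering below, and
    # members with an empty name are skipped.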
    # Empty the collection before re-adding the current best models.
    for item in collection.items:
        try:
            delete_collection_item(
                collection_slug=PATH_TO_COLLECTION, item_object_id=item.item_object_id, token=H4_TOKEN
            )
        except HfHubHTTPError:
            continue

    ix = 0
    for size in intervals:
        interval_scores = []
        interval_itens_languages = []
        interval_itens = []
        numeric_interval = pd.IntervalIndex([intervals[size]])
        mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
        size_df = df.loc[mask]
        for model_type, types in types_to_consider:
            # Gather the emoji symbols that tag this group's model types on
            # the leaderboard, then keep only rows carrying one of them.
            type_emojis = []
            for type in types:
                if type.value.name == "":
                    continue
                type_emojis.extend([t[0] for t in type.value.symbol])
            filtered_df = size_df[size_df[AutoEvalColumn.model_type_symbol.name].isin(type_emojis)]
            # Keep only candidates that beat the current score floor for this type.
            filtered_df = filtered_df[filtered_df[AutoEvalColumn.average.name].astype(float) > scores_per_type[model_type]]
            best_models = filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)
            print(type_emojis, size, list(best_models[AutoEvalColumn.dummy.name])[:10])
            # Walk the ranked candidates and keep the first one the Hub
            # accepts, then stop.
            for i, row in best_models.iterrows():
                model = row[AutoEvalColumn.dummy.name]
                score = row[AutoEvalColumn.average.name]
                language = row[AutoEvalColumn.main_language.name]
                if language == "Portuguese":
                    note = f"Best Portuguese {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                else:
                    note = f"Best {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                try:
                    collection = add_collection_item(
                        PATH_TO_COLLECTION,
                        item_id=model,
                        item_type="model",
                        exists_ok=True,
                        note=note,
                        token=H4_TOKEN,
                    )
                    ix += 1
                    item_object_id = collection.items[-1].item_object_id
                    cur_best_models.append(model)
                    interval_scores.append(float(score))
                    interval_itens_languages.append(language)
                    interval_itens.append(item_object_id)
                    scores_per_type[model_type] = float(score)
                    break
                except HfHubHTTPError:
                    continue
        # If no Portuguese model made it into this size bucket, run an extra
        # pass restricted to Portuguese models.
        if "Portuguese" not in interval_itens_languages:
            language = ["Portuguese"]
            model_type = "language"
            filtered_df = size_df[size_df[AutoEvalColumn.main_language.name].isin(language)]
            filtered_df = filtered_df[filtered_df[AutoEvalColumn.average.name].astype(float) > scores_per_type[model_type]]
            best_models = filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)
            print(language, size, list(best_models[AutoEvalColumn.dummy.name])[:10])
            # Same add-first-accepted logic as above. Note that `type` still
            # holds its last value from the earlier loop, so the note reuses
            # that type's label here.
            for i, row in best_models.iterrows():
                model = row[AutoEvalColumn.dummy.name]
                score = row[AutoEvalColumn.average.name]
                language = row[AutoEvalColumn.main_language.name]
                if language == "Portuguese":
                    note = f"Best Portuguese {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                else:
                    note = f"Best {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                try:
                    collection = add_collection_item(
                        PATH_TO_COLLECTION,
                        item_id=model,
                        item_type="model",
                        exists_ok=True,
                        note=note,
                        token=H4_TOKEN,
                    )
                    ix += 1
                    item_object_id = collection.items[-1].item_object_id
                    cur_best_models.append(model)
                    interval_scores.append(float(score))
                    interval_itens_languages.append(language)
                    interval_itens.append(item_object_id)
                    scores_per_type[model_type] = float(score)
                    break
                except HfHubHTTPError:
                    continue
        # Fix the display order of this bucket's items in the collection,
        # moving only the ones that are out of place.
        starting_idx = len(cur_best_models)
        k = 0
        for i in np.argsort(interval_scores):
            if i != k:
                try:
                    update_collection_item(
                        collection_slug=PATH_TO_COLLECTION,
                        item_object_id=interval_itens[i],
                        position=starting_idx + k,
                        token=H4_TOKEN,
                    )
                except HfHubHTTPError:
                    pass
            k += 1

    # Finally, drop anything left in the collection that is no longer among
    # the current best models.
    collection = get_collection(PATH_TO_COLLECTION, token=H4_TOKEN)
    for item in collection.items:
        if item.item_id not in cur_best_models:
            try:
                delete_collection_item(
                    collection_slug=PATH_TO_COLLECTION, item_object_id=item.item_object_id, token=H4_TOKEN
                )
            except HfHubHTTPError:
                continue
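
if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script: build a
    # single-row DataFrame with the columns this module reads and run the
    # update. All values below are illustrative, the .symbol shape is an
    # assumption about ModelType, and a real run needs a valid H4_TOKEN
    # with write access since it mutates the live collection.
    demo_df = pd.DataFrame(
        [
            {
                AutoEvalColumn.dummy.name: "org/model-7b",  # model id on the Hub
                AutoEvalColumn.params.name: 6.7,  # billions of parameters
                AutoEvalColumn.precision.name: "bfloat16",  # passes the quantization filter
                AutoEvalColumn.model_type_symbol.name: ModelType.PT.value.symbol,
                AutoEvalColumn.average.name: 55.0,
                AutoEvalColumn.main_language.name: "Portuguese",
            }
        ]
    )
    update_collections(demo_df)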