import streamlit as st
from datasets import load_dataset
from transformer_ranker import TransformerRanker, prepare_popular_models
st.title("Choose Your Transformer")
model_options = {
    'bert-tiny': 'prajjwal1/bert-tiny',
    'bert-small': 'prajjwal1/bert-small',
    'electra-small': 'google/electra-small-discriminator',
    'deberta-small': 'microsoft/deberta-v3-small',
    'distilbert-cased': 'distilbert-base-cased',
    'distilbert-uncased': 'distilbert-base-uncased',
}
# 1) Select a dataset (text classification or token classification)
dataset_options = ['trec', 'conll2003']  # Example datasets; this list can be expanded
selected_dataset = st.selectbox("Select Dataset", dataset_options)
# 2) Select the dataset downsampling ratio
downsample_values = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
downsample_ratio = st.select_slider("Dataset Downsample Ratio", options=downsample_values, value=0.2)
# 3) Select one or more models from the Hugging Face model hub
model_names = list(model_options.keys())
selected_models = st.multiselect("Select Models", model_names, default=['bert-tiny', 'electra-small'])
selected_models = [model_options[model_name] for model_name in selected_models]
# 4) Select the layer aggregation method ('layermean' is the default)
layer_options = ['lastlayer', 'layermean', 'bestlayer']
selected_layer = st.selectbox("Layer Selection", layer_options, index=1)
# Add real-time logging in the future
log_expander = st.expander("Expand to view log")
log_placeholder = log_expander.empty() # Placeholder for log updates
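# Illustrative sketch (not part of the original app): real-time logging could be
# surfaced by attaching a logging.Handler that writes each record into the Streamlit
# placeholder above. The logger name "transformer_ranker" is an assumption about
# which logger the library writes to; adjust it if the ranker logs under another name.
import logging

class PlaceholderLogHandler(logging.Handler):
    """Forward log records into a Streamlit placeholder as plain text."""

    def __init__(self, placeholder):
        super().__init__()
        self.placeholder = placeholder
        self.lines = []

    def emit(self, record):
        self.lines.append(self.format(record))
        self.placeholder.text("\n".join(self.lines))

# Reuse the handler across Streamlit reruns, rebinding it to the fresh placeholder
ranker_logger = logging.getLogger("transformer_ranker")
existing_handlers = [h for h in ranker_logger.handlers if isinstance(h, PlaceholderLogHandler)]
if existing_handlers:
    existing_handlers[0].placeholder = log_placeholder
else:
    ranker_logger.addHandler(PlaceholderLogHandler(log_placeholder))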
# Button to run the ranking process
if st.button("Run Model Ranking"):
    with st.spinner("Running the transformer-ranker..."):
        # Step 1: Load the selected dataset
        dataset = load_dataset(selected_dataset, trust_remote_code=True)
        # Step 2: Prepare the selected models (fall back to popular base models if none were picked)
        language_models = selected_models if selected_models else prepare_popular_models('base')
        # Step 3: Initialize the ranker with the chosen downsampling ratio
        ranker = TransformerRanker(dataset, dataset_downsample=downsample_ratio)
        # Placeholder for log updates
        log_placeholder.text("Real-time logging will be added here...")
        # Step 4: Run the ranker using the selected layer aggregation method
        results = ranker.run(language_models, layer_aggregator=selected_layer, batch_size=64)
        # Display the final results
        st.write(results)
        st.success("Ranking is done!")
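# To try the app locally (assuming this script is saved as app.py):
#   streamlit run app.py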