#!/bin/bash
# SLURM resource request: 1 task on 1 node of the gpu partition, one A100 GPU
# with 40 GB memory, 8 CPUs per task, 32 GB RAM, email notifications, and a
# per-job log file under the checkpoint directory.
#SBATCH --job-name=eval_cascaded_SLU_NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407
#SBATCH -n 1
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=8
#SBATCH --constraint=gpu_a100&gpu_40g
#SBATCH --mem=32G
#SBATCH --mail-type=ALL
#SBATCH --mail-user=beomseok.lee-phdstudent@naverlabs.com
#SBATCH --output=/beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407/sbatch-%j-02-05.22-53.log

# Activate the Python 3.9 virtual environment with the Hugging Face stack.
source /home/blee/environments/py39-hugging-face/bin/activate

# Route outbound traffic through the internal proxy.
export http_proxy=http://proxy.int.europe.naverlabs.com:3128
export https_proxy=http://proxy.int.europe.naverlabs.com:3128
export no_proxy=int.europe.naverlabs.com

# Point the Hugging Face caches at scratch storage and make the local
# transformers-slu checkout importable.
export HF_HOME=/beegfs/scratch/user/blee/hugging-face/models
export HF_DATASETS_DOWNLOADED_DATASETS_PATH=/beegfs/scratch/user/blee/hugging-face/downloaded
export HF_DATASETS_EXTRACTED_DATASETS_PATH=/beegfs/scratch/user/blee/hugging-face/extracted
export PYTHONPATH=:/home/blee/code-repo/transformers-slu

# Run prediction with the fine-tuned mT5-base NLU checkpoint on the
# speech_massive_cascaded multilingual test split (cascaded-SLU evaluation);
# results are written to the checkpoint's eval/cascaded_SLU directory.
python /home/blee/code-repo/transformers-slu/nle/examples/nlu/run_nlu_mT5.py \
    --do_predict \
    --predict_with_generate \
    --use_fast_tokenizer \
    --trust_remote_code \
    --test_dataset_name /home/blee/code-repo/transformers-slu/nle/dataset/speech_massive_cascaded \
    --test_dataset_config_name multilingual-test \
    --model_name_or_path /beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407 \
    --output_dir /beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407/eval/cascaded_SLU \
    --preprocessing_num_workers 1 \
    --length_column_name input_length \
    --per_device_eval_batch_size 32 \
    --group_by_length \
    --overwrite_cache \
    --generation_num_beams 2
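
# Minimal usage sketch, assuming this file is saved as eval_cascaded_SLU.sbatch
# (the filename is not given in the source):
#   sbatch eval_cascaded_SLU.sbatch    # submit the job to SLURM
#   squeue -u $USER                    # monitor the job's queue state
# The job log is written to the path given by --output above (sbatch-<jobid>-*.log).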