#!/bin/bash
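#
# submitted_eval_job_02-26.18-03.sh
# Evaluation job for the fine-tuned mT5-base NLU model (checkpoint-30407):
# cascaded SLU without num2word normalization, evaluated on the multilingual
# Speech-MASSIVE test split.
#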
#SBATCH --job-name=eval_cascaded_SLU_wo_num2word_NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407
#SBATCH -n 1
#SBATCH -N 1
#SBATCH -p gpu
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=8
#SBATCH --constraint=gpu_a100&gpu_40g+
#SBATCH --mem=32G
#SBATCH --mail-type=ALL
#SBATCH --mail-user=[email protected]
#SBATCH --output=/beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407/sbatch-%j-02-26.18-03.log
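
# Activate the Python 3.9 Hugging Face virtual environment.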
source /home/blee/environments/py39-hugging-face/bin/activate
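
# Route HTTP(S) traffic through the internal proxy; internal hosts bypass it.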
export http_proxy=http://proxy.int.europe.naverlabs.com:3128
export https_proxy=http://proxy.int.europe.naverlabs.com:3128
export no_proxy=int.europe.naverlabs.com
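
# Keep Hugging Face model and dataset caches on scratch storage.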
export HF_HOME=/beegfs/scratch/user/blee/hugging-face/models
export HF_DATASETS_DOWNLOADED_DATASETS_PATH=/beegfs/scratch/user/blee/hugging-face/downloaded
export HF_DATASETS_EXTRACTED_DATASETS_PATH=/beegfs/scratch/user/blee/hugging-face/extracted
# Make the local transformers-slu fork importable.
export PYTHONPATH=/home/blee/code-repo/transformers-slu
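
# Run prediction with beam search (2 beams) on the cascaded Speech-MASSIVE
# test set (multilingual-test config); results are written under the
# checkpoint's eval/cascaded_SLU_wo_num2word directory.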
python /home/blee/code-repo/transformers-slu/nle/examples/nlu/run_nlu_mT5.py \
--do_predict \
--predict_with_generate \
--use_fast_tokenizer \
--trust_remote_code \
--test_dataset_name /home/blee/code-repo/transformers-slu/nle/dataset/speech_massive_cascaded_v2 \
--test_dataset_config_name multilingual-test \
--model_name_or_path /beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407 \
--output_dir /beegfs/scratch/user/blee/project_3/models/NLU.mt5-base.task_type-1.fine_tune.gpu_a100-40g+.node-1x1.bsz-64.epochs-22.metric-ema.metric_lang-all/checkpoint-30407/eval/cascaded_SLU_wo_num2word \
--preprocessing_num_workers 1 \
--length_column_name input_length \
--per_device_eval_batch_size 32 \
--overwrite_cache \
--generation_num_beams 2