# LuojiaHOG/cisen/config/cisen_r0.9_fpn.yaml
DATA:
  dataset: classification
  dataset_json_file: /data02/xy/dataEngine/json_data/LuojiaHOG(test)_.json
  # dataset_json_file: /data02/xy/dataEngine/json_data/merged_output_combined_9w_resplit.json
  exp_name: classifi
  ratio: 0
  dataset_train_split: 0.6
  dataset_query_split: 0.2
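  # Note (assumption): with dataset_train_split 0.6 and dataset_query_split 0.2,
  # the remaining 0.2 of the samples presumably serves as the retrieval/database
  # split; the exact split logic lives in the dataset code.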
  imgs_folder: /data02/xy/Clip-hash/datasets/image/
  label_path: /data02/xy/Clip-hash/labels.txt
  num_classes: 10
  # num_classes: 131
TRAIN:
  # Base Arch
  # clip_pretrain: /data02/xy/Clip-hash/pretrain/RS5M_ViT-B-32.pt
  clip_pretrain: ./cisen/pretrain/RS5M_ViT-B-32.pt
  model_name: ViT-B-32
  ckpt_path: /data02/xy/GeoRSCLIP/codebase/inference/pretrain/RS5M_ViT-B-32.pt
  input_size: 224
  word_len: 328
  word_dim: 1024
  vis_dim: 512
  fpn_in: [512, 768, 768]
  fpn_out: [768, 768, 768, 512]
  sync_bn: True
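  # Note (assumption): fpn_in/fpn_out read as per-level channel widths for an
  # FPN-style neck over intermediate ViT-B-32 features (hidden width 768),
  # projected down to vis_dim (512) at the output; not verified against the code.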
  # Decoder
  num_layers: 3
  num_head: 8
  dim_ffn: 2048
  dropout: 0.1
  intermediate: False
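  # A minimal sketch of how these hyperparameters map onto a standard transformer
  # decoder (illustrative PyTorch, assuming d_model = vis_dim; this is not the
  # project's actual module):
  #   import torch.nn as nn
  #   layer = nn.TransformerDecoderLayer(d_model=512, nhead=8,
  #                                      dim_feedforward=2048, dropout=0.1)
  #   decoder = nn.TransformerDecoder(layer, num_layers=3)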
  # Training Setting
  workers: 32  # data loader workers
  workers_val: 16
  epochs: 50
  milestones: [50]
  start_epoch: 0
  batch_size: 256  # batch size for training
  batch_size_val: 256  # batch size for validation during training; memory/speed tradeoff
  base_lr: 0.0001
  min_lr: 0.00000001
  lr_decay: 0.5
  lr_multi: 0.1
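  # Note (assumption): these read as MultiStepLR-style settings, i.e. the lr is
  # multiplied by lr_decay (0.5) at each epoch in milestones and floored at
  # min_lr; with milestones [50] equal to epochs (50), the decay step would
  # never fire during training, leaving the lr at base_lr throughout.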
  weight_decay: 0.
  max_norm: 0.
  manual_seed: 0
  print_freq: 1
  lamda1: 0.5
  lamda2: 0.5
  beta1: 0.5
  beta2: 0.5
  eta: 0.2
  warmup_epochs: 0
  contrastive: [0.4, 0.3, 0.3]
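  # Note (assumption): lamda1/lamda2, beta1/beta2, eta, and the contrastive
  # triple look like loss-weighting coefficients (e.g. balancing per-level
  # contrastive terms); their exact semantics depend on the training code.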
  # Resume & Save
  output_folder: /data02/xy/Clip-hash/exp/
  save_freq: 1
  weight:  # path to initial weight (default: none)
  resume: False  # path to latest checkpoint to resume from (default: none)
  evaluate: True  # evaluate on the validation set; needs extra GPU memory, so a small batch_size_val is recommended
Distributed:
  dist_url: tcp://localhost:3693
  dist_backend: 'nccl'
  multiprocessing_distributed: True
  world_size: 1
  rank: 0
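  # Note (assumption): a single-node multi-GPU launch; these fields match the
  # usual torch.distributed.init_process_group(backend='nccl',
  # init_method=dist_url, world_size=..., rank=...) pattern with one spawned
  # process per GPU.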
TEST:
  test_split: val-test
  gpu: [0]
  test_lmdb: /data02/xy/Clip-hash/datasets/lmdb/refcoco/val.lmdb
  visualize: False
  topk: 5
  test_batch_size: 256
  val_batch_size: 1
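# A minimal loading sketch (assumption: the training entry point reads this file
# with PyYAML and flattens the sections into one namespace; illustrative code,
# not the project's actual loader):
#   import argparse
#   import yaml
#   with open("cisen/config/cisen_r0.9_fpn.yaml") as f:
#       raw = yaml.safe_load(f)
#   cfg = argparse.Namespace(**{k: v for sec in raw.values() for k, v in sec.items()})
#   print(cfg.base_lr, cfg.epochs)  # -> 0.0001 50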