#!/bin/bash
# llavaguard/scripts/run_instructblip_safety_patch.sh
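
# Run InstructBLIP inference with the image and text safety patches for three tasks
# (unconstrained, constrained, qna) and two patch modes (heuristic, optimized),
# then score each run with get_metric.py and cal_metrics.py.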
set -x

MODEL=instructblip
MODEL_PATH=/workingdir/models_hf/lmsys/vicuna-13b-v1.1  # Vicuna-13B language backbone
GPU_ID=2
for TASK in unconstrained constrained qna; do
  for SAFETY_PATCH_MODE in heuristic optimized; do
    INFERENCE_FILE="outputs/${MODEL}/inference_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
    METRIC_FILE="outputs/${MODEL}/metric_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
    SUMMARY_FILE="outputs/${MODEL}/summary_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
    IMAGE_SAFETY_PATCH=safety_patch/safety_patch.pt
    TEXT_SAFETY_PATCH=safety_patch/text_patch_${SAFETY_PATCH_MODE}
if [ "${TASK}" = "constrained" ]; then
echo "Running constrained"
python instructblip_constrained_inference.py --gpu-id ${GPU_ID} \
--model_path ${MODEL_PATH} \
--image_safety_patch ${IMAGE_SAFETY_PATCH} \
--text_safety_patch ${TEXT_SAFETY_PATCH} \
--output_file ${INFERENCE_FILE} \
--safety_patch_mode ${SAFETY_PATCH_MODE}
elif [ "${TASK}" = "unconstrained" ]; then
echo "Running unconstrained"
python instructblip_unconstrained_inference.py --gpu-id ${GPU_ID} \
--model_path ${MODEL_PATH} \
--image_safety_patch ${IMAGE_SAFETY_PATCH} \
--text_safety_patch ${TEXT_SAFETY_PATCH} \
--output_file ${INFERENCE_FILE} \
--safety_patch_mode ${SAFETY_PATCH_MODE}
elif [ "${TASK}" = "qna" ]; then
echo "Running qna"
python instructblip_qna.py --gpu-id ${GPU_ID} \
--image_path ${TASK}_attack_images/adversarial_ \
--image_safety_patch ${IMAGE_SAFETY_PATCH} \
--text_safety_patch ${TEXT_SAFETY_PATCH} \
--output_file ${INFERENCE_FILE} \
--safety_patch_mode ${SAFETY_PATCH_MODE}
    else
      echo "Unknown task: ${TASK}"
      exit 1
    fi
    # Score the generations, then aggregate the per-example metrics into a summary.
    CUDA_VISIBLE_DEVICES=${GPU_ID} python get_metric.py --input ${INFERENCE_FILE} \
      --output ${METRIC_FILE} \
      --perplexity ${SUMMARY_FILE} \
      --device cuda \
      --load_existing_generation
    python cal_metrics.py --input ${METRIC_FILE} \
      --output ${SUMMARY_FILE}
  done
done