-
Notifications
You must be signed in to change notification settings - Fork 6
/
run_predict.sh
72 lines (55 loc) · 1.7 KB
/
run_predict.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
#!/bin/bash
# Example script: use a LLaVA model to conduct prediction on the sciQA sample set.
#
# For each checkpoint in CKPT_PATHs, launches one background deepspeed job on the
# matching GPU in DEVICES, writing predictions to $CKPT_PATH/res/ and the run log
# to $CKPT_PATH/generating.log.
#
# Required setup: replace "your-llava-path" below with real checkpoint path(s),
# and make sure DEVICES has one GPU id per checkpoint.
set -euo pipefail

CUR_DIR=$(pwd)
ROOT=${CUR_DIR}
# Guard with :- so an unset PYTHONPATH does not trip `set -u`.
export PYTHONPATH=${ROOT}:${PYTHONPATH:-}

VISION_MODEL=none
LLM=none
CKPT_PATHs=(
your-llava-path
)
TEMPLATE=llava
MODEL_ARCHITECTURE=llava

# sciQA evaluation data
DATA_PATH=data/prediction_sample/sci_qa_test.txt
IMAGE_FOLDER=data/prediction_sample/sci_qa_test_image
OUTPUT_TAG=res/sciqa_with_image.res
DATA="llava_predict"
DATA_SAMPLE="all"
IMAGE_PER_SAMPLE="1"

# Generation hyperparameters.
BATCH_SIZE=1
TOPK=50
TOPP=0.95
MAX_NEW_TOKENS=1024
NUM_RETURN_SEQUENCES=1
# NOTE(review): --do_sample is passed below, yet temperature is 0.0 — sampling at
# temperature 0 is effectively greedy (or an error in some frameworks). Confirm intent.
TEMPERATURE=0.0

# One GPU id per checkpoint in CKPT_PATHs.
DEVICES=(7)

ARRAY_LENGTH=${#CKPT_PATHs[@]}
for (( i=0; i<ARRAY_LENGTH; i++ )); do
  CKPT_PATH=${CKPT_PATHs[i]}
  # -p: do not fail if the results directory already exists (re-runs).
  mkdir -p "${CKPT_PATH}/res"
  OUTPUT=${CKPT_PATH}/${OUTPUT_TAG}
  # -f: do not fail on the first run, when no previous output exists.
  rm -f "${OUTPUT}"
  DEVICE=${DEVICES[i]}
  # Offset the master port by the device id so concurrent jobs do not collide.
  # (We assume an effective batch size of 128 =
  #  Num_GPU * per_device_train_batch_size * gradient_accumulation_steps.)
  nohup deepspeed --include "localhost:${DEVICE}" --master_port $((12345 + DEVICE)) ./eval/predict.py \
    --max_seq_len 2048 \
    --data_path "${DATA_PATH}" \
    --dataset_names "${DATA}" --dataset_samples "${DATA_SAMPLE}" --dataset_concatenate_samples "${IMAGE_PER_SAMPLE}" \
    --precision bf16 --enable_mmca_attention \
    --from_checkpoint "${CKPT_PATH}" \
    --template "${TEMPLATE}" \
    --model_architecture "${MODEL_ARCHITECTURE}" \
    --lm_model_name_or_path "${LLM}" \
    --vision_model_name_or_path "${VISION_MODEL}" \
    --batch_size "${BATCH_SIZE}" \
    --image_folder "${IMAGE_FOLDER}" \
    --output_path "${OUTPUT}" \
    --do_sample \
    --topk "${TOPK}" \
    --topp "${TOPP}" \
    --max_new_tokens "${MAX_NEW_TOKENS}" \
    --num_return_sequences "${NUM_RETURN_SEQUENCES}" \
    --temperature "${TEMPERATURE}" > "${CKPT_PATH}/generating.log" &
done