#!/usr/bin/env bash
# 0805recom / data0805 / test.sh
# Author: daiweinan.thu — commit 967796e ("update")
# Sweep script: for each model in the list, run compute_ce_fsdp_recom_test.py
# on the "netflix" dataset with fixed sequence/history limits.
#
# Requires: python3 with compute_ce_fsdp_recom_test.py in the current directory.
# HF_ENDPOINT points model downloads at the hf-mirror proxy; the offline
# switches are kept commented for runs against a pre-populated local cache.

# export CUDA_VISIBLE_DEVICES=7
export HF_ENDPOINT=https://hf-mirror.com
# export HF_HUB_OFFLINE="1"
# export TRANSFORMERS_OFFLINE="1"

# Alternative model lists, kept for convenience — uncomment one to sweep it instead.
# for model_name in "Qwen/Qwen2.5-0.5B" "Qwen/Qwen2.5-1.5B" "Qwen/Qwen2.5-3B" "Qwen/Qwen2.5-7B" "Qwen/Qwen2.5-14B" "Qwen/Qwen2.5-32B"
# for model_name in "meta-llama/Llama-3.1-8B" "meta-llama/Llama-3.1-8B-Instruct"
# for model_name in "Qwen/Qwen3-8B" "Qwen/Qwen3-32B"
# for model_name in "EleutherAI/pythia-14m" "EleutherAI/pythia-31m" "EleutherAI/pythia-70m" "EleutherAI/pythia-160m" "EleutherAI/pythia-410m" "EleutherAI/pythia-1b" "EleutherAI/pythia-1.4b" "EleutherAI/pythia-2.8b" "EleutherAI/pythia-6.9b" "EleutherAI/pythia-12b"
# for model_name in "Qwen/Qwen3-0.6B-Base" "Qwen/Qwen3-1.7B-Base" "Qwen/Qwen3-4B-Base" "Qwen/Qwen3-8B-Base" "Qwen/Qwen3-14B-Base"
# for model_name in "Qwen/Qwen3-0.6B-Base"

for model_name in "Qwen/Qwen3-0.6B-Base" "Qwen/Qwen3-1.7B-Base" "Qwen/Qwen3-4B-Base" "Qwen/Qwen3-8B-Base" "Qwen/Qwen3-14B-Base" "Qwen/Qwen2.5-0.5B" "Qwen/Qwen2.5-1.5B" "Qwen/Qwen2.5-3B" "Qwen/Qwen2.5-7B" "Qwen/Qwen2.5-14B" "Qwen/Qwen2.5-32B"; do
  for round_threshold in 30; do
    # NOTE(review): max_assistant_seq_len is iterated but never passed to the
    # python command below — either it is unused on purpose (single value 0)
    # or a --max_assistant_seq_len flag is missing. Confirm with the script.
    for max_assistant_seq_len in 0; do
      echo "Processing model: $model_name, round_threshold: $round_threshold"
      # Quote all expansions (SC2086) so future values with spaces/globs are safe.
      python3 compute_ce_fsdp_recom_test.py \
        --model_path "$model_name" \
        --data_name "netflix" \
        --round_threshold "$round_threshold" \
        --max_seq_len 30000 \
        --max_item_per_user 10 \
        --max_history_len 100 \
        --subsample 100
    done
  done
done