-
Notifications
You must be signed in to change notification settings - Fork 41
Description
Hello, Xuandong,
Thank you for your excellent work.
I can reproduce the results for the 1.5B and 3B models, but when training the 7B model I run into an entropy-collapse problem. Could you help me analyze where the problem occurs?
My script:
#!/usr/bin/env bash
# Launch script: Intuitor training of Qwen2.5-7B via verl's PPO trainer
# on 2 GPUs (tensor-parallel rollout with vLLM, FSDP actor with offload).
set -x
set -euo pipefail  # fail fast: abort on command errors, unset vars, and pipeline failures

# --- Environment ----------------------------------------------------------
# NOTE(review): exporting empty strings is likely unintended — Ray/W&B treat
# an empty path differently from an unset variable. Fill these in or drop
# the exports entirely.
export RAY_TMPDIR=""
export WANDB_API_KEY=xxx  # placeholder — inject via environment; never commit a real key
export WANDB_DIR=""
export WANDB_MODE=online
export ACCELERATE_LOG_LEVEL=info
export HYDRA_FULL_ERROR=1
export CUDA_VISIBLE_DEVICES="0,1"

# --- Experiment identity --------------------------------------------------
project_name='Intuitor'
exp_name='Intuitor-Qwen2.5-7B'

# --- Algorithm ------------------------------------------------------------
adv_estimator=intuitor
use_kl_in_reward=False
use_kl_loss=True
kl_loss_coef=0.005

# --- Batch / length geometry ----------------------------------------------
max_prompt_length=$((1024 * 1))
max_response_length=$((1024 * 4))
train_prompt_bsz=8
n_resp_per_prompt=8
train_prompt_mini_bsz=8
filter_overlong_prompts=True

# --- Ray ------------------------------------------------------------------
# NOTE(review): RAY_ADDRESS and RUNTIME_ENV are defined but never used by
# the command below — confirm whether they were meant to be passed.
RAY_ADDRESS=${RAY_ADDRESS:-""}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-1}

# --- Paths ----------------------------------------------------------------
RAY_DATA_HOME=${RAY_DATA_HOME:-""}
MODEL_PATH=${MODEL_PATH:-"models/Qwen/Qwen2.5-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/math/train.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/math/test.parquet"}

# --- Sampling -------------------------------------------------------------
# NOTE(review): temperature/top_p/top_k/val_top_p are defined but never
# passed to the trainer below, so the config defaults apply — verify this
# is intentional (it may matter for the entropy-collapse behavior).
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7
offload=True

PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.train_batch_size=${train_prompt_bsz} \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.filter_overlong_prompts=${filter_overlong_prompts} \
    data.truncation='error' \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.use_fused_kernels=False \
    actor_rollout_ref.actor.optim.lr=3e-6 \
    actor_rollout_ref.actor.optim.warmup_style=cosine \
    actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.35 \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    trainer.critic_warmup=0 \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=2 \
    trainer.nnodes="${NNODES}" \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.save_freq=300 \
    trainer.test_freq=100 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    trainer.total_epochs=1

Log:
step:200 - actor/entropy:0.0002663135528564453 - actor/pg_clipfrac:np.float64(0.0) - actor/ppo_kl:np.float64(0.0) - actor/pg_clipfrac_lower:np.float64(0.0) - actor/kl_loss:np.float64(0.3125) - actor/kl_coef:np.float64(0.005) - actor/pg_loss:np.float64(-0.005985647672787309) - actor/grad_norm:np.float64(0.0008942923159338534) - perf/mfu/actor:np.float64(0.01098676646450222) - perf/max_memory_allocated_gb:np.float64(96.64947319030762) - perf/max_memory_reserved_gb:np.float64(102.810546875) - perf/cpu_memory_used_gb:np.float64(224.5666847229004) - actor/lr:np.float64(3e-06) - val-core/DigitalLearningGmbH/MATH-lighteval/reward/mean@1:np.float64(0.0) - training/global_step:200 - training/epoch:0 - critic/score/mean:0.0 - critic/score/max:0.0 - critic/score/min:0.0 - critic/rewards/mean:0.0 - critic/rewards/max:0.0 - critic/rewards/min:0.0 - critic/advantages/mean:0.19154074788093567 - critic/advantages/max:0.7811176180839539 - critic/advantages/min:-0.627615213394165 - critic/returns/mean:0.19154074788093567 - critic/returns/max:0.7811176180839539 - critic/returns/min:-0.627615213394165 - response_length/mean:1.0 - response_length/max:1.0 - response_length/min:1.0 - response_length/clip_ratio:0.0 - response_length_non_aborted/mean:1.0 - response_length_non_aborted/max:1.0 - response_length_non_aborted/min:1.0 - response_length_non_aborted/clip_ratio:0.0 - response/aborted_ratio:0.0 - prompt_length/mean:212.875 - prompt_length/max:751.0 - prompt_length/min:91.0 - prompt_length/clip_ratio:0.0 - self_certainty/mean:25.892353057861328 - self_certainty/max:26.730852127075195 - self_certainty/min:25.33042335510254 - self_certainty/std:0.4960935115814209 - timing_s/start_profile:4.959292709827423e-05 - timing_s/generate_sequences:0.1778503954410553 - timing_s/generation_timing/max:0.18106216192245483 - timing_s/generation_timing/min:0.17463864386081696 - timing_s/generation_timing/topk_ratio:0.5 - timing_s/gen:8.403249513357878 - timing_s/reward:0.009339101612567902 - 
timing_s/old_log_prob:20.513225408270955 - timing_s/ref:44.67556981369853 - timing_s/adv:0.004478976130485535 - timing_s/update_actor:95.41149312630296 - timing_s/step:169.0328492615372 - timing_s/testing:63.94828402902931 - timing_s/stop_profile:3.767386078834534e-05 - timing_per_token_ms/gen:131.30077364621684 - timing_per_token_ms/adv:0.00032721917960882045 - timing_per_token_ms/ref:3.2638493434905413 - timing_per_token_ms/update_actor:6.970448065919269 - perf/total_num_tokens:13688 - perf/time_per_step:169.0328492615372 - perf/throughput:40.48917136461787