# Directory settings
checkpoint_dir: "/lustre/scratch/data/polyglot_datasets/portuguese/checkpoints/models/tucano_v2"
train_dataset_dir:
  # Total: ~140B
  # Portuguese Text (~76B, 54%) | English Text (~64B, 46%)
  # Note: a dataset listed twice is sampled at double weight; the totals
  # above count each copy.
  # Web Text (~70B, 50%)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/gigaverbo_v2/4 # 28B (PT)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/gigaverbo_v2/4 # 28B (PT)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/fineweb/4 # 14B (EN)
  # Synthetic Text (~50B, 35%)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/cosmopedia # 30B (EN)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/gigaverbo_v2_synth # 10B (PT)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/gigaverbo_v2_synth # 10B (PT)
  # Reasoning Text (~12B, 8%)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/nvidia_openscience # 9B (EN)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/big_reasoning_traces # 2B (EN)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/math_meta_reasoning_filtered # 1B (EN)
  # Math Text (~8B, 5%)
  - /lustre/scratch/data/polyglot_datasets/portuguese/tokenized/finemath/4 # 8B (EN)
val_dataset_dir: "/lustre/scratch/data/polyglot_datasets/portuguese/tokenized/validation"
dataset_type: "parquet"
cache_dir: "/lustre/mlnvme/data/polyglot/.cache"

# Data loading settings
pin_memory: true
num_workers_for_dataloader: 16
shuffle_dataset: true
mask_eos_token: false
mask_pad_token: false

# Model architecture settings
vocab_size: 49152
num_hidden_layers: 28
num_attention_heads: 16
num_key_value_heads: 8
head_dim: null
hidden_size: 1536
intermediate_size: 3072
max_position_embeddings: 4096
tie_word_embeddings: true
hidden_act: "silu"
output_hidden_states: false
attn_implementation: "flash_attention_2"
use_cache: false
no_rope_layer_interval: null
rope_theta: 50000.0
rope_scale_factor: null
rms_norm_eps: 0.000001

# Training settings
total_batch_size: 2097152
micro_batch_size: 16
eval_micro_batch_size: 8
num_train_epochs: 1
warmup_steps: 0
max_learning_rate: 0.0007
min_learning_rate: 0.0
muon_learning_rate: 0.007
weight_decay: 0.1
beta1: 0.9
beta2: 0.95
eps: 0.00000001
lr_decay_type: "wsd"
use_sqrt: false
lr_decay_iters_coef: 0.0
seed: 42
max_steps: 60000
max_grad_norm: 1.0

# Precision and optimization settings
torch_compile: false
mat_mul_precision: "highest"
tf32: true
bf16: true
gradient_checkpointing: false
use_liger_kernel: true
static_graph: false

# Hub settings
push_to_hub: false
hub_token: null
hub_model_id: null

# Tokenizer and reference model settings
tokenizer_name_or_path: "/lustre/scratch/data/polyglot_datasets/portuguese/checkpoints/tokenizers/sentencepiece"
chat_template_path: null
reference_model: "HuggingFaceTB/SmolLM2-360M"
continual_pretraining: false

# Checkpoint settings
resume_from_checkpoint: "/lustre/scratch/data/polyglot_datasets/portuguese/checkpoints/models/tucano_v2/stage1"
checkpointing_steps: 5000
begin_new_stage: true
stage_name: "stage2"

# Miscellaneous settings
sanity_check: false
sanity_check_num_samples: 100000
wandb_token: null
wandb_id: "tucano-v2"
wandb_project: "Polyglot"
wandb_desc: "Developing LLMs for low-resource languages"
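
# Derived quantities (for sanity checking; assumes total_batch_size counts
# tokens per optimizer step and that head_dim is resolved following the usual
# transformers convention):
# - Tokens per micro-batch: micro_batch_size (16) x max_position_embeddings
#   (4096) = 65,536. Under the assumption above, gradient accumulation x
#   data-parallel world size must equal 2,097,152 / 65,536 = 32.
# - Run length: max_steps (60,000) x 2,097,152 (= 2^21) tokens/step ~= 126B
#   tokens, slightly under one full pass over the ~140B-token mixture.
# - head_dim: null presumably resolves to hidden_size / num_attention_heads
#   = 1536 / 16 = 96.
# - Grouped-query attention: 16 query heads share 8 KV heads, i.e. 2 query
#   heads per KV head.
# - Checkpointing: max_steps / checkpointing_steps = 60,000 / 5,000 = 12
#   checkpoints over the run.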