# Full finetune configuration for a Llama 3.2 3B "sound" model (torchtune).
# Trains on the sound_completion_dataset with HF-format checkpointing.
# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_s_tokenizer
  path: ../model_zoo_llama3.2/tokenizer.model
  max_seq_len: 512

# Dataset
dataset:
  _component_: torchtune.datasets.sound_completion_dataset
  source: jan-hq/raw-speech-whispervq-v2-merged
  max_seq_len: 512
  split: train
  column: text

seed: 42
shuffle: True

# Model arguments
model:
  _component_: torchtune.models.llama3_2.llama3_2_s_3b

# Checkpointing
checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointerSaveSteps
  checkpoint_dir: ../model_zoo_llama3.2/llama3.2-s-3b-init
  checkpoint_files: [
    model-00001-of-00002.safetensors,
    model-00002-of-00002.safetensors,
  ]
  recipe_checkpoint: null
  output_dir: ../model_zoo_llama3.2/llama3.2-3B-s
  model_type: LLAMA3_2
resume_from_checkpoint: False
save_every_n_steps: 1000
max_checkpoints: 3

# Fine-tuning arguments
batch_size: 24
epochs: 1
max_steps_per_epoch: null
gradient_accumulation_steps: 2
compile: False

# Optimizer and LR schedule
optimizer:
  _component_: torch.optim.AdamW
  weight_decay: 0.01
  lr: 2e-4
  fused: True
lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 80

# Loss
loss:
  _component_: torch.nn.CrossEntropyLoss

# Distributed (FSDP) settings
fsdp:
  cpu_offload: False

# Environment
device: cuda
dtype: bf16

# Memory management
enable_activation_checkpointing: True
memory_efficient_fsdp_wrap: True
ac_mode: 'selective'

# Logging
metric_logger:
  _component_: torchtune.training.metric_logging.DiskLogger
  log_dir: ${output_dir}
output_dir: ../model_zoo_llama3.2/llama3.2-3B-s-log/
log_every_n_steps: 1
log_peak_memory_stats: False