# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.41.0",  # eval_strategy= (used below) requires transformers >= 4.41
#     "accelerate>=0.24.0",
#     "datasets",
#     "trackio",
# ]
# ///
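# Run with `uv run <this_file>.py`: uv reads the inline metadata block above
# (PEP 723) and installs the listed dependencies into an ephemeral environment.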
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
# Load dataset (ChatML format)
print("Loading pentest dataset...")
dataset = load_dataset(
    "jason-oneal/pentest-agent-dataset",
    data_files="chatml_train.jsonl",
    split="train",
)
print(f"Dataset loaded: {len(dataset)} examples")
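# The JSONL records hold ChatML-style conversations (typically a "messages"
# list); SFTTrainer detects this conversational format and applies the model's
# chat template during tokenization, so no manual formatting is needed here.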
# Train/eval split
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
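# 10% of the examples are held out for evaluation; the fixed seed keeps the
# split reproducible across runs.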
# Training configuration
config = SFTConfig(
    output_dir="qwen2.5-coder-1.5b-pentest",
    push_to_hub=True,
    hub_model_id="fawazo/qwen2.5-coder-1.5b-pentest",
    hub_strategy="every_save",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    logging_steps=10,
    save_strategy="steps",
    save_steps=200,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=200,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="trackio",
    project="pentest-coder",
    run_name="qwen2.5-coder-1.5b-sft",
)
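# Effective batch size: 4 (per_device_train_batch_size) x 4
# (gradient_accumulation_steps) = 16 sequences per optimizer step, per GPU.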
# LoRA config for efficient training
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
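# lora_alpha / r = 32 / 16 scales the LoRA updates by 2.0. Only the attention
# projections are adapted; the MLP weights (gate/up/down_proj) stay frozen.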
# Train
print("Starting training...")
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-Coder-1.5B",
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
)
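# Passing peft_config makes SFTTrainer wrap the base model as a PEFT model, so
# only the LoRA adapter weights (a few million parameters) are trained and pushed.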
trainer.train()
trainer.push_to_hub()
print("Model saved to: https://huggingface.co/fawazo/qwen2.5-coder-1.5b-pentest")