"""Fine-tune VietAI/vit5-base for question generation on SQuAD-style data,
with periodic checkpointing and automatic resume from the latest checkpoint."""

import os
import json

from datasets import Dataset
from sklearn.model_selection import train_test_split
from transformers import (T5Tokenizer, T5ForConditionalGeneration,
                          TrainingArguments, Trainer, DataCollatorForSeq2Seq)


def load_squad(path: str):
    """Read a SQuAD-style JSON file and build (answer + context -> question) pairs."""
    with open(path, "r", encoding="utf-8") as f:
        d = json.load(f)

    data = []
    for a in d.get("data", []):
        for p in a.get("paragraphs", []):
            ctx = p.get("context", "")
            for qa in p.get("qas", []):
                # Skip unanswerable questions and questions without an answer span.
                if qa.get("is_impossible") or not qa.get("answers"):
                    continue
                ans = qa["answers"][0].get("text", "")
                q = qa.get("question", "")
                if ans and q and ctx:
                    # Text-to-text format: the question is the generation target.
                    data.append({"input": f"answer: {ans} context: {ctx}", "target": q})
    return data
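
# For reference, a minimal sketch of the SQuAD-style shape load_squad expects;
# only the fields read above are used, anything else in the file is ignored:
#
# {"data": [{"paragraphs": [{
#     "context": "...",
#     "qas": [{"question": "...", "is_impossible": false,
#              "answers": [{"text": "..."}]}]}]}]}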


def tokenize(batch, tok, max_in=512, max_out=64):
    """Tokenize inputs and targets; the tokenized targets become the labels."""
    x = tok(batch["input"], max_length=max_in, truncation=True)
    y = tok(text_target=batch["target"], max_length=max_out, truncation=True)
    x["labels"] = y["input_ids"]
    return x
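
# Note: tokenize() returns unpadded sequences; DataCollatorForSeq2Seq (used
# below) pads each batch dynamically and fills the label padding with -100,
# so those positions are ignored by the loss.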


def latest_ckpt(out_dir: str):
    """Return the checkpoint-<step> directory with the highest step, or None."""
    if not os.path.isdir(out_dir):
        return None

    best_step, best_path = -1, None
    for name in os.listdir(out_dir):
        # Trainer saves checkpoints as directories named checkpoint-<global_step>.
        if not name.startswith("checkpoint-"):
            continue
        try:
            step = int(name.split("-")[-1])
        except ValueError:
            continue
        if step > best_step:
            best_step, best_path = step, os.path.join(out_dir, name)

    return best_path


def main():
    data_path = "39k_train.json"
    out_dir = "t5-viet-qg-finetuned"
    logs_dir = "logs"
    model_name = "VietAI/vit5-base"

    print("Loading model and tokenizer...")
    tok = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    print("Reading and splitting data...")
    data = load_squad(data_path)
    # 80/20 train/validation split with a fixed seed for reproducibility.
    tr, va = train_test_split(data, test_size=0.2, random_state=42)

| print("Tokenize dữ liệu...") |
| tr_ds = Dataset.from_list(tr).map( |
| lambda b: tokenize(b, tok), |
| batched=True, |
| remove_columns=["input", "target"], |
| ) |
| va_ds = Dataset.from_list(va).map( |
| lambda b: tokenize(b, tok), |
| batched=True, |
| remove_columns=["input", "target"], |
| ) |
|
|
| print("Cấu hình huấn luyện (checkpoint + resume)...") |
| args = TrainingArguments( |
| output_dir=out_dir, |
| overwrite_output_dir=False, |
| per_device_train_batch_size=1, |
| gradient_accumulation_steps=1, |
| num_train_epochs=3, |
| learning_rate=2e-4, |
| weight_decay=0.01, |
| warmup_steps=0, |
| save_strategy="steps", |
| save_steps=500, |
| save_total_limit=100, |
| eval_strategy="steps", |
| eval_steps=500, |
| load_best_model_at_end=True, |
| metric_for_best_model="eval_loss", |
| greater_is_better=False, |
| logging_dir=logs_dir, |
| logging_steps=10, |
| fp16=True, |
| report_to="none", |
| ) |
|
|
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tr_ds,
        eval_dataset=va_ds,
        tokenizer=tok,
        data_collator=DataCollatorForSeq2Seq(tokenizer=tok, model=model),
    )

    ckpt = latest_ckpt(out_dir)
    if ckpt:
        print(f"Found checkpoint: {ckpt} → resuming training")
        trainer.train(resume_from_checkpoint=ckpt)
    else:
        print("No checkpoint found → training from scratch")
        trainer.train()

| print("Lưu mô hình cuối cùng...") |
| trainer.save_model(out_dir) |
| tok.save_pretrained(out_dir) |
|
|
| print("Huấn luyện hoàn tất!") |
|
|
|
|
if __name__ == "__main__":
    main()
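
# Once training finishes, the model saved in out_dir can generate a question
# from an answer + context pair. A minimal usage sketch (generation settings
# here are illustrative, not tuned):
#
#   tok = T5Tokenizer.from_pretrained("t5-viet-qg-finetuned")
#   model = T5ForConditionalGeneration.from_pretrained("t5-viet-qg-finetuned")
#   inputs = tok("answer: <answer> context: <context>", return_tensors="pt")
#   out = model.generate(**inputs, max_length=64, num_beams=4)
#   print(tok.decode(out[0], skip_special_tokens=True))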