import os, json
from datasets import Dataset
from sklearn.model_selection import train_test_split
from transformers import (
    T5Tokenizer,
    T5ForConditionalGeneration,
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq,
)
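
# Fine-tunes VietAI/vit5-base for Vietnamese question generation: each training
# example maps "answer: <answer> context: <context>" to the original question.
# The input file is assumed to follow the SQuAD v2 layout (data -> paragraphs
# -> qas, with is_impossible flags), which is the structure load_squad walks.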


def load_squad(path: str):
    # Flatten SQuAD-style JSON into (answer + context -> question) pairs,
    # skipping unanswerable questions and entries with missing fields.
    with open(path, "r", encoding="utf-8") as f:
        d = json.load(f)
    data = []
    for a in d.get("data", []):
        for p in a.get("paragraphs", []):
            ctx = p.get("context", "")
            for qa in p.get("qas", []):
                if qa.get("is_impossible") or not qa.get("answers"):
                    continue
                ans = qa["answers"][0].get("text", "")
                q = qa.get("question", "")
                if ans and q and ctx:
                    data.append({"input": f"answer: {ans} context: {ctx}", "target": q})
    return data
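

# T5 reads its loss targets from the "labels" field; DataCollatorForSeq2Seq
# later pads labels with -100 so the padded positions are ignored by the loss.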
def tokenize(batch, tok, max_in=512, max_out=64):
    x = tok(batch["input"], max_length=max_in, truncation=True)
    y = tok(text_target=batch["target"], max_length=max_out, truncation=True)
    x["labels"] = y["input_ids"]
    return x
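

# Trainer writes checkpoints as checkpoint-<global_step> directories; the
# helper below returns the one with the highest step. Passing
# resume_from_checkpoint=True to trainer.train() would auto-detect it as well,
# but an explicit scan lets main() log which checkpoint training resumes from.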
def latest_ckpt(out_dir: str):
    if not os.path.isdir(out_dir):
        return None
    best_step, best_path = -1, None
    for name in os.listdir(out_dir):
        if not name.startswith("checkpoint-"):
            continue
        try:
            step = int(name.split("-")[-1])
        except ValueError:
            continue
        if step > best_step:
            best_step, best_path = step, os.path.join(out_dir, name)
    return best_path


def main():
    data_path = "39k_train.json"
    out_dir = "t5-viet-qg-finetuned"
    logs_dir = "logs"
    model_name = "VietAI/vit5-base"
    print("Loading model and tokenizer...")
    tok = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    print("Reading and splitting data...")
    data = load_squad(data_path)
    tr, va = train_test_split(data, test_size=0.2, random_state=42)
    print("Tokenizing data...")
    tr_ds = Dataset.from_list(tr).map(
        lambda b: tokenize(b, tok),
        batched=True,
        remove_columns=["input", "target"],
    )
    va_ds = Dataset.from_list(va).map(
        lambda b: tokenize(b, tok),
        batched=True,
        remove_columns=["input", "target"],
    )
print("Cấu hình huấn luyện (checkpoint + resume)...")
args = TrainingArguments(
output_dir=out_dir,
overwrite_output_dir=False,
per_device_train_batch_size=1,
gradient_accumulation_steps=1,
num_train_epochs=3,
learning_rate=2e-4,
weight_decay=0.01,
warmup_steps=0,
save_strategy="steps",
save_steps=500,
save_total_limit=100,
eval_strategy="steps",
eval_steps=500,
load_best_model_at_end=True,
metric_for_best_model="eval_loss",
greater_is_better=False,
logging_dir=logs_dir,
logging_steps=10,
fp16=True,
report_to="none",
)
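    # Effective batch size here is 1 (per_device_train_batch_size=1 with
    # gradient_accumulation_steps=1); raising the accumulation steps is the
    # usual way to grow it without extra GPU memory. fp16=True requires a CUDA
    # device, so drop it for CPU-only runs.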
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tr_ds,
        eval_dataset=va_ds,
        tokenizer=tok,  # recent transformers releases deprecate this in favor of processing_class=
        data_collator=DataCollatorForSeq2Seq(tokenizer=tok, model=model),
    )
    ckpt = latest_ckpt(out_dir)
    if ckpt:
        print(f"Found checkpoint: {ckpt} → resuming training")
        trainer.train(resume_from_checkpoint=ckpt)
    else:
        print("No checkpoint found → training from scratch")
        trainer.train()
    print("Saving final model...")
    trainer.save_model(out_dir)
    tok.save_pretrained(out_dir)
    print("Training complete!")


if __name__ == "__main__":
    main()