"""Generate Vietnamese questions from an input passage with a fine-tuned T5 model."""

from __future__ import annotations
|
|
| import argparse |
| import json |
| import os |
| import re |
| import sys |
| import threading |
| from pathlib import Path |
from typing import Any, NoReturn
|
|
| os.environ.setdefault("TOKENIZERS_PARALLELISM", "false") |
| os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True") |
|
|
|
|
def raise_missing_dependency_error(exc: ModuleNotFoundError) -> NoReturn:
    """Exit with installation instructions when a required package is missing."""
    root = Path(__file__).resolve().parent
    requirements = root / "requirements.txt"
    message = [
        f"Missing Python library: {exc.name}",
        f"Current interpreter: {sys.executable}",
    ]
    if requirements.exists():
        message.extend(
            [
                "Install the dependencies with:",
                f"{sys.executable} -m pip install -r {requirements}",
            ]
        )
    raise SystemExit("\n".join(message)) from exc


| try: |
| import torch |
| from transformers import AutoModelForSeq2SeqLM, AutoTokenizer |
| except ModuleNotFoundError as exc: |
    raise_missing_dependency_error(exc)


APP_TITLE = "Frequently asked question generation model"
# Vietnamese task prefix, kept verbatim because the model was fine-tuned on prompts that use it.
TASK_PREFIX = "sinh câu hỏi"
| QUESTION_LIMIT = 100 |
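# Sampling passes, tried in order: (temperature, top_p, answer-pool limit, rounds, minimum samples per answer).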
| GENERATION_PASSES = ( |
| (0.9, 0.95, None, 1, 4), |
| (1.0, 0.97, 16, 1, 5), |
| (1.08, 0.99, 8, 2, 6), |
| ) |
|
|
|
|
| def normalize_text(text: Any) -> str: |
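    """Collapse all whitespace runs and coerce ``None`` to an empty string."""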
| return " ".join(str(text or "").split()) |
|
|
|
|
| def unique_text(items: list[str]) -> list[str]: |
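    """Deduplicate case-insensitively after normalization, preserving first-seen order."""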
| seen: set[str] = set() |
| output: list[str] = [] |
| for item in items: |
| value = normalize_text(item) |
| key = value.lower() |
| if key and key not in seen: |
| seen.add(key) |
| output.append(value) |
    return output


| def parse_question_count(value: Any, default: int = 5) -> int: |
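    """Parse a question count, falling back to ``default`` and clamping to 1..QUESTION_LIMIT."""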
| try: |
| parsed = int(value) |
| except (TypeError, ValueError): |
| parsed = default |
    return max(1, min(parsed, QUESTION_LIMIT))


| def format_questions(items: list[str]) -> str: |
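    """Render the questions as a numbered list, one per line."""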
| if not items: |
| return "Không sinh được câu hỏi phù hợp." |
| return "\n".join(f"{index}. {item}" for index, item in enumerate(items, 1)) |
|
|
|
|
| def resolve_model_dir(model_dir: str | Path, prefer_nested_model: bool = True) -> Path: |
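    """Return the first directory containing a config.json, optionally preferring best-model/final-model subfolders."""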
| model_root = Path(model_dir).expanduser().resolve() |
| nested_candidates = [model_root / "best-model", model_root / "final-model"] |
| candidates = [*nested_candidates, model_root] if prefer_nested_model else [model_root, *nested_candidates] |
| for candidate in candidates: |
| if candidate.is_dir() and (candidate / "config.json").exists(): |
| return candidate |
    raise FileNotFoundError(f"No valid model directory found under: {model_root}")


| def parse_dtype(value: str) -> torch.dtype: |
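    """Map a dtype name such as fp16, bf16 or float32 to the corresponding torch.dtype."""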
| normalized = value.strip().lower() |
| mapping = { |
| "float16": torch.float16, |
| "fp16": torch.float16, |
| "float32": torch.float32, |
| "fp32": torch.float32, |
| "bfloat16": torch.bfloat16, |
| "bf16": torch.bfloat16, |
| } |
| if normalized not in mapping: |
| raise ValueError(f"Không hỗ trợ gpu_dtype={value}") |
| return mapping[normalized] |
|
|
|
|
| class QuestionGenerator: |
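    """Generate Vietnamese questions from a passage with a fine-tuned T5 seq2seq model.

    The tokenizer and model are loaded lazily and at most once (guarded by a lock),
    e.g. ``QuestionGenerator("t5-viet-qg-finetuned").generate(text, count=5)``.
    """
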
| def __init__( |
| self, |
| model_dir: str | Path = "t5-viet-qg-finetuned", |
| task_prefix: str = TASK_PREFIX, |
| max_source_length: int = 512, |
| max_new_tokens: int = 64, |
| device: str = "auto", |
| cpu_threads: int | None = None, |
| gpu_dtype: str = "auto", |
| prefer_nested_model: bool = True, |
| ) -> None: |
| self.model_root = Path(model_dir).expanduser().resolve() |
| self.model_dir = resolve_model_dir(model_dir, prefer_nested_model=prefer_nested_model) |
| self.task_prefix = task_prefix |
| self.max_source_length = max_source_length |
| self.max_new_tokens = max_new_tokens |
| self.requested_device = device |
| self.cpu_threads = cpu_threads |
| self.gpu_dtype = gpu_dtype |
| self.prefer_nested_model = prefer_nested_model |
| self.device: torch.device | None = None |
| self.dtype: torch.dtype | None = None |
| self.tokenizer = None |
| self.model = None |
| self._load_lock = threading.Lock() |
|
|
| def _resolve_device(self) -> torch.device: |
| requested = self.requested_device.lower() |
| if requested == "cpu": |
| return torch.device("cpu") |
| if requested == "cuda": |
| if not torch.cuda.is_available(): |
| raise RuntimeError("Bạn đã chọn device=cuda nhưng máy hiện tại không có CUDA.") |
| return torch.device("cuda") |
| return torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
| def _resolve_dtype(self) -> torch.dtype: |
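        """Use float32 on CPU; on CUDA prefer bfloat16 when supported, else float16, unless gpu_dtype overrides it."""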
| if self.device is None or self.device.type != "cuda": |
| return torch.float32 |
| if self.gpu_dtype == "auto": |
| if hasattr(torch.cuda, "is_bf16_supported") and torch.cuda.is_bf16_supported(): |
| return torch.bfloat16 |
| return torch.float16 |
| return parse_dtype(self.gpu_dtype) |
|
|
| def _configure_runtime(self) -> None: |
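        """Apply CPU thread limits, or enable TF32 and cuDNN autotuning on CUDA."""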
| if self.device is None: |
| return |
| if self.device.type == "cpu": |
| if self.cpu_threads: |
| torch.set_num_threads(max(1, int(self.cpu_threads))) |
| if hasattr(torch, "set_num_interop_threads"): |
| torch.set_num_interop_threads(max(1, min(int(self.cpu_threads), 4))) |
| return |
|
|
| if hasattr(torch.backends, "cuda") and hasattr(torch.backends.cuda, "matmul"): |
| torch.backends.cuda.matmul.allow_tf32 = True |
| if hasattr(torch.backends, "cudnn"): |
| torch.backends.cudnn.allow_tf32 = True |
| torch.backends.cudnn.benchmark = True |
|
|
| def load(self) -> None: |
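        """Load the tokenizer and model once, thread-safely, onto the resolved device."""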
| if self.model is not None and self.tokenizer is not None: |
| return |
|
|
| with self._load_lock: |
| if self.model is not None and self.tokenizer is not None: |
| return |
|
|
| self.device = self._resolve_device() |
| self.dtype = self._resolve_dtype() |
| self._configure_runtime() |
|
|
| model_kwargs: dict[str, Any] = {} |
| if self.device.type == "cuda": |
| model_kwargs["torch_dtype"] = self.dtype |
| model_kwargs["low_cpu_mem_usage"] = True |
|
|
| self.tokenizer = AutoTokenizer.from_pretrained(str(self.model_dir), use_fast=True) |
| self.model = AutoModelForSeq2SeqLM.from_pretrained(str(self.model_dir), **model_kwargs) |
| self.model.to(self.device) |
| self.model.eval() |
|
|
| def metadata(self) -> dict[str, Any]: |
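        """Return runtime information (paths, devices, dtype, load state) for display."""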
| active_device = self.device.type if self.device is not None else None |
| predicted_device = "cuda" if torch.cuda.is_available() and self.requested_device != "cpu" else "cpu" |
| return { |
| "title": APP_TITLE, |
| "model_root": str(self.model_root), |
| "model_dir": str(self.model_dir), |
| "requested_device": self.requested_device, |
| "active_device": active_device, |
| "predicted_device": predicted_device, |
| "loaded": self.model is not None, |
| "gpu_available": torch.cuda.is_available(), |
| "gpu_dtype": None if self.dtype is None else str(self.dtype).replace("torch.", ""), |
| "cpu_threads": torch.get_num_threads(), |
| } |
|
|
| def _candidate_answers(self, text: str, limit: int) -> list[str]: |
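        """Split the text into sentence and clause candidates, ranked by closeness to ten words."""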
| text = normalize_text(text) |
| if not text: |
| return [] |
|
|
| candidates: list[str] = [] |
| split_pattern = r"(?<=[.!?])\s+|\n+" |
| for sentence in [normalize_text(part) for part in re.split(split_pattern, text) if normalize_text(part)]: |
| if 3 <= len(sentence.split()) <= 30: |
| candidates.append(sentence) |
| for clause in (normalize_text(part) for part in re.split(r"\s*[,;:]\s*", sentence)): |
| if 3 <= len(clause.split()) <= 20: |
| candidates.append(clause) |
|
|
| if not candidates: |
| words = text.split() |
| candidates = [" ".join(words[: min(12, len(words))])] if words else [text] |
|
|
| ranked = sorted(unique_text(candidates), key=lambda item: (abs(len(item.split()) - 10), len(item))) |
| return ranked[:limit] |
|
|
| def _build_prompt(self, context: str, answer: str) -> str: |
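        """Compose the model input; the Vietnamese field labels match the format the fine-tuned model expects."""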
| return f"{self.task_prefix}:\nngữ cảnh: {context}\nđáp án: {answer}" |
|
|
| @torch.inference_mode() |
| def _sample(self, context: str, answer: str, count: int, temperature: float, top_p: float) -> list[str]: |
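        """Sample candidate questions with nucleus sampling, appending '?' and dropping short or duplicate outputs."""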
| if self.tokenizer is None or self.model is None or self.device is None: |
| raise RuntimeError("Model chưa được load.") |
|
|
| inputs = self.tokenizer( |
| self._build_prompt(context, answer), |
| return_tensors="pt", |
| truncation=True, |
| max_length=self.max_source_length, |
| ).to(self.device) |
| outputs = self.model.generate( |
| **inputs, |
| max_new_tokens=self.max_new_tokens, |
| do_sample=True, |
| temperature=temperature, |
| top_p=top_p, |
| num_return_sequences=count, |
| no_repeat_ngram_size=3, |
| repetition_penalty=1.1, |
| ) |
| questions: list[str] = [] |
| for token_ids in outputs: |
| question = normalize_text(self.tokenizer.decode(token_ids, skip_special_tokens=True)) |
| if question: |
| questions.append(question if question.endswith("?") else f"{question}?") |
| return [question for question in unique_text(questions) if len(question.split()) >= 3] |
|
|
| @torch.inference_mode() |
| def _beam_search(self, context: str, answer: str, count: int) -> list[str]: |
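        """Deterministic beam-search fallback returning up to min(count, 4) questions."""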
| if self.tokenizer is None or self.model is None or self.device is None: |
| raise RuntimeError("Model chưa được load.") |
|
|
| inputs = self.tokenizer( |
| self._build_prompt(context, answer), |
| return_tensors="pt", |
| truncation=True, |
| max_length=self.max_source_length, |
| ).to(self.device) |
| outputs = self.model.generate( |
| **inputs, |
| max_new_tokens=self.max_new_tokens, |
| num_beams=max(4, count), |
| num_return_sequences=min(count, 4), |
| early_stopping=True, |
| no_repeat_ngram_size=3, |
| repetition_penalty=1.1, |
| ) |
| questions: list[str] = [] |
| for token_ids in outputs: |
| question = normalize_text(self.tokenizer.decode(token_ids, skip_special_tokens=True)) |
| if question: |
| questions.append(question if question.endswith("?") else f"{question}?") |
| return [question for question in unique_text(questions) if len(question.split()) >= 3] |
|
|
| def generate(self, text: str, count: int = 5) -> list[str]: |
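        """Generate up to ``count`` unique questions via the sampling passes, then a beam-search fallback."""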
| self.load() |
| context = normalize_text(text) |
| if not context: |
| raise ValueError("Vui lòng nhập đoạn văn.") |
|
|
| count = parse_question_count(count) |
| pool = unique_text( |
| self._candidate_answers(context, max(32, count * 5)) + [context[:180], context[:280], context] |
| ) |
| output: list[str] = [] |
| seen: set[str] = set() |
|
|
| for temperature, top_p, limit, rounds, floor in GENERATION_PASSES: |
| answers = pool[:limit] if limit else pool |
| for _ in range(rounds): |
| for answer in answers: |
| remaining = count - len(output) |
| if remaining <= 0: |
| return output[:count] |
| sample_count = min(8, max(floor, remaining * 2)) |
| for question in self._sample(context, answer, sample_count, temperature, top_p): |
| key = question.lower() |
| if key not in seen: |
| seen.add(key) |
| output.append(question) |
| if len(output) >= count: |
| return output[:count] |
|
|
| for answer in pool[: min(8, len(pool))]: |
| remaining = count - len(output) |
| if remaining <= 0: |
| break |
| for question in self._beam_search(context, answer, remaining): |
| key = question.lower() |
| if key not in seen: |
| seen.add(key) |
| output.append(question) |
| if len(output) >= count: |
| break |
|
|
        return output[:count]


| def read_input_text(args: argparse.Namespace) -> str: |
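    """Read the passage from --text, --input_file, an interactive prompt, or stdin, in that order."""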
| if args.text: |
| return args.text |
| if args.input_file: |
| return Path(args.input_file).read_text(encoding="utf-8") |
| if sys.stdin.isatty(): |
| return input("Nhập đoạn văn cần sinh câu hỏi:\n").strip() |
    return sys.stdin.read().strip()


| def build_parser() -> argparse.ArgumentParser: |
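    """Build the command-line interface for the question generator."""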
    parser = argparse.ArgumentParser(description="Generate questions from a passage with a fine-tuned T5 model.")
    parser.add_argument("--model_dir", default="t5-viet-qg-finetuned", help="Model directory (best-model/final-model subfolders are preferred).")
    parser.add_argument("--task_prefix", default=TASK_PREFIX, help="Prompt prefix expected by the fine-tuned model.")
    parser.add_argument("--max_source_length", type=int, default=512, help="Maximum number of input tokens.")
    parser.add_argument("--max_new_tokens", type=int, default=64, help="Maximum number of generated tokens per question.")
    parser.add_argument("--num_questions", type=int, default=100, help=f"Number of questions to generate (1-{QUESTION_LIMIT}).")
    parser.add_argument("--device", choices=["auto", "cpu", "cuda"], default="auto", help="Inference device.")
    parser.add_argument("--cpu_threads", type=int, default=None, help="Thread count to use when running on CPU.")
    parser.add_argument("--gpu_dtype", default="auto", help="GPU dtype: auto, fp16/float16, bf16/bfloat16 or fp32/float32.")
    parser.add_argument("--text", default=None, help="Passage to generate questions from.")
    parser.add_argument("--input_file", default=None, help="Read the passage from this UTF-8 file.")
    parser.add_argument("--output_format", choices=["text", "json"], default="text", help="Print a numbered list or a JSON payload.")
    return parser


| def main() -> None: |
| args = build_parser().parse_args() |
| if hasattr(sys.stdout, "reconfigure"): |
| sys.stdout.reconfigure(encoding="utf-8") |
| generator = QuestionGenerator( |
| model_dir=args.model_dir, |
| task_prefix=args.task_prefix, |
| max_source_length=args.max_source_length, |
| max_new_tokens=args.max_new_tokens, |
| device=args.device, |
| cpu_threads=args.cpu_threads, |
| gpu_dtype=args.gpu_dtype, |
| prefer_nested_model=True, |
| ) |
| text = read_input_text(args) |
| questions = generator.generate(text, parse_question_count(args.num_questions)) |
| payload = { |
| "text": normalize_text(text), |
| "questions": questions, |
| "formatted": format_questions(questions), |
| "meta": generator.metadata(), |
| } |
| if args.output_format == "json": |
| print(json.dumps(payload, ensure_ascii=False, indent=2)) |
| return |
    print(payload["formatted"])


| if __name__ == "__main__": |
| main() |
|
|