DANGDOCAO committed
Commit f83acf4 · verified · 1 Parent(s): c7cd490

Delete HVU_QA/generate_question.py

Files changed (1)
  1. HVU_QA/generate_question.py +0 -153
HVU_QA/generate_question.py DELETED
@@ -1,153 +0,0 @@
- import re
- import torch
- from difflib import SequenceMatcher
- from transformers import T5Tokenizer, T5ForConditionalGeneration
- from transformers.utils import logging as hf_logging
-
- hf_logging.set_verbosity_error()
-
- MODEL_DIR = "t5-viet-qg-finetuned"
-
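- # Heuristic answer-candidate patterns for Vietnamese text: quoted spans,
- # phrases after "là"/"gồm"/"do"/"theo" ("is"/"comprises"/"by"/"according to"),
- # four-digit years, and legal references such as "Điều 5" (Article 5) or
- # "Khoản 2" (Clause 2). extract_answers() collects matches via m.groups(),
- # so each alternative must expose a capturing group.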
- PATTERN = re.compile(
-     r"""
-     “([^”]{3,120})”                                # curly-quoted span
-     | "([^"]{3,120})"                              # straight-quoted span
-     | \b(?:là|gồm|do|theo)\s+([^,.;:\n]{3,120})    # phrase after the keyword
-     | (\b\d{4}\b)                                  # four-digit year
-     | (\b(?:Điều|Khoản)\s+\d+\b)                   # Article/Clause reference
-     """,
-     re.VERBOSE | re.IGNORECASE,
- )
-
-
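- # norm(): collapse whitespace runs to single spaces and strip the ends.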
- def norm(s: str) -> str:
-     return re.sub(r"\s+", " ", s).strip()
-
-
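- # is_dup(): fuzzy duplicate check. Returns True when q's SequenceMatcher
- # ratio against any kept question (lowercased) reaches thr.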
- def is_dup(q: str, qs: list[str], thr: float = 0.85) -> bool:
-     ql = q.lower()
-     for x in qs:
-         if SequenceMatcher(None, ql, x.lower()).ratio() >= thr:
-             return True
-     return False
-
-
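- # extract_answers(): builds a deduplicated candidate list, capped at max_n.
- # Falls back to 60-char chunks of the first 500 chars when the regex yields
- # fewer than 8 candidates, and to the first 120 chars if nothing matched.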
- def extract_answers(ctx: str, max_n: int = 60) -> list[str]:
-     ctx = norm(ctx)
-     answers, seen = [], set()
-
-     for m in PATTERN.finditer(ctx):
-         for g in m.groups():
-             if not g:
-                 continue
-             g = norm(g)
-             k = g.lower()
-             if 3 <= len(g) <= 120 and k not in seen:
-                 seen.add(k)
-                 answers.append(g)
-                 if len(answers) >= max_n:
-                     return answers
-
-     if len(answers) < 8:
-         for i in range(0, min(len(ctx), 500), 60):
-             ch = norm(ctx[i : i + 60])
-             k = ch.lower()
-             if len(ch) >= 15 and k not in seen:
-                 seen.add(k)
-                 answers.append(ch)
-                 if len(answers) >= max_n:
-                     break
-
-     if not answers and ctx:
-         answers = [ctx[:120]]
-
-     return answers
-
-
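- # load_model(): loads tokenizer and model, preferring CUDA. If moving the
- # model to GPU raises RuntimeError (typically out of memory), it clears the
- # CUDA cache and retries on CPU.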
- def load_model():
-     tok = T5Tokenizer.from_pretrained(MODEL_DIR)
-     mdl = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
-
-     dev = "cuda" if torch.cuda.is_available() else "cpu"
-     try:
-         mdl = mdl.to(dev)
-     except RuntimeError:
-         dev = "cpu"
-         try:
-             torch.cuda.empty_cache()
-         except Exception:
-             pass
-         mdl = mdl.to(dev)
-
-     mdl.eval()
-     return tok, mdl, dev
-
-
- tokenizer, model, device = load_model()
-
-
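- # generate_questions(): answer-conditioned question generation. Each candidate
- # answer is prompted for several rounds of sampling; outputs are normalized,
- # forced to end with "?", and filtered through is_dup(). Whenever a round
- # contributes nothing new, temperature and top_p are nudged upward (capped at
- # 1.25 and 0.995) to diversify the next round.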
- def generate_questions(context: str, n: int = 20) -> list[str]:
-     ctx = norm(context)
-     answers = extract_answers(ctx, max_n=80)
-     questions = []
-
-     gen_cfg = dict(
-         do_sample=True,
-         top_k=80,
-         top_p=0.98,
-         temperature=1.05,
-         max_new_tokens=72,
-         no_repeat_ngram_size=3,
-         repetition_penalty=1.08,
-     )
-
-     num_ret = 8 if n <= 20 else 10
-
-     def run_prompt(ans: str, rounds: int):
-         # gen_cfg is mutated in place below, so no `nonlocal` is needed.
-         prompt = f"answer: {ans}\ncontext: {ctx}\nquestion:"
-         inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
-
-         for _ in range(rounds):
-             outs = model.generate(**inputs, num_return_sequences=num_ret, **gen_cfg)
-             added = 0
-             for o in outs:
-                 q = norm(tokenizer.decode(o, skip_special_tokens=True))
-                 if not q:
-                     continue
-                 if not q.endswith("?"):
-                     q += "?"
-                 if len(q) >= 6 and not is_dup(q, questions, thr=0.85):
-                     questions.append(q)
-                     added += 1
-                 if len(questions) >= n:
-                     return
-             if added == 0:
-                 # A barren round: widen the sampling distribution slightly.
-                 gen_cfg["temperature"] = min(1.25, gen_cfg["temperature"] + 0.05)
-                 gen_cfg["top_p"] = min(0.995, gen_cfg["top_p"] + 0.005)
-
-     with torch.inference_mode():
-         for ans in answers:
-             if len(questions) >= n:
-                 break
-             run_prompt(ans, rounds=6)
-
-         if len(questions) < n:
-             run_prompt(ctx[:120], rounds=12)
-
-     return questions[:n]
-
-
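- # CLI entry point. The Vietnamese prompts read: "Enter a passage of text:",
- # "Enter the number of questions to generate:" and "Generated questions:".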
- if __name__ == "__main__":
-     ctx = input("\nNhập đoạn văn bản:\n").strip()
-     try:
-         # Empty input falls back to the default of 20 questions.
-         n = int(input("\nNhập số lượng câu hỏi cần sinh: ").strip() or "20")
-     except ValueError:
-         n = 20
-
-     n = max(1, min(n, 200))
-     qs = generate_questions(ctx, n)
-
-     print("\nCác câu hỏi sinh ra:")
-     for i, q in enumerate(qs, 1):
-         print(f"{i}. {q}")
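
For reference, the deleted module exposed generate_questions() as its entry point and ran load_model() once at import time, so it could also be driven programmatically. A minimal sketch, assuming the repo root is on sys.path and the "t5-viet-qg-finetuned" checkpoint directory is present; the sample passage is a placeholder:

    # Importing the module triggers load_model(), so the first import is slow.
    from HVU_QA.generate_question import generate_questions

    ctx = "Điều 5 quy định về quyền và nghĩa vụ của sinh viên."  # placeholder passage
    for i, q in enumerate(generate_questions(ctx, n=5), 1):
        print(f"{i}. {q}")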