|
# Standard library
import json
import re
from typing import List

# Dingo framework
from dingo.io.input import Data, RequiredField
from dingo.io.output.eval_detail import EvalDetail
from dingo.model import Model
from dingo.model.llm.base_openai import BaseOpenAI
from dingo.model.response.response_class import ResponseScoreTypeNameReason
from dingo.utils import log
from dingo.utils.exception import ConvertJsonError
| 13 | + |
@Model.llm_register("LLMHtmlExtractCompareV3")
class LLMHtmlExtractCompareV3(BaseOpenAI):
    """HTML-extraction tool comparison evaluation, version 3.

    Reuses the LLMTextQualityV5 quality dimensions (Completeness /
    Effectiveness / Similarity / Security) to compare the complete outputs
    of two HTML extraction tools and decide which extraction is of higher
    quality for LLM pretraining.

    Difference from V2: V2 focuses on "which tool kept more content",
    whereas V3 focuses on "which tool introduced fewer quality defects".
    V3 sends the full texts (no diff preprocessing) so that defects that
    need surrounding context to be attributed correctly (especially
    Error_Formula) can be identified accurately.

    Input data requirements:
        - input_data.prompt: text extracted by tool A (the ``Data.prompt`` field)
        - input_data.content: text extracted by tool B (the ``Data.content`` field)
        - language: optional, read from ``raw_data["language"]`` or
          ``input_data.language``; defaults to ``"en"``

    ``EvalDetail.label`` prefixes align with ``Data`` fields (avoiding the
    TOOL_ONE/TOOL_TWO ambiguity):
        - PROMPT_BETTER: score=1, the ``Data.prompt`` side extraction is better
        - CONTENT_BETTER: score=2, the ``Data.content`` side is better
        - EXTRACTION_EQUAL: score=0, both are comparable
    """

    _metric_info = {
        "category": "Pretrain Text Quality Assessment Metrics",
        "metric_name": "LLMHtmlExtractCompareV3",
        "description": "Compares two HTML extraction tools using LLM pretraining quality dimensions (completeness, effectiveness, similarity, security) with full-text evaluation for accurate defect attribution",
    }

    _required_fields = [RequiredField.CONTENT, RequiredField.PROMPT]

    prompt = {
        "content_en": r"""You are an expert in assessing pretraining data quality for large language models. You will compare two texts extracted from the same HTML page by different tools, and determine which extraction is of higher quality for LLM pretraining.

# Quality Dimensions

Evaluate BOTH texts against these dimensions and compare:

## 1. Completeness
- **Error_Content_Coverage**: One extraction tool failed to capture the full main-body content of the page — at least one complete paragraph or named section present in the other extraction is entirely absent (e.g., an "Applications" or "Common Algorithms" section is missing). This is about **extraction-level omission** (the tool did not locate or include that block), NOT about individual missing words, broken formatting, or formula stripping (use the specific error types below for those).
- **Error_Formula**: Mathematical content with broken LaTeX syntax (unmatched delimiters, unclosed environments) OR systematically stripped symbols/formulas (orphan hyphens from stripped Greek letters like "-solutions" instead of "κ-solutions", empty positions after connective words like "thus ;" where a formula was removed)
- **Error_Table**: Malformed or unreadable table structures (misaligned columns, missing headers, garbled HTML tags)
- **Error_Code**: Code blocks with formatting corruption (missing code fences, lost indentation, broken identifiers like "sys .argv", line numbers mixed with code)

## 2. Effectiveness
- **Error_Garbled_Characters**: Encoding issues or anti-crawler artifacts ("’", "□□□", ""); threshold: >1% of characters garbled
- **Error_Words_Stuck**: Missing spaces breaking tokenization ("Thequickbrownfox"); threshold: >1% of text affected
- **Error_Lack_Punctuation**: Unclear sentence boundaries ("I like apples they are red also I like oranges")

## 3. Similarity
- **Error_Duplicate**: Excessive repetition dominating the text; threshold: same phrase repeats >5 times OR duplicate ratio >30%

## 4. Security
- **Error_Politics**: Content promoting extremism, terrorism, ethnic hatred
- **Error_Prohibition**: Violence, pornography, gambling, drugs

# Input

**Text A** (Data.prompt — first extraction tool):
{text_tool_a}

**Text B** (Data.content — second extraction tool):
{text_tool_b}

# Evaluation Rules

1. Evaluate each text independently against the quality dimensions above, then compare.
2. Identify the dimension with the **largest quality difference** between the two texts.
3. Minor formatting or whitespace differences that do not affect training quality should be ignored.

⚠️ The order of Text A and Text B reflects the fixed field mapping: A = `Data.prompt`, B = `Data.content`. Do NOT favor either text based on its position.

# Output Format

Return JSON only:
{{
  "score": [0|1|2],
  "name": "[error_type from the dimension with greatest difference]",
  "reason": "[objective description of quality differences]"
}}

Where:
- `score`: 1 if Text A (`Data.prompt`) is better, 2 if Text B (`Data.content`) is better, 0 if equal
- `name`: The specific error type with the biggest quality difference (e.g., "Error_Content_Coverage", "Error_Formula", "Error_Table", "Error_Code", "Error_Garbled_Characters", "Error_Words_Stuck", "Error_Lack_Punctuation", "Error_Duplicate", "Error_Politics", "Error_Prohibition"). Use "None" if both are equal.
- `reason`: Brief objective description (1-3 sentences)
""",
        "content_cn": r"""你是一位大语言模型预训练数据质量评估专家。你将对比两个不同 HTML 提取工具从同一网页中提取的文本,判断哪个提取结果的质量更高,更适合用于 LLM 预训练。

# 质量维度

请基于以下维度分别评估两段文本并进行对比:

## 1. 完整性 (Completeness)
- **Error_Content_Coverage**:一个提取工具未能覆盖网页的完整主体内容——另一方存在的至少一个完整段落或命名小节在这方完全缺失(例如"应用场景"或"常用算法"整节不见)。这针对的是**提取层面的遗漏**(工具未识别或未包含该区块),而非个别词语缺失、格式损坏或公式剥离(这些请用下方对应的专用错误类型)。
- **Error_Formula**:数学内容存在 LaTeX 语法错误(未匹配的定界符、未关闭的环境)或符号/公式被系统性剥离(如 "κ-solutions" 被剥离为 "-solutions",连接词后公式缺失如 "thus ;" )
- **Error_Table**:表格结构畸形或不可读(列未对齐、缺少表头、HTML标签残留)
- **Error_Code**:代码块格式损坏(缺少代码围栏、缩进丢失、标识符断裂如 "sys .argv"、行号混入代码)

## 2. 有效性 (Effectiveness)
- **Error_Garbled_Characters**:编码问题或反爬虫伪影("’"、"□□□"、"");阈值:>1% 的字符为乱码
- **Error_Words_Stuck**:缺失空格导致分词错误("Thequickbrownfox");阈值:>1% 的文本受影响
- **Error_Lack_Punctuation**:句子边界不清("I like apples they are red also I like oranges")

## 3. 相似性 (Similarity)
- **Error_Duplicate**:过度重复内容;阈值:同一短语重复>5次 或 重复率>30%

## 4. 安全性 (Security)
- **Error_Politics**:宣扬极端主义、恐怖主义、民族仇恨的内容
- **Error_Prohibition**:暴力、色情、赌博、毒品相关内容

# 输入

**文本A**(Data.prompt — 第一个提取工具的结果):
{text_tool_a}

**文本B**(Data.content — 第二个提取工具的结果):
{text_tool_b}

# 评估规则

1. 独立按上述质量维度评估每段文本,再进行对比。
2. 找出两段文本之间**质量差异最大**的维度。
3. 不影响训练质量的细微格式差异或空白差异应忽略。

⚠️ 文本A和文本B的顺序反映固定字段映射:A = `Data.prompt`,B = `Data.content`。请勿因位置先后偏好任何一方。

# 输出格式

仅返回 JSON:
{{
  "score": [0|1|2],
  "name": "[差异最大维度中的具体错误类型]",
  "reason": "[客观描述两段文本的质量差异]"
}}

其中:
- `score`:文本A(`Data.prompt`)更好为 1,文本B(`Data.content`)更好为 2,质量相当为 0
- `name`:差异最大的具体错误类型(如 "Error_Content_Coverage"、"Error_Formula"、"Error_Table"、"Error_Code"、"Error_Garbled_Characters"、"Error_Words_Stuck"、"Error_Lack_Punctuation"、"Error_Duplicate"、"Error_Politics"、"Error_Prohibition")。如果两者相当则为 "None"。
- `reason`:简要客观描述(1-3句话)
""",
    }

    @classmethod
    def build_messages(cls, input_data: Data) -> List:
        """Build the single-turn user message comparing both extractions.

        Text A is always ``Data.prompt`` and Text B is always
        ``Data.content``; the template itself warns the model not to favor
        either text based on position.
        """
        text_tool_a = input_data.prompt
        text_tool_b = input_data.content

        # raw_data["language"] takes precedence, then the attribute, then
        # English. The `or`-chain also treats an explicit None/"" value as
        # "unset" instead of short-circuiting the fallback.
        raw_data = getattr(input_data, "raw_data", {}) or {}
        language = (
            raw_data.get("language")
            or getattr(input_data, "language", None)
            or "en"
        )

        if language == "zh":
            prompt_template = cls.prompt["content_cn"]
        else:
            prompt_template = cls.prompt["content_en"]

        prompt_content = prompt_template.format(
            text_tool_a=text_tool_a,
            text_tool_b=text_tool_b,
        )

        return [{"role": "user", "content": prompt_content}]

    @classmethod
    def process_response(cls, response: str) -> EvalDetail:
        """Parse the LLM response into an :class:`EvalDetail` comparison result.

        Strips an optional leading ``<think>…</think>`` block (its content is
        appended to the reason), removes optional markdown code fences, parses
        the remaining JSON object, and maps the score onto Data-field-aligned
        labels (PROMPT_BETTER / CONTENT_BETTER / EXTRACTION_EQUAL).

        Raises:
            ConvertJsonError: if the cleaned response is not a JSON object.
        """
        log.info(response)

        # Extract the optional chain-of-thought emitted by reasoning models.
        response_think = ""
        if response.startswith("<think>"):
            think_content = re.search(
                r"<think>(.*?)</think>", response, flags=re.DOTALL
            )
            if think_content:
                response_think = think_content.group(1).strip()
            response = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL)
            response = response.strip()

        # Strip optional markdown code fences wrapping the JSON payload.
        if response.startswith("```json"):
            response = response[7:]
        if response.startswith("```"):
            response = response[3:]
        if response.endswith("```"):
            response = response[:-3]
        response = response.strip()

        # Keep the try body minimal so only JSON parsing failures are mapped
        # to ConvertJsonError (the reason-merge below must not be swallowed).
        try:
            response_json = json.loads(response)
        except json.JSONDecodeError as err:
            raise ConvertJsonError(f"Convert to JSON format failed: {response}") from err
        # A valid payload must be a JSON object; previously a bare list or
        # number surfaced as an uncaught TypeError on the subscript below.
        if not isinstance(response_json, dict):
            raise ConvertJsonError(f"Convert to JSON format failed: {response}")

        if response_think:
            reason = response_json.get("reason", "")
            # Avoid a leading newline when the model returned no reason.
            response_json["reason"] = (
                f"{reason}\n{response_think}" if reason else response_think
            )

        response_model = ResponseScoreTypeNameReason(**response_json)

        result = EvalDetail(metric=cls.__name__)

        # Label prefixes match Data fields: prompt=first extraction, content=second.
        if response_model.score == 1:
            tmp_type = "PROMPT_BETTER"
        elif response_model.score == 2:
            tmp_type = "CONTENT_BETTER"
        else:
            tmp_type = "EXTRACTION_EQUAL"

        # status is True whenever Data.prompt did NOT win (content better or
        # equal) — NOTE(review): presumably used downstream to flag items;
        # confirm against consumers of EvalDetail.status.
        result.status = response_model.score != 1
        result.label = [f"{tmp_type}.{response_model.name}"]
        result.reason = [json.dumps(response_json, ensure_ascii=False)]

        return result