| | """ |
| | Several preprocessor classes. |
| | Author: md |
| | """ |
| |
|
| | from preprocessor.base import BasePreprocessorConfig, BasePreprocessor |
| | from const import ( |
| | DIALOGUE_SUMMARY, |
| | DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION, |
| | DIALOG, |
| | KNOWLEDGE, |
| | UTTERANCE, |
| | ROLES, |
| | EMOTION_RECOGNITION, |
| | VALUE, |
| | ABSA, |
| | CHARACTER_IDENTIFICATION, |
| | DIALOGUE_STATE_TRACKING, |
| | DOCUMENT_GROUNDED_CONVERSATION, |
| | TEXT2SQL, |
| | SLOT_FILLING, |
| | ROLE_RELATION_RECOGNITION, |
| | QUESTION_IN_CONTEXT_REWRITING, |
| | NATURAL_LANGUAGE_INFERENCE, |
| | MACHINE_READING_COMPREHENSION, |
| | MULTIPLE_CHOICE_QUESTION_ANSWERING, |
| | INTENT_DETECTION, |
| | DATA_TO_TEXT, |
| | CHIT_CHAT, |
| | TRAIN_SPLIT, |
| | ) |
| | from typing import Dict, List, Callable |
| | from copy import deepcopy |
| |
|
| |
|
| | class SerialConfig(BasePreprocessorConfig): |
| | def __init__( |
| | self, |
| | input_dir: str, |
| | output_dir: str, |
| | task: str, |
| | task_bos_token: str = "<s>", |
| | knowledge_bos_token: str = "[EK]", |
| | prompt_bos_token: str = "[C]", |
| | use_role: bool = True, |
| | turn_sep: str = None, |
| | roles_to_build_example: List = None, |
| | dev_and_test_roles_to_build_example: List = None, |
| | prompt_func: Callable = None, |
| | knowledge_func: Callable = None, |
| | label_func: Callable = None, |
| | turn_knowledge_func: Callable = None, |
| | roles_in_history: List[List] = None, |
| | cur_turn_process_func: Callable = None, |
| | all_turns_process_func: Callable = None, |
| | multi_ref_sep: str = None, |
| | *args, |
| | **kwargs, |
| | ) -> None: |
| | super().__init__(input_dir, output_dir, task, *args, **kwargs) |
| |
|
| | self.use_role = use_role |
| | self.turn_sep = turn_sep |
| | self.roles_to_build_example = roles_to_build_example |
| | self.prompt_func = prompt_func |
| | self.task_bos_token = task_bos_token |
| | self.knowledge_bos_token = knowledge_bos_token |
| | self.prompt_bos_token = prompt_bos_token |
| | self.knowledge_func = knowledge_func |
| | self.label_func = label_func |
| | self.turn_knowledge_func = turn_knowledge_func |
| | self.roles_in_history = roles_in_history |
| | self.multi_ref_sep = multi_ref_sep |
| | self.dev_and_test_roles_to_build_example = dev_and_test_roles_to_build_example |
| | self.cur_turn_process_func = cur_turn_process_func |
| | self.all_turns_process_func = all_turns_process_func |
| |
|
| |
|
| | def concat_roles(roles): |
| | return ", ".join(roles) |
| |
|
| |
|
| | def concat_dial_history(config: SerialConfig, history: List[Dict]): |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | utterance_list = [] |
| | for turn in history: |
| | if ( |
| | config.roles_in_history is not None |
| | and turn[ROLES] not in config.roles_in_history |
| | ): |
| | continue |
| |
|
| | if config.use_role: |
| | utterance_list.append( |
| | f"{concat_roles(turn[ROLES])}: {turn[UTTERANCE].strip()}" |
| | ) |
| | else: |
| | utterance_list.append(turn[UTTERANCE].strip()) |
| |
|
| | if not utterance_list: |
| | return "None" |
| |
|
| | turn_sep = " " |
| | if config.turn_sep is not None: |
| | turn_sep = f" {config.turn_sep} " |
| |
|
| | return turn_sep.join(utterance_list) |
| |
|
| |
|
| | def concat_history_knowledge_prompt( |
| | config: SerialConfig, history: str, knowledge: str = "", prompt: str = "" |
| | ): |
| | """Concat `history`, `knowledge` and `prompt`. |
| | |
| | NOTE: the order is fixed now. |
| | """ |
| | text = "" |
| |
|
| | if config.task_bos_token is not None: |
| | text = f"{config.task_bos_token} " |
| |
|
| | text += history |
| |
|
| | if knowledge is not None: |
| | text += f" {config.knowledge_bos_token} {knowledge}" |
| |
|
| | if prompt is not None: |
| | text += f" {config.prompt_bos_token} {prompt}" |
| |
|
| | return text |
| |
|
| |
|
| | def clean(text): |
| | return text.replace("\r\n", " ").replace("\n", " ").replace("\r", " ") |
| |
|
| |
|
| | def add_prefix_to_label(prefix, split, label): |
| | tgt = f"{prefix} {label}" if split == "train" else label |
| | return tgt |
| |
|
| |
|
| | class SerialPreprocessor(BasePreprocessor): |
| | def __init__(self, config: SerialConfig) -> None: |
| | super().__init__(config) |
| |
|
| | def extract_knowledge(self, example: Dict): |
| | if self.config.knowledge_func is None: |
| | knowledge = None |
| |
|
| | elif ( |
| | KNOWLEDGE not in example |
| | or not self.config.knowledge_func.__code__.co_argcount |
| | ): |
| | knowledge = self.config.knowledge_func() |
| | else: |
| | knowledge = self.config.knowledge_func(example[KNOWLEDGE][VALUE]) |
| |
|
| | return knowledge |
| |
|
| | def preprocess_for_dialogue_level(self, split: str, example: Dict, knowledge: str): |
| | label = self.config.label_func(example) |
| | tgt = add_prefix_to_label(self.config.task_bos_token, split, label) |
| |
|
| | history = concat_dial_history(self.config, example[DIALOG]) |
| |
|
| | if self.config.prompt_func is None: |
| | prompt = "" |
| | elif not self.config.prompt_func.__code__.co_argcount: |
| | prompt = self.config.prompt_func() |
| |
|
| | src = concat_history_knowledge_prompt(self.config, history, knowledge, prompt) |
| |
|
| | return [{"src": clean(src), "tgt": clean(tgt)}] |
| |
|
| | def preprocess_for_label_level(self, split: str, example: Dict, knowledge: str): |
| | label_generator = self.config.label_func(example) |
| |
|
| | examples = [] |
| | for turn_id, label, extra_args in label_generator: |
| | tgt = add_prefix_to_label(self.config.task_bos_token, split, label) |
| |
|
| | hist = deepcopy(example[DIALOG]) |
| | if self.config.all_turns_process_func is not None: |
| | hist[turn_id] = self.config.all_turns_process_func( |
| | hist[turn_id], *extra_args |
| | ) |
| |
|
| | history = concat_dial_history(self.config, hist) |
| |
|
| | if self.config.prompt_func is None: |
| | prompt = "" |
| | elif not self.config.prompt_func.__code__.co_argcount: |
| | prompt = self.config.prompt_func() |
| |
|
| | src = concat_history_knowledge_prompt( |
| | self.config, history, knowledge, prompt |
| | ) |
| |
|
| | examples.append({"src": clean(src), "tgt": clean(tgt)}) |
| |
|
| | return examples |
| |
|
| | def get_label( |
| | self, turn, include_current_turn, turn_idx, split, origin_knowledge=None |
| | ): |
| | |
| | if ( |
| | split != TRAIN_SPLIT |
| | and self.config.dev_and_test_roles_to_build_example is not None |
| | ): |
| | roles_to_build_example = self.config.dev_and_test_roles_to_build_example |
| | else: |
| | roles_to_build_example = self.config.roles_to_build_example |
| | if ( |
| | roles_to_build_example is not None |
| | and turn[ROLES] not in roles_to_build_example |
| | ): |
| | return None |
| |
|
| | |
| | if not include_current_turn and turn_idx == 0: |
| | return None |
| |
|
| | if self.config.task != DIALOGUE_STATE_TRACKING: |
| | try: |
| | label = self.config.label_func(turn, split=split) |
| | except: |
| | label = self.config.label_func(turn, origin_knowledge, split=split) |
| | else: |
| | label = self.config.label_func( |
| | turn, self.ontologies[split], do_train=(split == TRAIN_SPLIT) |
| | ) |
| |
|
| | return label |
| |
|
| | def preprocess_for_turn_level( |
| | self, |
| | split: str, |
| | example: Dict, |
| | knowledge: str, |
| | include_current_turn=False, |
| | origin_knowledge=None, |
| | ): |
| | examples = [] |
| | multiref = [] |
| | for turn_idx, turn in enumerate(example[DIALOG]): |
| | label = self.get_label( |
| | turn, include_current_turn, turn_idx, split, origin_knowledge |
| | ) |
| |
|
| | if label is None: |
| | continue |
| |
|
| | multiref.append(label) |
| | |
| | if ( |
| | self.config.multi_ref_sep is not None |
| | and split != "train" |
| | and turn_idx < len(example[DIALOG]) - 1 |
| | and self.get_label( |
| | example[DIALOG][turn_idx + 1], |
| | include_current_turn, |
| | turn_idx + 1, |
| | split, |
| | ) |
| | is not None |
| | ): |
| | continue |
| |
|
| | if self.config.multi_ref_sep is not None and split != "train": |
| | label = self.config.multi_ref_sep.join(multiref) |
| |
|
| | tgt = add_prefix_to_label(self.config.task_bos_token, split, label) |
| |
|
| | end = (turn_idx + 1) if include_current_turn else turn_idx |
| |
|
| | hist = deepcopy(example[DIALOG][:end]) |
| | if self.config.cur_turn_process_func is not None: |
| | hist[-1] = self.config.cur_turn_process_func(hist[-1]) |
| |
|
| | history = concat_dial_history(self.config, hist) |
| |
|
| | if self.config.prompt_func is None: |
| | prompt = "" |
| | elif not self.config.prompt_func.__code__.co_argcount: |
| | prompt = self.config.prompt_func() |
| |
|
| | if self.config.turn_knowledge_func is not None: |
| | knowledge_to_use = self.config.turn_knowledge_func(knowledge, turn) |
| | else: |
| | knowledge_to_use = knowledge |
| |
|
| | src = concat_history_knowledge_prompt( |
| | self.config, history, knowledge_to_use, prompt |
| | ) |
| |
|
| | examples.append({"src": clean(src), "tgt": clean(tgt)}) |
| |
|
| | multiref = [] |
| |
|
| | return examples |
| |
|
| | def preprocess_line(self, split: str, example: Dict) -> List[Dict]: |
| | knowledge = self.extract_knowledge(example) |
| |
|
| | |
| | if self.config.task == DIALOGUE_SUMMARY: |
| | return self.preprocess_for_dialogue_level(split, example, knowledge) |
| |
|
| | |
| | if self.config.task == EMOTION_RECOGNITION: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=False |
| | ) |
| |
|
| | |
| | if self.config.task.startswith(ABSA): |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == CHARACTER_IDENTIFICATION: |
| | |
| | |
| | |
| | |
| | return self.preprocess_for_label_level(split, example, knowledge) |
| |
|
| | |
| | if self.config.task == DIALOGUE_STATE_TRACKING: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == DOCUMENT_GROUNDED_CONVERSATION: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=False |
| | ) |
| |
|
| | |
| | if self.config.task == TEXT2SQL: |
| | seq_examples = self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | for idx in range(len(seq_examples)): |
| | seq_examples[idx]["db_id"] = knowledge["db_id"] |
| |
|
| | return seq_examples |
| |
|
| | |
| | if self.config.task == SLOT_FILLING: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == ROLE_RELATION_RECOGNITION: |
| | return self.preprocess_for_dialogue_level(split, example, knowledge) |
| |
|
| | |
| | if self.config.task == QUESTION_IN_CONTEXT_REWRITING: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == NATURAL_LANGUAGE_INFERENCE: |
| | return self.preprocess_for_turn_level( |
| | split, |
| | example, |
| | knowledge, |
| | include_current_turn=True, |
| | origin_knowledge=example[KNOWLEDGE][VALUE], |
| | ) |
| |
|
| | |
| | if self.config.task == MACHINE_READING_COMPREHENSION: |
| | return self.preprocess_for_turn_level(split, example, knowledge) |
| |
|
| | |
| | if self.config.task == MULTIPLE_CHOICE_QUESTION_ANSWERING: |
| | return self.preprocess_for_turn_level( |
| | split, |
| | example, |
| | knowledge, |
| | include_current_turn=True, |
| | origin_knowledge=example[KNOWLEDGE][VALUE], |
| | ) |
| |
|
| | |
| | if self.config.task == INTENT_DETECTION: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == DATA_TO_TEXT: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | |
| | if self.config.task == CHIT_CHAT: |
| | return self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=False |
| | ) |
| |
|
| | if self.config.task == "Semantic Parsing": |
| | seq_examples = self.preprocess_for_turn_level( |
| | split, example, knowledge, include_current_turn=True |
| | ) |
| |
|
| | return seq_examples |
| |
|