| --- |
| configs: |
| - config_name: "10_shot_rlw" |
| data_files: |
| - split: dev |
| path: "10_shot_rlw/dev.*" |
| - split: ood_cons_count_10 |
| path: "10_shot_rlw/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "10_shot_rlw/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "10_shot_rlw/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "10_shot_rlw/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "10_shot_rlw/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "10_shot_rlw/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "10_shot_rlw/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "10_shot_rlw/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "10_shot_rlw/ood_lexical.*" |
| - split: test |
| path: "10_shot_rlw/test.*" |
| - split: train |
| path: "10_shot_rlw/train.*" |
| - config_name: "1_shot_eng" |
| data_files: |
| - split: dev |
| path: "1_shot_eng/dev.*" |
| - split: ood_cons_count_3 |
| path: "1_shot_eng/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "1_shot_eng/ood_cons_count_5.*" |
| - split: ood_cons_len_3 |
| path: "1_shot_eng/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "1_shot_eng/ood_cons_len_5.*" |
| - split: ood_lexical |
| path: "1_shot_eng/ood_lexical.*" |
| - split: other_tasks_id |
| path: "1_shot_eng/other_tasks_id.*" |
| - split: other_tasks_ood |
| path: "1_shot_eng/other_tasks_ood.*" |
| - split: test |
| path: "1_shot_eng/test.*" |
| - split: train |
| path: "1_shot_eng/train.*" |
| - config_name: "1_shot_rlw" |
| data_files: |
| - split: dev |
| path: "1_shot_rlw/dev.*" |
| - split: ood_cons_count_10 |
| path: "1_shot_rlw/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "1_shot_rlw/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "1_shot_rlw/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "1_shot_rlw/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "1_shot_rlw/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "1_shot_rlw/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "1_shot_rlw/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "1_shot_rlw/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "1_shot_rlw/ood_lexical.*" |
| - split: test |
| path: "1_shot_rlw/test.*" |
| - split: train |
| path: "1_shot_rlw/train.*" |
| - config_name: "1_shot_rlw_10x" |
| data_files: |
| - split: dev |
| path: "1_shot_rlw_10x/dev.*" |
| - split: ood_cons_count_10 |
| path: "1_shot_rlw_10x/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "1_shot_rlw_10x/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "1_shot_rlw_10x/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "1_shot_rlw_10x/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "1_shot_rlw_10x/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "1_shot_rlw_10x/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "1_shot_rlw_10x/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "1_shot_rlw_10x/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "1_shot_rlw_10x/ood_lexical.*" |
| - split: test |
| path: "1_shot_rlw_10x/test.*" |
| - split: train |
| path: "1_shot_rlw_10x/train.*" |
| - config_name: "2_shot_rlw" |
| data_files: |
| - split: dev |
| path: "2_shot_rlw/dev.*" |
| - split: ood_cons_count_10 |
| path: "2_shot_rlw/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "2_shot_rlw/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "2_shot_rlw/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "2_shot_rlw/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "2_shot_rlw/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "2_shot_rlw/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "2_shot_rlw/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "2_shot_rlw/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "2_shot_rlw/ood_lexical.*" |
| - split: test |
| path: "2_shot_rlw/test.*" |
| - split: train |
| path: "2_shot_rlw/train.*" |
| - config_name: "3_shot_rlw" |
| data_files: |
| - split: dev |
| path: "3_shot_rlw/dev.*" |
| - split: ood_cons_count_10 |
| path: "3_shot_rlw/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "3_shot_rlw/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "3_shot_rlw/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "3_shot_rlw/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "3_shot_rlw/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "3_shot_rlw/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "3_shot_rlw/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "3_shot_rlw/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "3_shot_rlw/ood_lexical.*" |
| - split: test |
| path: "3_shot_rlw/test.*" |
| - split: train |
| path: "3_shot_rlw/train.*" |
| - config_name: "5_shot_rlw" |
| data_files: |
| - split: dev |
| path: "5_shot_rlw/dev.*" |
| - split: ood_cons_count_10 |
| path: "5_shot_rlw/ood_cons_count_10.*" |
| - split: ood_cons_count_3 |
| path: "5_shot_rlw/ood_cons_count_3.*" |
| - split: ood_cons_count_5 |
| path: "5_shot_rlw/ood_cons_count_5.*" |
| - split: ood_cons_count_7 |
| path: "5_shot_rlw/ood_cons_count_7.*" |
| - split: ood_cons_len_10 |
| path: "5_shot_rlw/ood_cons_len_10.*" |
| - split: ood_cons_len_3 |
| path: "5_shot_rlw/ood_cons_len_3.*" |
| - split: ood_cons_len_5 |
| path: "5_shot_rlw/ood_cons_len_5.*" |
| - split: ood_cons_len_7 |
| path: "5_shot_rlw/ood_cons_len_7.*" |
| - split: ood_lexical |
| path: "5_shot_rlw/ood_lexical.*" |
| - split: test |
| path: "5_shot_rlw/test.*" |
| - split: train |
| path: "5_shot_rlw/train.*" |
|
|
| annotations_creators: |
| - machine-generated |
| language: |
| - en |
| language_creators: |
| - machine-generated |
| license: |
| - other |
| multilinguality: |
| - monolingual |
| pretty_name: Templatic Generation Tasks for In-Context Learning Research |
| size_categories: |
| - 10K<n<100K |
| - 1K<n<10K |
| - n<1K |
| source_datasets: |
| - original |
| tags: |
| - seq2seq |
| task_categories: |
| - text2text-generation |
| task_ids: [] |
| --- |
| # Dataset Card for Active/Passive/Logical Transforms |
|
|
| ## Table of Contents |
| - [Dataset Description](#dataset-description) |
| - [Dataset Summary](#dataset-summary) |
| - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) |
| - [Languages](#languages) |
| - [Dataset Structure](#dataset-structure) |
| - [Dataset Subsets (Tasks)](#dataset-subsets-tasks) |
| - [Data Splits](#data-splits) |
| - [Data Instances](#data-instances) |
| - [Data Fields](#data-fields) |
| - [Dataset Creation](#dataset-creation) |
| - [Curation Rationale](#curation-rationale) |
| - [Source Data](#source-data) |
| - [Annotations](#annotations) |
| - [Personal and Sensitive Information](#personal-and-sensitive-information) |
| - [Considerations for Using the Data](#considerations-for-using-the-data) |
| - [Social Impact of Dataset](#social-impact-of-dataset) |
| - [Discussion of Biases](#discussion-of-biases) |
| - [Other Known Limitations](#other-known-limitations) |
| - [Additional Information](#additional-information) |
| - [Dataset Curators](#dataset-curators) |
| - [Licensing Information](#licensing-information) |
| - [Citation Information](#citation-information) |
| - [Contributions](#contributions) |
|
|
| ## Dataset Description |
|
|
| - **Homepage:** |
| - **Repository:** |
| - **Paper:** |
| - **Leaderboard:** |
| - **Point of Contact:** [Roland Fernandez](mailto:rfernand@microsoft.com) |
|
|
| ### Dataset Summary |
|
|
| This synthetic dataset contains a set of templatic generation tasks using both English words and random 2-letter words. |
|
|
| ### Supported Tasks and Leaderboards |
|
|
| [TBD] |
|
|
| ### Languages |
|
|
| All data consists of English words or random 2-letter words. |
|
|
| ## Dataset Structure |
|
|
| The dataset consists of several subsets, or tasks. Each task contains a train split, a dev split, a |
| test split, and multiple out-of-distribution (OOD) splits. |
|
|
| Each sample in a split contains a source string, a target string, and an annotation string (describing the sample). |
|
|
| ### Dataset Subsets (Tasks) |
| The dataset consists of the following tasks: |
|
|
| ``` |
| - 1_shot_rlw (1 example input/output pair, a test input, and the gold output, all using random 2-letter words) |
| - 1_shot_eng (same as 1_shot_rlw, but using English words) |
| - 1_shot_rlw_10x (same as 1_shot_rlw, but with 10x the training samples) |
| - 2_shot_rlw (2 example input/output pairs, a test input, and the gold output, all using random 2-letter words) |
| - 3_shot_rlw (3 example input/output pairs, a test input, and the gold output, all using random 2-letter words) |
| - 5_shot_rlw (5 example input/output pairs, a test input, and the gold output, all using random 2-letter words) |
| - 10_shot_rlw (10 example input/output pairs, a test input, and the gold output, all using random 2-letter words) |
| ``` |
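|
| Each task above is exposed as a dataset configuration. Here is a minimal loading sketch using the Hugging Face `datasets` library; the repository id below is a placeholder assumption, not the confirmed Hub path of this dataset: |
|
| ```python |
| from datasets import load_dataset |
|
| # NOTE: placeholder repository id -- replace with this dataset's actual Hub path. |
| dataset = load_dataset("microsoft/nc_tgt_v11", "1_shot_eng") |
|
| # Each configuration exposes its splits as dictionary keys. |
| print(dataset["train"][0]["source"]) |
| ``` |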
|
|
| ### Data Splits |
|
|
| Most tasks have the following splits: |
| - train |
| - dev |
| - test |
| - ood_lexical |
| - ood_cons_count_3 |
| - ood_cons_count_5 |
| - ood_cons_count_7 |
| - ood_cons_count_10 |
| - ood_cons_len_3 |
| - ood_cons_len_5 |
| - ood_cons_len_7 |
| - ood_cons_len_10 |
| |
| Here is a table showing how the number of examples varies by split (for most tasks): |
| |
| | Dataset Split | Number of Instances in Split | |
| | ------------- | ------------------------------------------- | |
| | train | 280,000 | |
| | dev | 35,000 | |
| | test | 35,000 | |
| | ood_* | 84,000 | |
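|
| A single split of a task can also be loaded on its own; the sketch below reuses the placeholder repository id from the loading example above: |
|
| ```python |
| from datasets import load_dataset |
|
| # Load only the lexical OOD split of the 1_shot_rlw task (placeholder repo id). |
| ood = load_dataset("microsoft/nc_tgt_v11", "1_shot_rlw", split="ood_lexical") |
| print(len(ood))  # for most tasks, each ood_* split has 84,000 examples |
| ``` |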
|
|
|
|
| ### Data Instances |
|
|
| Each sample consists of a source, target, and annotation string (tab-separated in the underlying data files). |
|
|
| Here is an example from the *train* split of the *1_shot_eng* task: |
|
|
| ``` |
| { |
| 'raw': 'Q any mouse ) ; bear A any mouse & . Q road ) ; building A road & . {"cons_count": "Q2A1", "cons_len": "Q21.Q11"}', |
| 'source': 'Q any mouse ) ; bear A any mouse & . Q road ) ; building A', |
| 'target': 'road & .', |
| 'annotation': '{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}' |
| } |
| ``` |
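|
| In the raw data files the three fields of a line are separated by tab characters. A minimal parsing sketch, assuming the fields appear in source/target/annotation order as in the example above: |
|
| ```python |
| # A raw line: source, target, and annotation joined by tabs. |
| raw = ('Q any mouse ) ; bear A any mouse & . Q road ) ; building A\t' |
|        'road & .\t' |
|        '{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}') |
|
| source, target, annotation = raw.split('\t') |
| print(target)  # -> 'road & .' |
| ``` |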
|
|
| ### Data Fields |
|
|
| - `source`: the string containing the N-shot examples and the test cue |
| - `target`: the string containing the desired (gold) output |
| - `annotation`: a string describing the example, formatted as a Python or JSON dictionary |
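|
| Since `annotation` is stored as a string, it can be decoded into a dictionary before use. A minimal sketch that handles both JSON-style and Python-style dictionary strings: |
|
| ```python |
| import ast |
| import json |
|
| ann = '{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}' |
|
| # JSON-style annotations parse with json.loads; Python-style ones |
| # (single-quoted) fall back to ast.literal_eval. |
| try: |
|     annotation = json.loads(ann) |
| except json.JSONDecodeError: |
|     annotation = ast.literal_eval(ann) |
|
| print(annotation["cons_count"])  # -> "Q2A1" |
| ``` |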
|
|
| ## Dataset Creation |
|
|
| ### Curation Rationale |
|
|
| We wanted a dataset that would test in-context (and from-scratch) learning of abstract, semantic-free symbolic transformations, |
| based on a random template for each example. The dataset is designed to test three types of out-of-distribution (OOD) generalization: |
|
|
| - lexical - known words used in new contexts (relative to the train split) |
| - length - the train split uses constituents of 1, 2, or 4 words; OOD splits use 3, 5, 7, or 10 words |
| - count - the train split uses 1, 2, or 4 constituents; OOD splits use 3, 5, 7, or 10 constituents |
|
|
| ### Source Data |
|
|
| [N/A] |
|
|
| #### Initial Data Collection and Normalization |
|
|
| [N/A] |
|
|
| #### Who are the source language producers? |
|
|
| The dataset was generated from templates designed by Paul Smolensky and Roland Fernandez. |
|
|
| ### Annotations |
|
|
| Besides the source and target strings, each sample contains an annotation string that describes the sample. |
|
|
| #### Annotation process |
|
|
| The annotation string for each sample was generated from the template used to create that sample. |
|
|
| #### Who are the annotators? |
|
|
| [N/A] |
|
|
| ### Personal and Sensitive Information |
|
|
| No names or other sensitive information are included in the data. |
|
|
| ## Considerations for Using the Data |
|
|
| ### Social Impact of Dataset |
|
|
| The purpose of this dataset is to research how LLMs and from-scratch models can learn to solve templatic generation tasks. |
|
|
| ### Discussion of Biases |
|
|
| [TBD] |
|
|
| ### Other Known Limitations |
|
|
| [TBD] |
|
|
| ## Additional Information |
|
|
| The internal name of this dataset is nc_tgt_v11. Also see the DATASET_INFO.md and GRAMMAR.md files. |
| |
| ### Dataset Curators |
| |
| The dataset was generated from templates designed by Paul Smolensky and Roland Fernandez. |
| |
| ### Licensing Information |
| |
| This dataset is released under the [Permissive 2.0 license](https://cdla.dev/permissive-2-0/). |
| |
| ### Citation Information |
| |
| [TBD] |
| |
| ### Contributions |
| |
| Thanks to [The Neurocompositional AI group at Microsoft Research](https://www.microsoft.com/en-us/research/project/neurocompositional-ai/) for creating and adding this dataset. |
| |
| |