| """String Operations Dataset for fast model development""" |
|
|
|
|
| import csv |
| import json |
| import os |
| import re |
|
|
| import datasets |
| import gzip |
|
|
| |
| |
| _CITATION = """\ |
| @InProceedings{huggingface:dataset, |
| title = {String Operations Dataset: A small set of string manipulation tasks for fast model development}, |
| author={Michael Granitzer}, |
| year={2023} |
| } |
| """ |


_DESCRIPTION = """\
Minimal dataset intended for LM development and testing using Python string operations.
The dataset is created by running different one-line Python string operations on random strings.
The idea is that a transformer implementation can learn the string operations and that this task is a good
proxy task for transformer operations on real languages and real tasks. Consequently, the
dataset is small and can be used in the development process without large-scale infrastructure.

There are different configurations for the dataset:

- `small`: contains fewer than 50k instances of various string lengths and only contains slicing operations, i.e. all Python operations expressible with `s[i:j:k]` (which also includes string reversal).
  - you can further choose different subsets according to either length or the kind of operation
- `small10`: like `small`, but only strings up to length 10
- `small15`: like `small`, but only strings up to length 15
- `small20`: like `small`, but only strings up to length 20

The fields have the following meaning:

- `input`: input string, i.e. the string and the string operation
- `output`: output of the string operation
- `code`: code for running the string operation in Python
- `res_var`: name of the result variable
- `operation`: kind of operation:
    - `step_x` for `s[::x]`
    - `char_at_x` for `s[x]`
    - `slice_x:y` for `s[x:y]`
    - `slice_step_x:y:z` for `s[x:y:z]`
    - `slice_reverse_i:j:k` for `s[i:i+j][::k]`

Siblings of `data` contain additional metadata about the dataset:

- `prompt` describes possible prompts based on that data, split into input prompts / output prompts
"""


_HOMEPAGE = "https://huggingface.co/PaDaS-Lab/SynStOp"

_LICENSE = "Apache 2.0 License"


_URLS = {
    "small": {
        "train": ["./small/stop_10_train.json.gz", "./small/stop_15_train.json.gz", "./small/stop_20_train.json.gz"],
        "test": ["./small/stop_10_test.json.gz", "./small/stop_15_test.json.gz", "./small/stop_20_test.json.gz"],
    },
    "small10": {
        "train": ["./small/stop_10_train.json.gz"],
        "test": ["./small/stop_10_test.json.gz"],
    },
    "small15": {
        "train": ["./small/stop_15_train.json.gz"],
        "test": ["./small/stop_15_test.json.gz"],
    },
    "small20": {
        "train": ["./small/stop_20_train.json.gz"],
        "test": ["./small/stop_20_test.json.gz"],
    },
}


class SynStOpDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the SynStOp dataset."""

    def __init__(self, subset="small", length=(10, 15, 20), **kwargs):
        """BuilderConfig for SynStOpDataset.

        Args:
            subset: name of the directory holding the data files, e.g. "small".
            length: tuple of string lengths covered by this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super(SynStOpDatasetConfig, self).__init__(**kwargs)
        self.subset = subset
        self.length = length
        # Resolve the per-split data files from the subset name and the lengths.
        self.files = {
            "train": [f"./{subset}/stop_{l}_train.json.gz" for l in length],
            "test": [f"./{subset}/stop_{l}_test.json.gz" for l in length],
        }
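
# For illustration (hypothetical instantiation): SynStOpDatasetConfig(name="small15",
# subset="small", length=(15,)) resolves self.files to
# {"train": ["./small/stop_15_train.json.gz"], "test": ["./small/stop_15_test.json.gz"]},
# matching the "small15" entry in _URLS above.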


class SynStOpDataset(datasets.GeneratorBasedBuilder):
    """Synthetic string-operation tasks over random strings for fast LM development and testing."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        SynStOpDatasetConfig(
            name="small",
            length=(10, 15, 20),
            version=VERSION,
            description="Small set of string operations with string slices only",
        )
    ] + [
        SynStOpDatasetConfig(
            name=f"small{l}",
            length=(l,),
            version=datasets.Version("0.0.1"),
            description="Small set of string operations with string slices only",
        )
        for l in [10, 15, 20]
    ]

    DEFAULT_CONFIG_NAME = "small"
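    # Available configurations: "small" (string lengths 10, 15 and 20 combined) plus
    # "small10", "small15" and "small20", which restrict the data to a single length.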

    def _info(self):
        features = datasets.Features(
            {
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
                "code": datasets.Value("string"),
                "res_var": datasets.Value("string"),
                "operation": datasets.Value("string"),
                "id": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download_and_extract decompresses the .json.gz files and returns local paths.
        urls = self.config.files
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # `filepath` is a list of JSON files (already decompressed by the download
        # manager). Example keys must be unique across all files, so a running
        # counter serves as the key; the `id` field still prefers the value stored
        # in the data when present.
        key = 0
        for filename in filepath:
            with open(filename, encoding="utf-8") as f:
                dataset = json.load(f)
            for data in dataset:
                example_id = data["id"] if "id" in data else key
                yield key, {
                    "input": data["input"],
                    "output": data["output"],
                    "code": data["code"],
                    "res_var": data["res_var"],
                    "id": example_id,
                    "operation": data["operation"],
                }
                key += 1
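

if __name__ == "__main__":
    # Minimal smoke test, a sketch only: it assumes the dataset is reachable under
    # the Hub id "PaDaS-Lab/SynStOp" (taken from _HOMEPAGE); adjust the path if the
    # script and data files live elsewhere.
    from datasets import load_dataset

    ds = load_dataset("PaDaS-Lab/SynStOp", "small10", split="train")
    print(ds[0])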