| """C4 dataset based on Common Crawl.""" | |
import json

import datasets

try:
    import lzma as xz
except ImportError:
    import pylzma as xz

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
A living legal dataset: excerpts from Executive Office for Immigration Review (EOIR)
decisions, each annotated with a binary privacy label.
"""

_CITATION = """
TODO
"""
| _URL = "" | |
| _DATA_URL = { | |
| "eoir_privacy" : | |
| { | |
| "train" : ["https://huggingface.co/datasets/pile-of-law/eoir_privacy/resolve/main/data/train.privacy.eoir.jsonl.xz"], | |
| "validation" : ["https://huggingface.co/datasets/pile-of-law/eoir_privacy/resolve/main/data/validation.privacy.eoir.jsonl.xz"] | |
| } | |
| } | |
| _VARIANTS = ["all"] + list(_DATA_URL.keys()) | |
class EOIRPrivacy(datasets.GeneratorBasedBuilder):
    """Builder for the EOIR privacy dataset."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name) for name in _VARIANTS]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "year": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "label": datasets.ClassLabel(num_classes=2, names=["False", "True"]),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # The "all" config pools every source listed in _DATA_URL; any other
        # config name selects just that source.
        data_urls = {}
        if self.config.name == "all":
            data_sources = list(_DATA_URL.keys())
        else:
            data_sources = [self.config.name]
        for split in ["train", "validation"]:
            data_urls[split] = []
            for source in data_sources:
                for chunk in _DATA_URL[source][split]:
                    data_urls[split].append(chunk)
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]
    def _generate_examples(self, filepaths):
        """Yields the examples in raw (text) form by iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line.strip():
                        example = json.loads(line)
                        # The raw label is parsed as a boolean; cast it to 0/1 so it
                        # maps onto the ClassLabel names ["False", "True"].
                        label = example["label"]
                        example["label"] = int(label)
                        yield id_, example
                        id_ += 1
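

# Minimal usage sketch, not part of the released loader (assumptions: this file is
# saved locally, e.g. as eoir_privacy.py, and the Hub URLs in _DATA_URL are
# reachable). It loads the "eoir_privacy" config through this script and prints one
# training example. The guard keeps it from running when the datasets library
# imports this module.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "eoir_privacy")
    print(ds["train"][0])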