```python
import os

from datasets import load_dataset, concatenate_datasets, Value, Features
from transformers import GPT2Tokenizer

# Target schema: the per-language subsets do not all share identical column
# types, so the mismatched ones are cast to a common feature set before
# concatenation.
new_features = Features({
    'max_stars_repo_path': Value('string'),
    'max_stars_repo_name': Value('string'),
    'max_stars_count': Value('int64'),
    'id': Value('string'),
    'content': Value('string')
})

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")


def count_tokens(row_data):
    # Number of GPT-2 tokens in a single source file.
    return {"n_tokens": len(tokenizer(row_data["content"])["input_ids"])}


# Load the per-language subsets of StarCoderData.
dc = load_dataset("bigcode/starcoderdata", data_dir="c", split="train").cast(new_features)
dcpp = load_dataset("bigcode/starcoderdata", data_dir="cpp", split="train").cast(new_features)
dpython = load_dataset("bigcode/starcoderdata", data_dir="python", split="train")
djson = load_dataset("bigcode/starcoderdata", data_dir="json", split="train")
djava = load_dataset("bigcode/starcoderdata", data_dir="java", split="train")

# Merge the languages into one shuffled dataset, keeping only the columns
# needed for filtering and training.
seed = 42
aggregated_dataset = concatenate_datasets([dc, dpython, dcpp, djson, djava])
aggregated_dataset = aggregated_dataset.remove_columns(["id", "max_stars_repo_path", "max_stars_repo_name"])
aggregated_dataset = aggregated_dataset.shuffle(seed=seed)

# Keep only files from repositories with more than 300 stars.
qualified_subset = aggregated_dataset.filter(lambda x: x["max_stars_count"] > 300, num_proc=16)

# Sample at most 2.5M files from the filtered subset.
n_sample = min(2_500_000, qualified_subset.num_rows)
target_dataset = qualified_subset.shuffle(seed=seed).select(range(n_sample))

# Count tokens per file and report the total corpus size. Note that
# `target_dataset` is a plain Dataset (not a DatasetDict), so it is mapped
# directly rather than indexed with a "train" split.
target_train_dataset = target_dataset.map(count_tokens, num_proc=16)
total_tokens = sum(target_train_dataset["n_tokens"])
print(f"Total tokens: {total_tokens:,}")

# Write the result to a Parquet file inside the output directory
# (to_parquet expects a file path, not a directory).
target_dataset_dir = "/data/filtered_starcoder"
os.makedirs(target_dataset_dir, exist_ok=True)
target_train_dataset.to_parquet(os.path.join(target_dataset_dir, "train.parquet"))
```
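A quick way to sanity-check the export is to load the Parquet file back with the standard `datasets` Parquet loader. This is only a minimal sketch; the file path below assumes the output location used in the snippet above and should be adjusted to your own setup.

```python
from datasets import load_dataset

# Reload the filtered corpus from the Parquet file written above
# (path is an assumption based on the snippet's output directory).
filtered = load_dataset(
    "parquet",
    data_files="/data/filtered_starcoder/train.parquet",
    split="train",
)
print(filtered)  # shows the columns (content, max_stars_count, n_tokens) and row count
```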