Add chunking of pretrain text modeling datasets #3586

Merged: 2 commits, Jul 19, 2023
model/model_training/custom_datasets/pretrain_datasets.py (82 changes: 49 additions & 33 deletions)
@@ -1,7 +1,6 @@
 """
 Datasets for LM objective pre-training aimed to prevent catastrophic forgetting during fine-tuning
 """
-import random
 from pathlib import Path
 from typing import Optional
 
@@ -10,57 +9,74 @@
 from torch.utils.data import Dataset
 
 
-class RedPajama(Dataset):
-    name = "red_pajama"
-
+class PretrainDataset(Dataset):
     def __init__(
         self,
+        dataset_name: str,
+        split: str,
+        text_column_name: str,
         cache_dir: str | Path,
         mode: str = "sft",
-        char_max_len: Optional[int] = 65536,
-        random_offset: bool = False,
+        max_chunk_size: Optional[int] = 64 * 1024,
     ) -> None:
         super().__init__()
-        self.mode = mode
+
         assert mode in ("sft", "rm", "rl")
-        self.char_max_len = char_max_len
-        self.random_offset = random_offset
-        self.dataset = load_dataset("togethercomputer/RedPajama-Data-1T-Sample", cache_dir=cache_dir)["train"]
+        self.mode = mode
+        self.max_chunk_size = max_chunk_size
+        self.dataset = load_dataset(dataset_name, cache_dir=cache_dir)[split]
+        self.text_column_name = text_column_name
+
+        # split long entries into chunks smaller than max_chunk_size
+        self.index_map = []
+        for i, entry in enumerate(self.dataset):
+            text_len = len(entry[self.text_column_name])
+            for segment_begin in range(0, text_len, max_chunk_size):
+                segment_end = min(segment_begin + max_chunk_size, text_len)
+                self.index_map.append((i, segment_begin, segment_end))
 
     def __len__(self) -> int:
-        return len(self.dataset)
+        return len(self.index_map)
 
     def __getitem__(self, index) -> DatasetEntryLm:
-        text = self.dataset[index]["text"]
-        if self.char_max_len and len(text) > self.char_max_len:
-            offset = 0 if not self.random_offset else random.randrange(len(text) - self.char_max_len)
-            text = text[offset : offset + self.char_max_len]
+        i, segment_begin, segment_end = self.index_map[index]
+        text = self.dataset[i][self.text_column_name][segment_begin:segment_end]
         return DatasetEntryLm(text=text)
 
 
-class FanFics(Dataset):
-    name = "fanfics"
+class RedPajama(PretrainDataset):
+    name = "red_pajama"
 
     def __init__(
         self,
         cache_dir: str | Path,
         mode: str = "sft",
-        char_max_len: Optional[int] = 65536,
-        random_offset: bool = False,
+        max_chunk_size: Optional[int] = 64 * 1024,
     ) -> None:
-        super().__init__()
-        self.mode = mode
-        assert mode in ("sft", "rm", "rl")
-        self.char_max_len = char_max_len
-        self.random_offset = random_offset
-        self.dataset = load_dataset("atom-in-the-universe/fanfics-10k-50k", cache_dir=cache_dir)["train"]
+        super().__init__(
+            dataset_name="togethercomputer/RedPajama-Data-1T-Sample",
+            split="train",
+            text_column_name="text",
+            cache_dir=cache_dir,
+            mode=mode,
+            max_chunk_size=max_chunk_size,
+        )
 
-    def __len__(self) -> int:
-        return len(self.dataset)
 
-    def __getitem__(self, index) -> DatasetEntryLm:
-        text = self.dataset[index]["TEXT"]
-        if self.char_max_len and len(text) > self.char_max_len:
-            offset = 0 if not self.random_offset else random.randrange(len(text) - self.char_max_len)
-            text = text[offset : offset + self.char_max_len]
-        return DatasetEntryLm(text=text)
+class FanFics(PretrainDataset):
+    name = "fanfics"
+
+    def __init__(
+        self,
+        cache_dir: str | Path,
+        mode: str = "sft",
+        max_chunk_size: Optional[int] = 64 * 1024,
+    ) -> None:
+        super().__init__(
+            dataset_name="atom-in-the-universe/fanfics-10k-50k",
+            split="train",
+            text_column_name="TEXT",
+            cache_dir=cache_dir,
+            mode=mode,
+            max_chunk_size=max_chunk_size,
+        )
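
As a reference for reviewers, here is a minimal standalone sketch of the chunk-index scheme this PR introduces, using a plain list of strings in place of the Hugging Face load_dataset call; build_index_map and toy_rows are hypothetical names used only for illustration, not part of the PR:

def build_index_map(rows: list[str], max_chunk_size: int) -> list[tuple[int, int, int]]:
    """Return (row_index, segment_begin, segment_end) triples covering each row
    in chunks of at most max_chunk_size characters, mirroring PretrainDataset.__init__."""
    index_map = []
    for i, text in enumerate(rows):
        for segment_begin in range(0, len(text), max_chunk_size):
            segment_end = min(segment_begin + max_chunk_size, len(text))
            index_map.append((i, segment_begin, segment_end))
    return index_map

# toy data: one long row and one short row (hypothetical example values)
toy_rows = ["a" * 10, "b" * 3]
index_map = build_index_map(toy_rows, max_chunk_size=4)
print(index_map)                                    # [(0, 0, 4), (0, 4, 8), (0, 8, 10), (1, 0, 3)]
print([toy_rows[i][b:e] for i, b, e in index_map])  # ['aaaa', 'aaaa', 'aa', 'bbb']

One behavioral consequence worth noting: __len__ now counts chunks rather than source rows, so an epoch visits every chunk of every document exactly once, where the old char_max_len/random_offset code sampled a single window per document. Rows with empty text yield no chunks and silently drop out of the dataset.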