Dataset preview (schema). Each record has four fields:

| field | type | description |
|---|---|---|
| `id` | int32 | example index |
| `image` | image, 256×256 px | center-cropped RGB image |
| `moments` | 3-D array | SD-VAE posterior moments, shape `[8, 32, 32]` |
| `contexts` | list | CLIP-L/14 caption features, shape `[5, 77, 768]` (5 captions per image) |
U-ViT-coco
Download the data
Download the folders `datasets`, `fid_stats`, and `stable-diffusion`, and put them in an `assets` folder.
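If the folders are hosted in this dataset repo, one way to fetch them is `huggingface_hub.snapshot_download`. This is a sketch; the repo id below is a placeholder, not the actual id:

```python
from huggingface_hub import snapshot_download

# Placeholder repo id -- substitute the real dataset repo for this card.
snapshot_download(
    repo_id="<user>/U-ViT-coco",
    repo_type="dataset",
    local_dir="assets",
)
```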
Pre-processed MSCOCO dataset, built with modified code from U-ViT:
- RGB images are center-cropped to 256×256 resolution before saving
- Latents are pre-extracted with the SD-VAE encoder and stored as posterior moments (see the sketch after this list)
- Prompt features are extracted with CLIP ViT-L/14
- Intended for diffusion model training (with REPA / REPA-E support)
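The card does not ship the preprocessing script, so the following is a minimal sketch of how each record was plausibly produced. The model ids (`stabilityai/sd-vae-ft-mse`, `openai/clip-vit-large-patch14`), the crop helper, and the function names are assumptions, not the exact U-ViT pipeline:

```python
import numpy as np
import torch
from PIL import Image
from diffusers import AutoencoderKL
from transformers import CLIPTextModel, CLIPTokenizer

# Assumed checkpoints; the original pipeline may use the CompVis SD weights instead.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").eval()
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").eval()

def center_crop_256(img):
    # Resize the short side to 256, then center-crop to 256x256.
    w, h = img.size
    s = 256 / min(w, h)
    img = img.resize((round(w * s), round(h * s)), Image.BICUBIC)
    w, h = img.size
    left, top = (w - 256) // 2, (h - 256) // 2
    return img.crop((left, top, left + 256, top + 256))

@torch.no_grad()
def encode(img, captions):
    x = torch.from_numpy(np.array(center_crop_256(img))).float()
    x = x.permute(2, 0, 1)[None] / 127.5 - 1.0            # [1, 3, 256, 256] in [-1, 1]
    # .parameters concatenates mean and logvar along channels -> [8, 32, 32]
    moments = vae.encode(x).latent_dist.parameters[0]
    tokens = tokenizer(captions, padding="max_length", max_length=77,
                       truncation=True, return_tensors="pt")
    contexts = text_encoder(tokens.input_ids).last_hidden_state  # [5, 77, 768]
    return moments.numpy(), contexts.numpy()
```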
Dataset example code:
```python
import os
import random

import numpy as np
import torch
from datasets import load_from_disk
from torch.utils.data import Dataset


class DatasetFactory(object):

    def __init__(self):
        self.train = None
        self.test = None

    def get_split(self, split, labeled=False):
        if split == "train":
            dataset = self.train
        elif split == "test":
            dataset = self.test
        else:
            raise ValueError(f"unknown split: {split}")

        if self.has_label:
            return dataset  # if labeled else UnlabeledDataset(dataset)
        else:
            assert not labeled
            return dataset

    def unpreprocess(self, v):  # to B C H W and [0, 1]
        v = 0.5 * (v + 1.)
        v.clamp_(0., 1.)
        return v

    @property
    def has_label(self):
        return True

    @property
    def data_shape(self):
        raise NotImplementedError

    @property
    def data_dim(self):
        return int(np.prod(self.data_shape))

    @property
    def fid_stat(self):
        return None

    def sample_label(self, n_samples, device):
        raise NotImplementedError

    def label_prob(self, k):
        raise NotImplementedError


class HFMSCOCOFeatureDataset(Dataset):
    # image latents are stored as VAE moments; a latent is sampled from them at training time

    def __init__(self, root):
        self.root = root
        self.datasets = load_from_disk(root)

    def __len__(self):
        return len(self.datasets)

    def __getitem__(self, index):
        batch = self.datasets[index]
        x = batch["image"]              # PIL.Image
        z = np.array(batch["moments"])  # np.array [8, 32, 32] (mean ++ logvar)
        cs = batch["contexts"]          # 5 caption features, each [77, 768]

        x = np.array(x)                                     # H W C
        x = x.reshape(*x.shape[:2], -1).transpose(2, 0, 1)  # C H W
        k = random.randint(0, len(cs) - 1)                  # pick one of the 5 captions
        c = np.array(cs[k])

        x = torch.from_numpy(x)
        z = torch.from_numpy(z).float()
        c = torch.from_numpy(c).float()
        return x, z, c


class CFGDataset(Dataset):  # for classifier-free guidance

    def __init__(self, dataset, p_uncond, empty_token):
        self.dataset = dataset
        self.p_uncond = p_uncond
        self.empty_token = empty_token

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        x, z, y = self.dataset[item]
        if random.random() < self.p_uncond:  # drop the condition with prob p_uncond
            y = self.empty_token
        return x, z, y


class MSCOCO256Features(DatasetFactory):
    # moments computed by the Stable Diffusion image encoder; contexts computed by CLIP

    def __init__(self, path, cfg=True, p_uncond=0.1, mode='train'):
        super().__init__()
        print('Prepare dataset...')
        if mode == 'val':
            self.test = HFMSCOCOFeatureDataset(os.path.join(path, 'val'))
            assert len(self.test) == 40504
            self.empty_context = torch.from_numpy(np.load(os.path.join(path, 'empty_context.npy'))).float()
        else:
            self.train = HFMSCOCOFeatureDataset(os.path.join(path, 'train'))
            assert len(self.train) == 82783
            self.empty_context = torch.from_numpy(np.load(os.path.join(path, 'empty_context.npy'))).float()
            # CFG wrapping only applies to the train split, so it lives inside
            # this branch (wrapping a None self.train in val mode would break).
            if cfg:
                assert p_uncond is not None
                print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')
                self.train = CFGDataset(self.train, p_uncond, self.empty_context)

    @property
    def data_shape(self):
        return 4, 32, 32

    @property
    def fid_stat(self):
        return 'assets/fid_stats/fid_stats_mscoco256_val.npz'
```
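A minimal usage sketch under stated assumptions: the dataset path below is a placeholder, and the `0.18215` latent scale is the standard Stable Diffusion VAE convention (also used by U-ViT). The `moments` tensor stacks the VAE posterior mean and log-variance along the channel axis, so a training latent is drawn by reparameterization:

```python
import torch
from torch.utils.data import DataLoader

# Placeholder path -- point this at the downloaded dataset folder.
dataset = MSCOCO256Features(path="assets/datasets/coco256_features", cfg=True, p_uncond=0.1)
loader = DataLoader(dataset.get_split("train"), batch_size=32, shuffle=True, num_workers=4)

for x, z, c in loader:
    # z is [B, 8, 32, 32]: posterior mean and logvar stacked along channels.
    mean, logvar = z.chunk(2, dim=1)                              # [B, 4, 32, 32] each
    latent = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)
    latent = latent * 0.18215                                     # SD-VAE latent scaling
    # ... run one diffusion training step on (latent, c) ...
    break
```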