---
license: unknown
---

it's like [imagenet.int8](https://huggingface.co/datasets/cloneofsimo/imagenet.int8), but:
* full in1k (including val+test): 1,431,168 samples
* flux-dev vae; latent channels (after dequant) scaled to N(0,1)
* quantization uses int8, not uint8 (scaling factor 127/4; see the sketch below)
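
the scaling factor 127/4 = 31.75 means the int8 range [-127, 127] covers about ±4σ of the normalized latent. a minimal sketch of the round trip (the dequantize direction matches the decode test below; the quantize direction is an assumption about how the data was produced, not published code):

```python
import torch

SCALE = 127 / 4  # = 31.75; maps ~N(0,1) values in roughly [-4, 4] onto int8 [-127, 127]

def dequantize(q: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
    """int8 -> ~N(0,1) float -> original per-channel latent distribution."""
    x = q.float() / SCALE      # undo the int8 scaling
    return x * std + mean      # undo the per-channel normalization

def quantize(x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
    """assumed inverse: normalize per channel, scale, round, clamp to int8."""
    z = (x - mean) / std
    return (z * SCALE).round().clamp(-127, 127).to(torch.int8)
```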

basic decode test:
```python
# huggingface-cli download --repo-type dataset main-horse/in1k.int8 --revision flux-1.0-dev --local-dir ./imagenet_int8
import torch
from streaming import StreamingDataset
import streaming.base.util as util
from diffusers import AutoencoderKL
from diffusers.image_processor import VaeImageProcessor

# Per-channel statistics used for the N(0,1) normalization
CHANNEL_MEANS = torch.tensor([
    -0.008, -1.337, 0.335, -0.077, -0.134, 0.320, -1.196, 0.545,
    -0.159, 0.284, 0.584, 0.062, -0.319, 0.001, -0.859, -0.246
], device='cuda')

CHANNEL_STDS = torch.tensor([
    1.996, 3.531, 2.036, 1.428, 1.510, 1.710, 3.108, 2.410,
    1.810, 2.670, 1.711, 1.941, 2.648, 2.734, 2.250, 2.479
], device='cuda')

def unnormalize_latents(x: torch.Tensor) -> torch.Tensor:
    """Undo the N(0,1) normalization (x has shape (16, 1024))."""
    return x * CHANNEL_STDS[:, None] + CHANNEL_MEANS[:, None]

# Clean up any stale shared memory left by previous StreamingDataset runs
util.clean_stale_shared_memory()

# Load the int8 dataset
remote_dir = "./imagenet_int8"
local_dir = "./local_test_dir2"
dataset = StreamingDataset(
    local=local_dir,
    remote=remote_dir,
    split=None,
    shuffle=False,
    batch_size=32
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=0)

# Take the sample at index 5 from the first batch
batch = next(iter(dataloader))
i = 5

# Convert the int8 latent back to float and reshape to (channels, pixels)
latent = batch['latent'][i].reshape(16, 1024).cuda().float()
label = batch['label'][i].item()

print(f"Processing sample {i} with label {label}")
print(f"Latent shape before processing: {latent.shape}")

# First undo the int8 quantization by scaling (127/4 = 31.75)
latent = latent / 31.75  # should now be ~N(0,1)

# Then undo the normalization to get back to the original latent distribution
latent = unnormalize_latents(latent)

# Reshape to the VAE's expected format (1, 16, 32, 32)
latent = latent.reshape(1, 16, 32, 32)
print(f"Final latent shape: {latent.shape}")
print(f"Latent stats after denorm: min={latent.min().item():.3f}, max={latent.max().item():.3f}, mean={latent.mean().item():.3f}, std={latent.std().item():.3f}")

# Load and set up the VAE
vae = AutoencoderKL.from_pretrained('black-forest-labs/FLUX.1-dev', subfolder='vae', device_map=0, attn_implementation='sdpa')
processor = VaeImageProcessor(vae_scale_factor=2 ** (len(vae.config.block_out_channels) - 1))

# Decode and save
with torch.no_grad():
    decoded = vae.decode(latent).sample
    img = processor.postprocess(decoded, do_denormalize=[True])[0]
    img.save("5th_image_from_int8.png")

print("Saved decoded image as 5th_image_from_int8.png")
```
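
for training rather than single-sample inspection, the same two-step dequantization applies batch-wise; a minimal sketch reusing `dataloader`, `CHANNEL_MEANS`, and `CHANNEL_STDS` from the script above (loop body illustrative):

```python
# batch-wise dequantization (sketch; reuses names from the decode test above)
for batch in dataloader:
    # int8 -> float -> ~N(0,1) -> original flux-dev latent distribution
    latents = batch['latent'].reshape(-1, 16, 1024).cuda().float() / 31.75
    latents = latents * CHANNEL_STDS[None, :, None] + CHANNEL_MEANS[None, :, None]
    latents = latents.reshape(-1, 16, 32, 32)  # (B, C, H, W)
    labels = batch['label'].cuda()
    # ... feed (latents, labels) to a model here ...
```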

open an issue if you see any technical problems (license-related issues not welcome)