Shiroi-max committed
Commit 2dfe4e8 · 1 Parent(s): d09e5d1
Setting
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
-datasets:
-- ylecun/mnist
 language:
 - en
 library_name: diffusers
+datasets:
+- ylecun/mnist
 pipeline_tag: image-to-image
----
+---
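The metadata change above moves the `datasets:` block after `library_name:`, keeping the card pointed at ylecun/mnist. For reference, a minimal sketch of loading that dataset with the `datasets` library (the repo id comes from the front matter; the rest is illustrative):

```python
from datasets import load_dataset

# Pull the MNIST mirror referenced in the README front matter.
mnist = load_dataset("ylecun/mnist")
sample = mnist["train"][0]
print(sample["image"].size, sample["label"])  # (28, 28) and a digit label 0-9
```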
logs/train_example/{events.out.tfevents.1717883137.federatedlearning.3604610.0 → events.out.tfevents.1717957992.federatedlearning.3567175.0}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e74fd609038695d7ce5858ced8689f9ba7e04c5a4f8823a5d98fd5f135e72a7d
+size 595328
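The renamed file is a TensorBoard event log tracked through Git LFS. With the log directory checked out locally, a sketch of listing what it recorded (the path is illustrative):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the directory holding the tfevents file.
ea = EventAccumulator("logs/train_example")
ea.Reload()
print(ea.Tags())  # e.g. the scalar tags logged during training
```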
model_index.json
CHANGED
@@ -6,7 +6,7 @@
     "DDPMScheduler"
   ],
   "unet": [
-
-
+    "diffusers",
+    "UNet2DModel"
   ]
 }
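With `"unet"` now mapped to the `diffusers` library and the `UNet2DModel` class, `model_index.json` tells loaders where the component lives. A sketch of resolving it by hand (the repo id is a placeholder for this model's Hub id):

```python
from diffusers import UNet2DModel

# Load the component exactly as model_index.json declares it:
# library "diffusers", class "UNet2DModel", weights under unet/.
unet = UNet2DModel.from_pretrained("Shiroi-max/<repo-id>", subfolder="unet")
```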
pipeline.py
DELETED
@@ -1,62 +0,0 @@
-from typing import List, Optional, Tuple, Union
-from diffusers import DiffusionPipeline, ImagePipelineOutput
-from diffusers.utils.torch_utils import randn_tensor
-
-import torch
-
-
-class DDPMConditionalPipeline(DiffusionPipeline):
-    model_cpu_offload_seq = "unet"
-
-    def __init__(self, unet, scheduler):
-        super().__init__()
-        self.register_modules(unet=unet, scheduler=scheduler)
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        label,
-        batch_size: int = 1,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        num_inference_steps: int = 1000,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-    ) -> Union[ImagePipelineOutput, Tuple]:
-        # Sample gaussian noise to begin loop
-        if isinstance(self.unet.model.sample_size, int):
-            image_shape = (
-                batch_size,
-                self.unet.model.in_channels,
-                self.unet.model.sample_size,
-                self.unet.model.sample_size,
-            )
-        else:
-            image_shape = (
-                batch_size,
-                self.unet.model.in_channels,
-                *self.unet.model.sample_size,
-            )
-
-        image = randn_tensor(image_shape, generator=generator)
-
-        # set step values
-        self.scheduler.set_timesteps(num_inference_steps)
-
-        for t in self.progress_bar(self.scheduler.timesteps):
-            # 1. predict noise model_output
-            model_output = self.unet(image, t, label).sample
-
-            # 2. compute previous image: x_t -> x_t-1
-            image = self.scheduler.step(
-                model_output, t, image, generator=generator
-            ).prev_sample
-
-        image = (image / 2 + 0.5).clamp(0, 1)
-        image = image.cpu().permute(0, 2, 3, 1).numpy()
-        if output_type == "pil":
-            image = self.numpy_to_pil(image)
-
-        if not return_dict:
-            return (image,)
-
-        return ImagePipelineOutput(images=image)
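For context: the deleted pipeline reads `self.unet.model.sample_size` and calls `self.unet(image, t, label)`, so it expects a wrapper module that exposes the raw `UNet2DModel` as `.model` and takes the class label as a third argument. A hypothetical sketch of how it could have been driven, with the class definition above in scope (the wrapper and repo id are assumptions, not part of this repo):

```python
import torch
from torch import nn
from diffusers import DDPMScheduler, UNet2DModel


class ClassConditionedUNet(nn.Module):
    # Hypothetical wrapper matching what the deleted pipeline expects.
    def __init__(self, model: UNet2DModel):
        super().__init__()
        self.model = model

    def forward(self, sample, timestep, label):
        # Route the label into UNet2DModel's built-in class embedding.
        return self.model(sample, timestep, class_labels=label)


unet = ClassConditionedUNet(
    UNet2DModel.from_pretrained("Shiroi-max/<repo-id>", subfolder="unet")
)
pipe = DDPMConditionalPipeline(unet=unet, scheduler=DDPMScheduler())

label = torch.tensor([3])  # MNIST digit class, 0-9
image = pipe(label, batch_size=1, num_inference_steps=1000).images[0]
```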
unet/config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "_class_name": "UNet2DModel",
+  "_diffusers_version": "0.28.0",
+  "act_fn": "silu",
+  "add_attention": true,
+  "attention_head_dim": 8,
+  "attn_norm_num_groups": null,
+  "block_out_channels": [
+    32,
+    64,
+    64
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "AttnDownBlock2D",
+    "AttnDownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "downsample_type": "conv",
+  "dropout": 0.0,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 1,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": 10,
+  "num_train_timesteps": null,
+  "out_channels": 1,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 32,
+  "time_embedding_type": "positional",
+  "up_block_types": [
+    "AttnUpBlock2D",
+    "AttnUpBlock2D",
+    "UpBlock2D"
+  ],
+  "upsample_type": "conv"
+}
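The new config describes a compact class-conditional UNet: single-channel 32×32 samples (MNIST digits, presumably resized from 28×28) and `num_class_embeds: 10` for the ten digit classes. A sketch of instantiating it straight from this file (the local path is illustrative):

```python
import json

import torch
from diffusers import UNet2DModel

# Rebuild the architecture from the committed config (no weights loaded).
with open("unet/config.json") as f:
    config = json.load(f)
model = UNet2DModel.from_config(config)

# One forward pass on dummy inputs: noise, a timestep, and a digit label.
noise = torch.randn(1, config["in_channels"], config["sample_size"], config["sample_size"])
out = model(noise, timestep=10, class_labels=torch.tensor([7])).sample
print(out.shape)  # torch.Size([1, 1, 32, 32])
```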
unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a58e881a5db3333ead19d140b55f8744ab337934c05d4cd30ddc3dc7a5734415
+size 6865780