un1xx committed on
Commit ba92c7e · 1 Parent(s): 1ad8b6e

Delete marin-kitagawa_config

marin-kitagawa_config/config_file.toml DELETED
@@ -1,54 +0,0 @@
- [model_arguments]
- v2 = false
- v_parameterization = false
- pretrained_model_name_or_path = "/content/pretrained_model/Anything-v3-1.safetensors"
- vae = "/content/vae/anime.vae.pt"
-
- [additional_network_arguments]
- no_metadata = false
- unet_lr = 0.0005
- text_encoder_lr = 6e-5
- network_module = "networks.lora"
- network_dim = 8
- network_alpha = 16
- network_train_unet_only = false
- network_train_text_encoder_only = false
-
- [optimizer_arguments]
- optimizer_type = "AdamW8bit"
- learning_rate = 0.0005
- max_grad_norm = 1.0
- lr_scheduler = "cosine_with_restarts"
- lr_warmup_steps = 0
- lr_scheduler_num_cycles = 0
-
- [dataset_arguments]
- debug_dataset = false
-
- [training_arguments]
- output_dir = "/content/drive/MyDrive/LoRA/output"
- output_name = "marin-kitagawa"
- save_precision = "fp16"
- save_every_n_epochs = 1
- train_batch_size = 6
- max_token_length = 225
- mem_eff_attn = false
- xformers = true
- max_train_epochs = 10
- max_data_loader_n_workers = 8
- persistent_data_loader_workers = true
- gradient_checkpointing = false
- gradient_accumulation_steps = 1
- mixed_precision = "fp16"
- clip_skip = 2
- logging_dir = "/content/LoRA/logs"
- log_prefix = "marin-kitagawa"
- noise_offset = 0.001
- lowram = true
-
- [sample_prompt_arguments]
- sample_every_n_epochs = 999999
- sample_sampler = "ddim"
-
- [saving_arguments]
- save_model_as = "safetensors"
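For reference, the deleted training config uses the TOML layout consumed by kohya-ss sd-scripts style LoRA trainers. Below is a minimal sketch of loading and inspecting it in Python, assuming a local copy of the file at an illustrative path and Python 3.11+ for the standard-library tomllib parser; the path and the printed keys are assumptions for illustration, not part of this commit.

# Minimal sketch: inspect the (now deleted) training config with Python 3.11+'s
# stdlib TOML parser. The path below is a hypothetical local copy, not a path
# taken from this repository.
import tomllib

with open("marin-kitagawa_config/config_file.toml", "rb") as f:
    cfg = tomllib.load(f)

# Print a few of the hyperparameters defined in the config above.
print(cfg["additional_network_arguments"]["network_dim"])    # 8
print(cfg["additional_network_arguments"]["network_alpha"])  # 16
print(cfg["optimizer_arguments"]["learning_rate"])           # 0.0005
print(cfg["training_arguments"]["max_train_epochs"])         # 10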
marin-kitagawa_config/dataset_config.toml DELETED
@@ -1,15 +0,0 @@
- [[datasets]]
- resolution = 512
- caption_dropout_rate = 0
- caption_tag_dropout_rate = 0.2
- caption_dropout_every_n_epochs = 0
- color_aug = false
- [[datasets.subsets]]
- image_dir = "/content/LoRA/train_data"
- num_repeats = 4
- metadata_file = "/content/LoRA/meta_lat.json"
-
-
- [general]
- shuffle_caption = true
- keep_tokens = 1
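The two deleted files follow the common two-file convention of kohya-ss sd-scripts based trainers: one TOML for model, network, optimizer, and training arguments, and a second for the dataset definition. A hedged sketch of how such a pair is typically passed to the trainer is shown below; the script name train_network.py and the --config_file / --dataset_config flags reflect usual sd-scripts usage and are assumptions, not something recorded in this commit.

# Hedged sketch: launching a LoRA training run with the two config files.
# The script name and flag names follow common kohya-ss sd-scripts usage;
# adjust them to the actual trainer in use.
import subprocess

subprocess.run(
    [
        "accelerate", "launch", "train_network.py",
        "--config_file", "marin-kitagawa_config/config_file.toml",
        "--dataset_config", "marin-kitagawa_config/dataset_config.toml",
    ],
    check=True,
)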