# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains content licensed by https://github.com/xinntao/BasicSR/blob/master/LICENSE/LICENSE

#### general settings
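# Trains an RRDB-based 8x super-resolution model with an L1 pixel loss only, on a
# 1% DF2K subset, using a single GPU and TensorBoard logging.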
name: train
use_tb_logger: true
model: SR
distortion: sr
scale: 8
gpu_ids: [ 0 ]

#### datasets
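# Both splits use the LRHR_PKL loader, which reads paired GT/LQ images from .pklv4
# pickle files. GT_size is the HR crop size, so at scale 8 each training sample is a
# 160x160 HR patch with a 20x20 LR counterpart. quant is presumably the quantization
# step applied to LQ pixel values, and n_max likely caps the number of validation pairs.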
datasets:
  train:
    name: CelebA_160_tr
    mode: LRHR_PKL
    dataroot_GT: ../datasets/DF2K-train-gt_1pct.pklv4
    dataroot_LQ: ../datasets/DF2K-train-x8_1pct.pklv4
    quant: 32

    use_shuffle: true
    n_workers: 3  # per GPU
    batch_size: 16
    GT_size: 160
    use_flip: true
    color: RGB

  val:
    name: CelebA_160_va
    mode: LRHR_PKL
    dataroot_GT: ../datasets/DF2K-valid-gt_1pct.pklv4
    dataroot_LQ: ../datasets/DF2K-valid-x8_1pct.pklv4
    quant: 32
    n_max: 20

#### network structures
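# RRDBNet is the ESRGAN-style generator: nf is the number of feature channels and
# nb the number of RRDB blocks; 64 / 23 is the standard configuration.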
network_G:
  which_model_G: RRDBNet
  in_nc: 3
  out_nc: 3
  nf: 64
  nb: 23

#### path
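# No pretrained generator is loaded (pretrain_model_G: ~); resume_state: auto should
# make the trainer pick up the most recent saved training state automatically, if one
# exists in the experiment folder.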
path:
  pretrain_model_G: ~
  strict_load: true
  resume_state: auto

#### training settings: learning rate scheme, loss
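# Cosine annealing with warm restarts: the 200k iterations are split into four 50k
# cosine periods (T_period), with restarts at 50k / 100k / 150k. restart_weights of 1
# mean each restart returns the learning rate to the full lr_G = 2e-4 before annealing
# back down to eta_min = 1e-7. warmup_iter: -1 disables warm-up.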
train:
  lr_G: !!float 2e-4
  lr_scheme: CosineAnnealingLR_Restart
  beta1: 0.9
  beta2: 0.99
  niter: 200000
  warmup_iter: -1  # no warm up
  T_period: [ 50000, 50000, 50000, 50000 ]
  restarts: [ 50000, 100000, 150000 ]
  restart_weights: [ 1, 1, 1 ]
  eta_min: !!float 1e-7

  pixel_criterion: l1
  pixel_weight: 1.0

  manual_seed: 10
  val_freq: !!float 5e3

#### logger
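# Frequencies are in training iterations: log every 100 iterations, write a checkpoint
# every 1,000 iterations.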
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 1e3