Brasd99 committed

Commit: 10076ea
Parent(s): 8456408

Refactoring

Files changed (3):
  1. app.py +15 -13
  2. config.json +5 -0
  3. helpers/processor.py +23 -23
app.py CHANGED
@@ -1,27 +1,29 @@
-import gradio as gr
 import os
-import wget
 import subprocess
-subprocess.call(['pip', 'install', 'git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose'])
+from typing import Dict
+import json
+import numpy as np
+import wget
+import gradio as gr
 from helpers.processor import TextureProcessor
 
-def image_processing(person_img, model_img):
+#subprocess.call(['pip', 'install', 'git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose'])
+
+def image_processing(person_img: np.ndarray, model_img: np.ndarray) -> np.ndarray:
     return texture_processor.extract(person_img, model_img)
 
-def load_model(current_path):
+def load_model(current_path: str, config: Dict) -> None:
     data_path = os.path.join(current_path, 'data')
     if not os.path.isdir(data_path):
         os.mkdir(data_path)
-    loader_dict = {
-        'config.yaml': 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml',
-        'weights.pkl': 'https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_WC1M_s1x/217144516/model_final_48a9d9.pkl',
-        'Base-DensePose-RCNN-FPN.yaml': 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml'
-    }
-    for filename, url in loader_dict.items():
+    for filename, url in config.items():
         wget.download(url, os.path.join(data_path, filename))
 
+with open("config.json", "r") as f:
+    config = json.load(f)
+
 current_path = os.getcwd()
-load_model(current_path)
+load_model(current_path, config)
 densepose_config = os.path.join(current_path, 'data', 'config.yaml')
 densepose_weights = os.path.join(current_path, 'data', 'weights.pkl')
 
@@ -29,7 +31,7 @@ texture_processor = TextureProcessor(densepose_config, densepose_weights)
 
 title = '<h1 style="text-align:center">JustClothify</h1>'
 
-with gr.Blocks(theme='soft', title='AnswerMate') as blocks:
+with gr.Blocks(theme='soft', title='JustClothify') as blocks:
     gr.HTML(title)
     gr.Markdown('Upload an image of a person and an image of a model with clothes, the system will generate an image of a person wearing these clothes.')
     with gr.Row():
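Note: the hunk is truncated at "with gr.Row():", so the rest of the Blocks layout is not part of this diff. For readers unfamiliar with Gradio, a minimal sketch of how such a layout is typically wired to image_processing is shown below; the component names, labels and launch call are assumptions for illustration, not the file's actual contents.

# Illustrative sketch only - the layout after "with gr.Row():" is not shown in this commit.
with gr.Blocks(theme='soft', title='JustClothify') as blocks:
    gr.HTML(title)
    gr.Markdown('Upload an image of a person and an image of a model with clothes, '
                'the system will generate an image of a person wearing these clothes.')
    with gr.Row():
        person_image = gr.Image(label='Person image', type='numpy')  # hypothetical component names
        model_image = gr.Image(label='Model image', type='numpy')
    result_image = gr.Image(label='Result', type='numpy')
    run_button = gr.Button('Generate')
    run_button.click(fn=image_processing,
                     inputs=[person_image, model_image],
                     outputs=result_image)

blocks.launch()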
config.json ADDED
@@ -0,0 +1,5 @@
+{
+    "config.yaml": "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml",
+    "weights.pkl": "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_WC1M_s1x/217144516/model_final_48a9d9.pkl",
+    "Base-DensePose-RCNN-FPN.yaml": "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml"
+}
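Each key in this mapping is the local filename written into data/, and each value is the corresponding download URL; the refactored load_model in app.py simply iterates config.items(). A small sanity-check sketch (not part of the commit) for anyone editing this file:

import json

# Confirm every entry maps a filename to an HTTPS URL before app.py
# tries to download it with wget.
with open('config.json') as f:
    assets = json.load(f)

for filename, url in assets.items():
    assert url.startswith('https://'), f'unexpected URL for {filename}'
    print(f'{filename} <- {url}')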
helpers/processor.py CHANGED
@@ -1,32 +1,26 @@
+import io
 import cv2
 import imageio
 import numpy as np
 import torch
-import io
-
+from typing import Dict, List
+from fvcore.common.config import CfgNode
 from detectron2.config import get_cfg
 from detectron2.engine.defaults import DefaultPredictor
 from detectron2.structures.instances import Instances
-
 from densepose import add_densepose_config
 from densepose.vis.base import CompoundVisualizer
 from densepose.vis.densepose_outputs_vertex import get_texture_atlases
-from densepose.vis.densepose_results_textures import (
-    DensePoseResultsVisualizerWithTexture as dp_iuv_texture
-)
-from densepose.vis.extractor import (
-    CompoundExtractor,
-    create_extractor,
-    DensePoseResultExtractor
-)
+from densepose.vis.densepose_results_textures import DensePoseResultsVisualizerWithTexture as dp_iuv_texture
+from densepose.vis.extractor import CompoundExtractor, create_extractor, DensePoseResultExtractor
 
 class TextureProcessor:
-    def __init__(self, config, weights):
+    def __init__(self, config: str, weights: str) -> None:
         self.config = self.get_config(config, weights)
         self.predictor = DefaultPredictor(self.config)
         self.extractor = DensePoseResultExtractor()
 
-    def process_texture(self, image):
+    def process_texture(self, image: np.ndarray) -> np.ndarray:
         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
         output = self.execute(image)
         if 'pred_densepose' in output:
@@ -44,7 +38,7 @@ class TextureProcessor:
         texture_atlas = self.process_texture(model_img)
         return self.overlay_texture(texture_atlas, person_img)
 
-    def overlay_texture(self, texture_atlas, original_image):
+    def overlay_texture(self, texture_atlas: np.ndarray, original_image: np.ndarray) -> np.ndarray:
         texture_atlas[:, :, :3] = texture_atlas[:, :, 2::-1]
         texture_atlases_dict = get_texture_atlases(None)
         vis = dp_iuv_texture(
@@ -68,17 +62,17 @@ class TextureProcessor:
 
         return image_vis
 
-    def parse_iuv(self, result):
+    def parse_iuv(self, result: Dict) -> np.ndarray:
         i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
         uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
         iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
         iuv = np.transpose(iuv, (1, 2, 0))
         return iuv
 
-    def parse_bbox(self, result):
+    def parse_bbox(self, result: Dict) -> np.ndarray:
         return result['pred_boxes_XYXY'][0].cpu().numpy()
 
-    def interpolate_tex(self, tex):
+    def interpolate_tex(self, tex: np.ndarray) -> np.ndarray:
         valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
         radius_increase = 10
         kernel = np.ones((radius_increase, radius_increase), np.uint8)
@@ -96,12 +90,18 @@ class TextureProcessor:
 
         return actual_part
 
-    def concat_textures(self, array):
+    def concat_textures(self, array: List[np.ndarray]) -> np.ndarray:
         texture_rows = [np.concatenate(array[i:i+6], axis=1) for i in range(0, 24, 6)]
         texture = np.concatenate(texture_rows, axis=0)
         return texture
 
-    def get_texture(self, im, iuv, bbox, tex_part_size=200):
+    def get_texture(
+        self,
+        im: np.ndarray,
+        iuv: np.ndarray,
+        bbox: List[int],
+        tex_part_size: int = 200) -> np.ndarray:
+
         im = im.transpose(2, 1, 0) / 255
         image_w, image_h = im.shape[1], im.shape[2]
         bbox[2] = bbox[2] - bbox[0]
@@ -142,14 +142,14 @@ class TextureProcessor:
 
         return tex
 
-    def create_iuv(self, results, image):
+    def create_iuv(self, results: Dict, image: np.ndarray) -> np.ndarray:
         iuv = self.parse_iuv(results)
         bbox = self.parse_bbox(results)
         uv_texture = self.get_texture(image, iuv, bbox)
         uv_texture = uv_texture.transpose([1, 0, 2])
         return uv_texture
 
-    def get_config(self, config_fpath, model_fpath):
+    def get_config(self, config_fpath: str, model_fpath: str) -> CfgNode:
         cfg = get_cfg()
         add_densepose_config(cfg)
         cfg.merge_from_file(config_fpath)
@@ -158,12 +158,12 @@ class TextureProcessor:
         cfg.freeze()
         return cfg
 
-    def execute(self, image):
+    def execute(self, image: np.ndarray) -> Dict:
         with torch.no_grad():
             outputs = self.predictor(image)['instances']
             return self.execute_on_outputs(outputs)
 
-    def execute_on_outputs(self, outputs: Instances):
+    def execute_on_outputs(self, outputs: Instances) -> Dict:
         result = {}
         if outputs.has('scores'):
             result['scores'] = outputs.get('scores').cpu()
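For context, app.py in this same commit constructs TextureProcessor from the downloaded DensePose config and weights and calls extract() on two images. A minimal end-to-end sketch under those assumptions follows; the input paths are placeholders and the final write-out is illustrative (the channel order of the returned array may need adjusting):

import os
import cv2
from helpers.processor import TextureProcessor

current_path = os.getcwd()
processor = TextureProcessor(
    os.path.join(current_path, 'data', 'config.yaml'),  # DensePose config downloaded by load_model
    os.path.join(current_path, 'data', 'weights.pkl'))  # matching model weights

person_img = cv2.imread('person.jpg')  # hypothetical input images
model_img = cv2.imread('model.jpg')

# extract() maps the clothing texture found in model_img onto person_img
# and returns the composited image as a NumPy array.
result = processor.extract(person_img, model_img)
cv2.imwrite('result.jpg', result)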