{
  "1": {
    "inputs": {
      "model_name": "sam_hq_vit_h (2.57GB)"
    },
    "class_type": "SAMModelLoader (segment anything)",
    "_meta": {
      "title": "SAMModelLoader (segment anything)"
    }
  },
  "2": {
    "inputs": {
      "model_name": "GroundingDINO_SwinB (938MB)"
    },
    "class_type": "GroundingDinoModelLoader (segment anything)",
    "_meta": {
      "title": "GroundingDinoModelLoader (segment anything)"
    }
  },
  "3": {
    "inputs": {
      "prompt": "t-shirt",
      "threshold": 0.5,
      "sam_model": [
        "1",
        0
      ],
      "grounding_dino_model": [
        "2",
        0
      ],
      "image": [
        "4",
        0
      ]
    },
    "class_type": "GroundingDinoSAMSegment (segment anything)",
    "_meta": {
      "title": "GroundingDinoSAMSegment (segment anything)"
    }
  },
  "4": {
    "inputs": {
      "image": "00079-2293829935.png",
      "upload": "image"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "5": {
    "inputs": {
      "model": "densepose_r50_fpn_dl.torchscript",
      "cmap": "Parula (CivitAI)",
      "resolution": 832,
      "image": [
        "4",
        0
      ]
    },
    "class_type": "DensePosePreprocessor",
    "_meta": {
      "title": "DensePose Estimator"
    }
  },
  "6": {
    "inputs": {
      "image": "awrwS6PNov44MbWO.webp",
      "upload": "image"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "9": {
    "inputs": {
      "mask": [
        "3",
        1
      ]
    },
    "class_type": "MaskToImage",
    "_meta": {
      "title": "Convert Mask to Image"
    }
  },
  "10": {
    "inputs": {
      "images": [
        "13",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "13": {
    "inputs": {
      "garment_description": "model is wearing a tee-shirt",
      "negative_prompt": "nsfw, naked, nudity, worst quality, low quality, poor quality, normal quality, depth of field, jpeg artefacts, jpeg, compression artefacts, chromatic aberration, diffusion, diffraction, distortion, noise, gaussian, blurry, cropped",
      "width": 832,
      "height": 1216,
      "num_inference_steps": 30,
      "guidance_scale": 2,
      "strength": 1,
      "seed": 615869150627749,
      "pipeline": [
        "15",
        0
      ],
      "human_img": [
        "4",
        0
      ],
      "pose_img": [
        "5",
        0
      ],
      "mask_img": [
        "9",
        0
      ],
      "garment_img": [
        "6",
        0
      ]
    },
    "class_type": "IDM-VTON",
    "_meta": {
      "title": "Run IDM-VTON Inference"
    }
  },
  "15": {
    "inputs": {
      "ckpt_path": "yisol/IDM-VTON",
      "weight_dtype": "float16"
    },
    "class_type": "PipelineLoader",
    "_meta": {
      "title": "Load IDM-VTON Pipeline"
    }
  }
}