# Uploaded by nehulagrawal ("Upload 11 files", commit 8209de3, verified)
#This is an example that uses the websockets api to know when a prompt execution is done
#Once the prompt execution is done it downloads the images using the /history endpoint
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
# Host:port of the running ComfyUI server (serves both the HTTP API and the websocket).
server_address = "127.0.0.1:8188"
# Random per-run client id so the server routes this session's websocket status messages to us.
client_id = str(uuid.uuid4())
def queue_prompt(prompt):
    """POST a workflow graph to ComfyUI's /prompt endpoint and return the parsed reply.

    The returned JSON includes the 'prompt_id' used later to poll /history.
    Fix: the original never closed the HTTP response object; a ``with`` block
    guarantees the connection is released even if the read fails.
    """
    payload = json.dumps({"prompt": prompt, "client_id": client_id}).encode("utf-8")
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=payload)
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
def get_image(filename, subfolder, folder_type):
    """Download one rendered image from the ComfyUI /view endpoint as raw bytes."""
    query = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    with urllib.request.urlopen(f"http://{server_address}/view?{query}") as resp:
        return resp.read()
def get_history(prompt_id):
    """Fetch and JSON-decode the execution record for *prompt_id* from /history."""
    with urllib.request.urlopen(f"http://{server_address}/history/{prompt_id}") as resp:
        return json.loads(resp.read())
def get_images(ws, prompt):
    """Queue *prompt*, block on the websocket until execution completes, then
    download every output image via /history.

    Returns a dict mapping node id -> list of raw image bytes (empty list for
    output nodes that produced no images).
    """
    prompt_id = queue_prompt(prompt)["prompt_id"]

    # Wait for the server to announce that our prompt finished executing.
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            continue  # binary frames carry preview data; skip them
        message = json.loads(frame)
        if (
            message["type"] == "executing"
            and message["data"]["node"] is None
            and message["data"]["prompt_id"] == prompt_id
        ):
            break  # a null node for our prompt_id signals completion

    history = get_history(prompt_id)[prompt_id]
    output_images = {}
    for node_id, node_output in history["outputs"].items():
        output_images[node_id] = [
            get_image(img["filename"], img["subfolder"], img["type"])
            for img in node_output.get("images", [])
        ]
    return output_images
prompt_text = """
{
"1": {
"inputs": {
"image": "/home/ml/Desktop/comfy_to_python/output.jpg",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"2": {
"inputs": {
"image": "/home/ml/Desktop/comfy_to_python/me.jpg",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"4": {
"inputs": {
"images": [
"5",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"5": {
"inputs": {
"enabled": true,
"swap_model": "inswapper_128.onnx",
"facedetection": "YOLOv5l",
"face_restore_model": "none",
"face_restore_visibility": 1,
"codeformer_weight": 1,
"detect_gender_input": "no",
"detect_gender_source": "no",
"input_faces_index": "0",
"source_faces_index": "0",
"console_log_level": 1,
"input_image": [
"1",
0
],
"source_image": [
"2",
0
]
},
"class_type": "ReActorFaceSwap",
"_meta": {
"title": "ReActor ๐ŸŒŒ Fast Face Swap"
}
}
}
"""
prompt = json.loads(prompt_text)

# Override the workflow's image paths: node 1 is the target image, node 2 the source face.
prompt["1"]["inputs"]["image"] = "/home/ml/Desktop/comfy_to_python/66.jpg"
prompt["2"]["inputs"]["image"] = "/home/ml/Desktop/comfy_to_python/me.jpg"
# # If you have a group input face image change the number here (1,2,3,..) if single then put 0.
# prompt["5"]["inputs"]["input_faces_index"] = ""
# # If you have a group source face image change the number here (1,2,3,..) if single then put 0.
# prompt["5"]["inputs"]["source_faces_index"] = ""

ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
try:
    images = get_images(ws, prompt)
finally:
    ws.close()  # release the websocket even if execution/download fails

# Save every returned image. Fix: the original saved each image to the same
# hard-coded "output1.jpg", so only the last one survived; number them instead.
# Imports hoisted out of the inner loop.
import io
from PIL import Image

count = 0
for node_id in images:
    for image_data in images[node_id]:
        count += 1
        image = Image.open(io.BytesIO(image_data))
        image.save(f"output{count}.jpg")
        # image.show()