Siyun He committed on
Commit d04b2ef · 1 Parent(s): 35c6cce

add transform effect

Files changed (2)
  1. __pycache__/app.cpython-311.pyc +0 -0
  2. app.py +55 -6
__pycache__/app.cpython-311.pyc CHANGED
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
 
app.py CHANGED
@@ -88,18 +88,67 @@ def process_frame(frame):
 
     return frame
 
+# Transform function
+def transform_cv2(frame, transform):
+    if transform == "cartoon":
+        # prepare color
+        img_color = cv2.pyrDown(cv2.pyrDown(frame))
+        for _ in range(6):
+            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
+        img_color = cv2.pyrUp(cv2.pyrUp(img_color))
+
+        # prepare edges
+        img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+        img_edges = cv2.adaptiveThreshold(
+            cv2.medianBlur(img_edges, 7),
+            255,
+            cv2.ADAPTIVE_THRESH_MEAN_C,
+            cv2.THRESH_BINARY,
+            9,
+            2,
+        )
+        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
+        # combine color and edges
+        img = cv2.bitwise_and(img_color, img_edges)
+        return img
+    elif transform == "edges":
+        # perform edge detection
+        img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
+        return img
+    else:
+        return frame
+
+
+
 # Gradio webcam input
-def webcam_input(frame):
+def webcam_input(frame, transform):
     frame = process_frame(frame)
+    frame = transform_cv2(frame, transform)
     return frame
 
 # Gradio Interface
 with gr.Blocks() as demo:
-    with gr.Row():
-        with gr.Column():
-            input_img = gr.Image(label="Input", sources="webcam", streaming=True)
+    with gr.Column(elem_classes=["my-column"]):
+        with gr.Group(elem_classes=["my-group"]):
+            transform = gr.Dropdown(choices=["cartoon", "edges", "none"],
+                                    value="none", label="Transformation")
+            input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
             next_button = gr.Button("Next Glasses")
-            input_img.stream(webcam_input, [input_img], [input_img], stream_every=0.1, concurrency_limit=30)
+            input_img.stream(webcam_input, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)
+
             next_button.click(change_glasses, [], [])
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
+
+
+
+# # Gradio Interface
+# with gr.Blocks() as demo:
+#     with gr.Row():
+#         with gr.Column():
+#             input_img = gr.Image(label="Input", sources="webcam", streaming=True)
+#             next_button = gr.Button("Next Glasses")
+#             input_img.stream(webcam_input, [input_img], [input_img], stream_every=0.1, concurrency_limit=30)
+#             next_button.click(change_glasses, [], [])
+# if __name__ == "__main__":
+#     demo.launch(share=True)
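
For reference, a minimal standalone sketch of the cartoon effect that transform_cv2 applies to each streamed frame. This is an illustration only, not part of the commit: the input/output file names are hypothetical, cv2.imread loads BGR (the app works on RGB frames delivered by Gradio, hence COLOR_RGB2GRAY in the diff above), and a resize guard is added in case the image dimensions are not divisible by 4.

# Hypothetical standalone test of the cartoon effect (not in the commit).
import cv2

frame = cv2.imread("input.jpg")  # hypothetical test image, loaded as BGR

# Color layer: downsample 4x, smooth repeatedly with a bilateral filter, upsample back.
img_color = cv2.pyrDown(cv2.pyrDown(frame))
for _ in range(6):
    img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
img_color = cv2.pyrUp(cv2.pyrUp(img_color))
# Guard against size drift when the dimensions are not divisible by 4.
img_color = cv2.resize(img_color, (frame.shape[1], frame.shape[0]))

# Edge layer: median blur plus adaptive threshold, converted back to 3 channels.
img_edges = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_edges = cv2.adaptiveThreshold(
    cv2.medianBlur(img_edges, 7), 255,
    cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 2,
)
img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)

# Keep color only where the edge mask is white.
cartoon = cv2.bitwise_and(img_color, img_edges)
cv2.imwrite("cartoon.jpg", cartoon)

Running the bilateral filter on a 4x-downsampled copy is presumably what keeps the effect cheap enough for the stream_every=0.1 cadence in the new interface, where input_img.stream now feeds both the webcam frame and the dropdown value into webcam_input, with time_limit=30 capping each streaming session.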