import streamlit as st
from PIL import Image
import cv2
import numpy as np

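# Streamlit app: align two images with SIFT feature matching and a RANSAC
# homography, then visualize their pixel-level differences.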
def diff(image_file1, image_file2):
    method = st.sidebar.selectbox("Select visualization method", ["Rectangles", "Points"])
    if method == "Rectangles":
        rectangle_diff(image_file1, image_file2)
    elif method == "Points":
        point_diff(image_file1, image_file2)

def point_diff(image_file1, image_file2):
    # Convert to RGB so RGBA or grayscale uploads don't break cvtColor below.
    image1 = Image.open(image_file1).convert("RGB")
    image2 = Image.open(image_file2).convert("RGB")

    col1, col2 = st.columns(2)
    diff_threshold = st.sidebar.slider("Difference threshold", 10, 255, 50)

    with col1:
        st.image(image1, caption='First image', use_column_width=True)

    with col2:
        st.image(image2, caption='Second image', use_column_width=True)

    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
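
    # Detect SIFT keypoints and descriptors in both grayscale images.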
    sift = cv2.SIFT_create()

    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
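
    # Match descriptors with a FLANN kd-tree matcher and keep only matches
    # that pass Lowe's ratio test (best match clearly better than second best).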
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    good_matches = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good_matches.append(m)
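
    # With enough good matches, estimate a homography that maps image 1 onto
    # image 2's coordinate frame, warp image 1, and threshold the difference.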
    if len(good_matches) > 4:
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        # Note: findHomography can return None if RANSAC finds no consensus.
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        height, width, channels = image2_cv.shape
        transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))

        transformed_gray = cv2.cvtColor(transformed_img, cv2.COLOR_BGR2GRAY)

        # Absolute per-pixel difference, binarized with the user-chosen threshold.
        img_diff = cv2.absdiff(transformed_gray, gray2)

        _, img_th = cv2.threshold(img_diff, diff_threshold, 255, cv2.THRESH_BINARY)

        # Opening removes isolated noise pixels; dilation reconnects nearby blobs.
        kernel = np.ones((3, 3), np.uint8)
        img_th = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, kernel, iterations=2)
        img_th = cv2.dilate(img_th, kernel, iterations=1)
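
        # Mark each remaining difference pixel with a small red dot.
        # np.where yields (row, col), while cv2.circle expects (x, y).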
        points = np.column_stack(np.where(img_th > 0))

        for point in points:
            cv2.circle(transformed_img, (point[1], point[0]), 2, (0, 0, 255), -1)

        st.image(cv2.cvtColor(transformed_img, cv2.COLOR_BGR2RGB), caption='Transformed image with differences', use_column_width=True)

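# Merge nearby rectangles of similar size into single rectangles, then filter
# out implausible ones. Returns the surviving (x, y, w, h) tuples.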
def merge(rectangles, image_area, dist_threshold=10, diff_rate=0.5):
    merged_rec = []

    used = [False] * len(rectangles)

    for i, rect1 in enumerate(rectangles):
        if used[i]:
            continue

        x1, y1, w1, h1 = rect1
        area1 = w1 * h1
        merged = False

        for j, rect2 in enumerate(rectangles):
            if i == j or used[j]:
                continue

            x2, y2, w2, h2 = rect2
            area2 = w2 * h2

            center1 = np.array([x1 + w1 / 2, y1 + h1 / 2])
            center2 = np.array([x2 + w2 / 2, y2 + h2 / 2])
            distance = np.linalg.norm(center1 - center2)

            if distance < dist_threshold and abs(area1 - area2) < diff_rate * max(area1, area2):
                new_x = min(x1, x2)
                new_y = min(y1, y2)
                new_w = max(x1 + w1, x2 + w2) - new_x
                new_h = max(y1 + h1, y2 + h2) - new_y
                merged_rec.append((new_x, new_y, new_w, new_h))
                used[i] = used[j] = True
                merged = True
                break

        if not merged:
            merged_rec.append(rect1)
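
    # Second pass: drop rectangles covering a third or more of the image, and
    # rectangles much smaller in area than some other candidate.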
    filter_rect = []
    for rect in merged_rec:
        x, y, w, h = rect
        area = w * h
        ok = True

        if area >= (1 / 3) * image_area:
            ok = False

        for other_rect in merged_rec:
            if rect == other_rect:
                continue
            ox, oy, ow, oh = other_rect
            other_area = ow * oh
            if area < other_area and abs(area - other_area) > diff_rate * max(area, other_area):
                ok = False
                break

        if ok:
            filter_rect.append(rect)

    return filter_rect

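# Visualize differences as bounding rectangles drawn on the aligned image.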
def rectangle_diff(image_file1, image_file2):
    # Convert to RGB so RGBA or grayscale uploads don't break cvtColor below.
    image1 = Image.open(image_file1).convert("RGB")
    image2 = Image.open(image_file2).convert("RGB")

    col1, col2 = st.columns(2)
    diff_threshold = st.sidebar.slider("Difference threshold", 10, 255, 50)
    distance_threshold = st.sidebar.slider("Rectangle merge distance", 1, 50, 10)
    size_difference_ratio = st.sidebar.slider("Size difference ratio", 0.0, 1.0, 0.5)

    with col1:
        st.image(image1, caption='First image', use_column_width=True)

    with col2:
        st.image(image2, caption='Second image', use_column_width=True)

    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
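
    # Same alignment pipeline as point_diff: SIFT features, FLANN matching,
    # Lowe's ratio test, then a RANSAC homography.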
    sift = cv2.SIFT_create()

    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    good_matches = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good_matches.append(m)
    if len(good_matches) > 4:
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        height, width, channels = image2_cv.shape
        transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))

        transformed_gray = cv2.cvtColor(transformed_img, cv2.COLOR_BGR2GRAY)

        img_diff = cv2.absdiff(transformed_gray, gray2)

        _, img_th = cv2.threshold(img_diff, diff_threshold, 255, cv2.THRESH_BINARY)

        kernel = np.ones((3, 3), np.uint8)
        img_th = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, kernel, iterations=2)
        img_th = cv2.dilate(img_th, kernel, iterations=1)
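
        # Turn the difference mask into bounding rectangles, merge and filter
        # them, and draw the survivors on the aligned image.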
        contours, _ = cv2.findContours(img_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        rectangles = [cv2.boundingRect(contour) for contour in contours]

        image_area = height * width
        filtered_rectangles = merge(rectangles, image_area, distance_threshold, size_difference_ratio)

        for x, y, w, h in filtered_rectangles:
            cv2.rectangle(transformed_img, (x, y), (x + w, y + h), (0, 0, 255), 2)

        st.image(cv2.cvtColor(transformed_img, cv2.COLOR_BGR2RGB), caption='Transformed image with differences', use_column_width=True)