"""Streamlit helpers that visualise SIFT feature matches between two images.

Good_Match draws the matches that survive Lowe's ratio test; Great_Match
additionally filters them with a RANSAC homography before drawing.
"""
import cv2
import numpy as np
import streamlit as st
from PIL import Image


def Match(image_file1, image_file2):
    # Let the user pick plain ratio-test matching ("Good") or
    # RANSAC-filtered matching ("Great").
    t = st.sidebar.selectbox("Good or Great", ["Good", "Great"])
    # Lowe ratio threshold: lower values keep only clearly superior matches.
    rate = st.sidebar.slider("Feature extraction strictness", 0.0, 1.0, 0.8)
    if t == "Good":
        Good_Match(image_file1, image_file2, rate)
    elif t == "Great":
        Great_Match(image_file1, image_file2, rate)


def Good_Match(image_file1, image_file2, rate=0.8):
    # Force RGB so RGBA or greyscale uploads don't break the colour conversion.
    image1 = Image.open(image_file1).convert("RGB")
    image2 = Image.open(image_file2).convert("RGB")

    # PIL arrays are RGB; OpenCV works in BGR.
    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)

    # Detect SIFT keypoints and compute their descriptors.
    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    # Approximate nearest-neighbour search over the descriptors with a KD-tree.
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test: keep a match only if it beats the second-best
    # candidate by the chosen margin.
    good_matches = []
    for m, n in matches:
        if m.distance < rate * n.distance:
            good_matches.append(m)

    img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2,
                                  good_matches, None,
                                  flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB),
             caption='Good Matches', use_column_width=True)


def Great_Match(image_file1, image_file2, rate=0.8):
    # Force RGB so RGBA or greyscale uploads don't break the colour conversion.
    image1 = Image.open(image_file1).convert("RGB")
    image2 = Image.open(image_file2).convert("RGB")

    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)

    # SIFT keypoints and descriptors, matched with FLANN as in Good_Match.
    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test.
    good_matches = []
    for m, n in matches:
        if m.distance < rate * n.distance:
            good_matches.append(m)

    if len(good_matches) > 4:
        # A homography needs at least four point pairs; estimate one with
        # RANSAC and keep only the inlier matches for drawing.
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matches_mask = mask.ravel().tolist()

        # Warp image1 into image2's frame (computed here but not displayed).
        height, width, channels = image2_cv.shape
        transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))
    else:
        # Too few matches for a homography; fall back to drawing them all.
        matches_mask = None

    img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2,
                                  good_matches, None,
                                  matchesMask=matches_mask,
                                  flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB),
             caption='Great Matches', use_column_width=True)
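

# Usage sketch (an addition, not part of the original module): one way to
# wire Match to two st.file_uploader widgets so the functions above receive
# the file-like objects they expect. The widget labels and accepted file
# types below are assumptions, not taken from the original code.
if __name__ == "__main__":
    image_file1 = st.file_uploader("Image 1", type=["png", "jpg", "jpeg"])
    image_file2 = st.file_uploader("Image 2", type=["png", "jpg", "jpeg"])
    # Only run matching once both images have been uploaded.
    if image_file1 is not None and image_file2 is not None:
        Match(image_file1, image_file2)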