I want to use ORB feature detection to draw a bounding box around a found object, similar to the example here, which uses SIFT features: SIFT Reference
The linked example uses a FlannBasedMatcher; my code uses a BFMatcher. I have no preference for which matcher is used.
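If you do want to try FLANN with ORB, note that ORB produces binary descriptors, so FLANN needs an LSH index instead of the KD-tree used for SIFT. A minimal sketch, assuming des1 and des2 are the ORB descriptors computed as in the code below (the LSH index parameters are common starting values, not tuned ones):

import cv2

FLANN_INDEX_LSH = 6  # index type for binary descriptors such as ORB
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,
                    key_size=12,
                    multi_probe_level=1)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

# knnMatch + Lowe's ratio test, as in the linked SIFT example;
# with LSH a query can return fewer than 2 neighbours, so guard against that
pairs = flann.knnMatch(des1, des2, k=2)
good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < 0.7 * p[1].distance]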
import numpy as np
import cv2

MIN_MATCH_COUNT = 10
# load the query image (object) and the train image (scene) as grayscale
img1 = cv2.imread('box.png', 0)
img2 = cv2.imread('box_in_scene.png', 0)
# detect ORB keypoints and compute descriptors
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# brute-force matching with Hamming distance (ORB descriptors are binary) and cross-checking
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
How would I continue this code to use a homography to draw a box around the object found in the box_in_scene image?
EDIT: I tried the following, but the output wasn't as expected.
# take the first 50 matches (note: bf.match() does not return them sorted by distance)
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
# estimate the homography with RANSAC and keep the inlier mask
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
# project the corners of the query image into the scene
h, w = img1.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
This is my result.
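As an aside, matchesMask is computed in the snippet above but never used; in the linked SIFT example it is passed to cv2.drawMatches so that only the RANSAC inliers are drawn. A minimal sketch of that usage, reusing the variables from the snippet above:

# draw only the matches RANSAC kept as inliers (1 = inlier, 0 = outlier in matchesMask)
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
res = cv2.drawMatches(img1, kp1, img2, kp2, matches[:50], None, **draw_params)
cv2.imshow("inlier_matches", res)
cv2.waitKey()
cv2.destroyAllWindows()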
Here is working code (the explanation is written as comments). The key differences from your attempt are that the matches are sorted by distance before the homography is estimated, and the projected corners of the query image are actually drawn on the scene with cv2.polylines:
#!/usr/bin/python3
# 2017.11.26 23:27:12 CST
## Find an object by matching ORB features
import numpy as np
import cv2
imgname = "box.png" # query image (small object)
imgname2 = "box_in_scene.png" # train image (large scene)
MIN_MATCH_COUNT = 4
## Create the ORB detector and a BFMatcher (using Hamming distance)
orb = cv2.ORB_create()
img1 = cv2.imread(imgname)
img2 = cv2.imread(imgname2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
## Find the keypoints and descriptors with ORB
kpts1, descs1 = orb.detectAndCompute(gray1,None)
kpts2, descs2 = orb.detectAndCompute(gray2,None)
## match descriptors and sort them in the order of their distance
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(descs1, descs2)
dmatches = sorted(matches, key=lambda x: x.distance)
## extract the matched keypoints
src_pts = np.float32([kpts1[m.queryIdx].pt for m in dmatches]).reshape(-1,1,2)
dst_pts = np.float32([kpts2[m.trainIdx].pt for m in dmatches]).reshape(-1,1,2)
## find homography matrix and do perspective transform
## (need at least MIN_MATCH_COUNT = 4 good matches to estimate a homography)
assert len(dmatches) >= MIN_MATCH_COUNT, "Not enough matches to estimate a homography"
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
h,w = img1.shape[:2]
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
## draw found regions
img2 = cv2.polylines(img2, [np.int32(dst)], True, (0,0,255), 1, cv2.LINE_AA)
cv2.imshow("found", img2)
## draw match lines
res = cv2.drawMatches(img1, kpts1, img2, kpts2, dmatches[:20],None,flags=2)
cv2.imshow("orb_match", res);
cv2.waitKey();cv2.destroyAllWindows()
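If you specifically want an axis-aligned bounding box rather than the projected quadrilateral, you can wrap the transformed corners with cv2.boundingRect. A minimal sketch, reusing dst and img2 from the code above (the window name "bounding_box" is just illustrative):

## axis-aligned bounding box around the four projected corners in dst
x, y, bw, bh = cv2.boundingRect(np.int32(dst))
img2 = cv2.rectangle(img2, (x, y), (x + bw, y + bh), (255, 0, 0), 2)
cv2.imshow("bounding_box", img2)
cv2.waitKey()
cv2.destroyAllWindows()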