Hi all. I'm trying to build a feature-matching web app to learn OpenCV, but I'm having problems with the crop.
My code does this:
- Compare two images
- Find feature matches from one image to the other
- Warp this featured region
- Crop it
My results:

As you can see, I get images like that, with a black stripe on the borders. I run the code from a .bat file with arguments; I'll paste both files below:
- Full code:
# python libraries
import rawpy
import os
import sys
import json
import argparse
import cv2
import numpy as np
import math
from PIL import Image
# Minimum number of "good" (ratio-test-surviving) matches required before a
# homography is attempted.
MIN_MATCH_COUNT = 4
# Flip codes forwarded to cv2.flip() by detect_matches, plus FLIPNONE as a
# sentinel meaning "do not flip".
# NOTE(review): cv2.flip treats 0 as a flip around the x-axis and positive
# values as a flip around the y-axis — verify these names match the intent.
HORIZONTAL = 0
VERTICAL = 1
VERTICAL_HORIZONTAL = -1
FLIPNONE = 2
# Lowe's ratio-test threshold used to filter knnMatch results.
RATIO = 0.75
# RANSAC reprojection threshold (in pixels) for cv2.findHomography.
RAN_VAL = 8.0
def calculateDistance(p0, p1):
    """Return the Euclidean distance between two 2-D points.

    Parameters:
        p0, p1: points shaped like [[x, y]] — i.e. single rows of the
            (N, 1, 2) array produced by cv2.perspectiveTransform.

    Returns:
        float: straight-line distance between the two points.
    """
    # math.hypot avoids intermediate overflow/underflow that the manual
    # sqrt(dx**2 + dy**2) form can suffer from, and reads cleaner.
    return math.hypot(p1[0][0] - p0[0][0], p1[0][1] - p0[0][1])
def detect_matches(jpguser,raworiginal,flipmode):
    """Find SURF feature matches between the user JPEG and the scene image.

    Parameters:
        jpguser:     BGR image (numpy array) supplied by the user (the "object").
        raworiginal: BGR scene image to search in.
        flipmode:    FLIPNONE for no flip, otherwise a cv2.flip() flip code.
                     When too few matches are found, the function recurses
                     with flipmode - 1 down to VERTICAL_HORIZONTAL (-1), so
                     every flip orientation is tried before giving up.

    Returns:
        dict with keys 'kpts1', 'kpts2', 'good' (ratio-filtered DMatch
        list), 'raworiginal' (the possibly flipped scene actually matched
        against) and 'flipmode' (the flip code that produced it).
    """
    if flipmode == FLIPNONE:
        raworiginalflip = raworiginal
    else:
        raworiginalflip = cv2.flip(raworiginal,flipmode)
    # Detect item: match on grayscale copies — the detector only needs intensity.
    detect = {}
    gray1 = cv2.cvtColor(jpguser, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(raworiginalflip, cv2.COLOR_BGR2GRAY)
    minHessian = 100
    # NOTE(review): despite the variable name, this creates a SURF (not SIFT)
    # detector; it requires opencv-contrib's xfeatures2d module.
    sift = cv2.xfeatures2d_SURF.create(hessianThreshold=minHessian,nOctaveLayers=6)
    ## (3) Create flann matcher (algorithm=1 selects the KD-tree index).
    matcher = cv2.FlannBasedMatcher(dict(algorithm = 1, trees = 2), {})
    ## (4) Detect keypoints and compute keypoint descriptors for both images.
    kpts1, descs1 = sift.detectAndCompute(gray1,None)
    kpts2, descs2 = sift.detectAndCompute(gray2,None)
    ## (5) knnMatch to get the top-2 neighbours of each descriptor.
    matches = matcher.knnMatch(descs1, descs2, 2)
    # Sort by their distance (best matches first).
    matches = sorted(matches, key = lambda x:x[0].distance)
    ## (6) Lowe's ratio test: keep a match only when it is clearly better
    ## than its runner-up, which filters ambiguous correspondences.
    ratio_thresh = RATIO
    good = []
    for m,n in matches:
        if m.distance < ratio_thresh * n.distance:
            good.append(m)
    detect['kpts1'] = kpts1
    detect['kpts2'] = kpts2
    detect['good'] = good
    detect['raworiginal'] = raworiginalflip
    detect['flipmode'] = flipmode
    if len(detect['good']) >= MIN_MATCH_COUNT:
        return detect
    else:
        # Not enough matches: retry with the next flip orientation.
        if flipmode > VERTICAL_HORIZONTAL:
            return detect_matches(jpguser,raworiginal,flipmode-1)
        return detect
def find_homography(item,jpguser,raworiginal,detect,matchedfile):
    """Estimate the homography from the user image to the scene, crop the
    matched region out of the scene, and record crop/rotation/scale stats.

    Parameters:
        item:        result dict (mutated and returned); must already hold
                     'PathImageJPGOriginal', the output path for the crop.
        jpguser:     user BGR image (the matched "object").
        raworiginal: scene BGR image (already flipped by detect_matches).
        detect:      dict returned by detect_matches().
        matchedfile: optional path; when truthy, a debug visualisation of
                     the matches is written there.

    Returns:
        item, with 'Success' set to True and CropH/CropW/FlipMode/Rotation/
        ScaleX/ScaleY filled in.
    """
    canvas = raworiginal.copy()
    ## (queryIdx indexes the small object's keypoints, trainIdx the scene's)
    src_pts = np.float32([detect['kpts1'][m.queryIdx].pt for m in detect['good']]).reshape(-1, 1, 2)
    dst_pts = np.float32([detect['kpts2'][m.trainIdx].pt for m in detect['good']]).reshape(-1, 1, 2)
    ## Find the homography matrix with RANSAC over the good match points.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, RAN_VAL)
    h,w = jpguser.shape[:2]
    # Corners of the user image (TL, BL, BR, TR), projected into the scene.
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)
    # Reorder the projected corners: p1=top-left, p2=top-right,
    # p3=bottom-left, p4=bottom-right.
    p1 = dst[0]
    p2 = dst[3]
    p3 = dst[1]
    p4 = dst[2]  # NOTE(review): unused
    wNew = calculateDistance(p1, p2)  # projected width (TL -> TR)
    hNew = calculateDistance(p1, p3)  # projected height (TL -> BL)
    # Decompose the affine part of M into x-scale p, y-scale r and rotation theta.
    a = M[0,0]
    b = M[0,1]
    c = M[0,2]  # NOTE(review): unused (translation x)
    d = M[1,0]
    e = M[1,1]
    f = M[1,2]  # NOTE(review): unused (translation y)
    p = math.sqrt(a*a + b*b)          # scale along x
    r = (a*e - b*d)/(p)               # scale along y (determinant / x-scale)
    theta = - math.atan2(b, a) * 180 / math.pi  # rotation, degrees
    # Outline the detected quadrilateral on a copy of the scene.
    cv2.polylines(canvas,[np.int32(dst)],True,(0,0,255),3, cv2.LINE_AA)
    ## (8) drawMatches: side-by-side debug image of the correspondences.
    matched = cv2.drawMatches(jpguser,detect['kpts1'],canvas,detect['kpts2'],detect['good'],None)#,**draw_params)
    ## (9) Crop the matched region by warping the scene quadrilateral back
    ## onto the user image's w x h rectangle.
    h,w = jpguser.shape[:2]
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)
    perspectiveM = cv2.getPerspectiveTransform(np.float32(dst),pts)
    # NOTE(review): warpPerspective interpolates, and pixels sampled just
    # outside the quadrilateral default to black (borderValue=0) — likely
    # the cause of the black edge stripe; consider shrinking the corner
    # quad slightly or passing borderMode=cv2.BORDER_REPLICATE. Confirm.
    found = cv2.warpPerspective(raworiginal,perspectiveM,(w,h))
    ## (10) Crop size as a percentage of the scene size.
    h2,w2 = raworiginal.shape[:2]
    item['CropH'] = (hNew/h2)*100
    item['CropW'] = (wNew/w2)*100
    item['FlipMode'] = detect['flipmode']
    item['Rotation'] = theta
    item['ScaleX'] = p*100
    item['ScaleY'] = r*100
    ## (11) Save the crop, and the debug match image when requested.
    cv2.imwrite(item['PathImageJPGOriginal'], found)
    if(matchedfile):
        cv2.imwrite(matchedfile, matched)
    item['Success'] = True
    return item
def extract_original(raworiginal, user, original, matchedfile):
    """Match the user JPEG against the original image, crop the matched
    region out of the original and save it.

    Parameters:
        raworiginal: path to the RAW/original image on disk.
        user:        path to the user-supplied JPEG.
        original:    output path for the cropped result (overwritten).
        matchedfile: optional path for a debug image of the drawn matches.

    Returns:
        dict describing the operation; 'Success' is True only when a
        homography was found and the crop was written, otherwise 'Error'
        (and possibly 'NotMatched') explains the failure.
    """
    # JSON ITEM
    item = {}
    try:
        item['ImageJPGOriginal'] = original.split("\\")[-1]
        item['PathImageJPGOriginal'] = original
        item['ImageRawOriginal'] = raworiginal.split("\\")[-1]
        item['PathImageRawOriginal'] = raworiginal
        item['ImageJPGUser'] = user.split("\\")[-1]
        item['PathImageJPGUser'] = user
        item['Success'] = False
        if matchedfile is not None:
            item['PathImageMatched'] = matchedfile
            item['ImageMatched'] = matchedfile.split("\\")[-1]
        if os.path.exists(item['PathImageJPGUser']) and os.path.exists(item['PathImageRawOriginal']):
            # Remove a stale result so a failed run cannot leave an old crop behind.
            if os.path.exists(item['PathImageJPGOriginal']):
                os.remove(item['PathImageJPGOriginal'])
            ## Load the input images.
            jpguser = cv2.imread(item['PathImageJPGUser'])
            rawimage = cv2.imread(item['PathImageRawOriginal'])
            detect = detect_matches(jpguser, rawimage, FLIPNONE)
            print(len(detect['good']))
            # BUG FIX: detect_matches treats >= MIN_MATCH_COUNT as success;
            # use the same inclusive comparison here (was '>', which
            # rejected detections with exactly MIN_MATCH_COUNT matches).
            if len(detect['good']) >= MIN_MATCH_COUNT:
                item = find_homography(item, jpguser, detect['raworiginal'], detect, matchedfile)
            else:
                item['NotMatched'] = True
                item['Error'] = "Not enough matches are found - {}/{}".format(len(detect['good']), MIN_MATCH_COUNT)
        else:
            item['Error'] = "File not exists"
    except OSError as e:
        item['Success'] = False
        item['Error'] = '%s %s %s' % (e.errno, e.strerror, e.filename)
    except Exception as e:
        item['Success'] = False
        item['Error'] = getattr(e, 'message', str(e))
    # BUG FIX: previously this `return` lived in a `finally:` clause, which
    # silently swallowed even SystemExit/KeyboardInterrupt. A plain return
    # here behaves identically for every Exception case handled above.
    return item
def main():
    """Command-line entry point.

    Two modes:
      * single image: -r/-u/-o (and optional -m) process one image and
        print the result dict as JSON;
      * batch: -json takes a JSON array of objects with the path keys and
        an "Id", processes each entry and prints the list of results.
    Prints help when neither mode's required arguments are present.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--raworiginal", help="a path that contains the RAW Camera file")
    parser.add_argument("-u", "--user", help="a path where the JPGuser file")
    parser.add_argument("-o", "--original", help="a path where the JPGoriginal file will be copied")
    parser.add_argument("-m", "--matched", help="a path where the matched file will be copied")
    parser.add_argument("-json", "--json", help="a JSON string with images")
    args = parser.parse_args()
    if not ((args.raworiginal and args.user and args.original) or args.json):
        parser.print_help()
    if args.raworiginal and args.user and args.original:
        print(json.dumps(extract_original(args.raworiginal, args.user, args.original, args.matched)), end='', flush=True)
    elif args.json:
        data = []
        for entry in json.loads(args.json):
            # BUG FIX: use .get() so a missing "PathImageMatched" key does
            # not raise KeyError — the single-image path already treats the
            # matched-file output as optional.
            item = extract_original(entry["PathImageRawOriginal"],
                                    entry["PathImageJPGUser"],
                                    entry["PathImageJPGOriginal"],
                                    entry.get("PathImageMatched"))
            item["Id"] = entry["Id"]
            data.append(item)
        print(json.dumps(data), end='', flush=True)
if __name__ == "__main__":
    main()
- bat file:
@echo off
rem Run the feature-match extractor:
rem   -r RAW/original image path, -u user JPEG path,
rem   -o output path for the cropped result,
rem   -m optional debug image showing the drawn matches.
python.exe extract.py -r "Insert image to crop" -u "insert image for find and comare" -o "save path" -m "matched file for see matches of the result image"
pause
How can I solve this? I want accuracy in my project. I'm using Python and want a more accurate process, but I don't get good results.
I'm starting to think that warpPerspective applies some rounding and that's why I get these results, but I'm a newbie with OpenCV.
↧