I have this error when I run this code:
import cv2
cascPath = "/usr/local/lib/python2.7/site-packages/h.xml"
eyePath = "/usr/local/lib/python2.7/site-packages/e.xml"
smilePath = "/usr/local/lib/python2.7/site-packages/le.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
eyeCascade = cv2.CascadeClassifier(eyePath)
smileCascade = cv2.CascadeClassifier(smilePath)
font = cv2.FONT_HERSHEY_SIMPLEX
video_capture = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(200, 200),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 3)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        cv2.putText(frame, 'Face', (x, y), font, 2, (255, 0, 0), 5)
        smile = smileCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.16,
            minNeighbors=35,
            minSize=(25, 25),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (sx, sy, sw, sh) in smile:
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (255, 0, 0), 2)
            cv2.putText(frame, 'Smile', (x + sx, y + sy), 1, 1, (0, 255, 0), 1)
        eyes = eyeCascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
            cv2.putText(frame, 'Eye', (x + ex, y + ey), 1, 1, (0, 255, 0), 1)
    cv2.putText(frame, 'Number of Faces : ' + str(len(faces)), (40, 40), font, 1, (255, 0, 0), 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
↧
Traceback (most recent call last):
  File "./Documents/S.py", line 36, in <module>
    roi_gray,
NameError: name 'roi_gray' is not defined
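For what it's worth, this NameError usually means that in the saved file the smile detection ended up outside (dedented from) the `for (x, y, w, h) in faces:` loop, so `roi_gray` has not been assigned yet when line 36 runs. A minimal sketch of the required nesting, reusing the names from the code above:

```python
for (x, y, w, h) in faces:
    roi_gray = gray[y:y+h, x:x+w]            # defined once per detected face
    smile = smileCascade.detectMultiScale(   # must stay inside this loop
        roi_gray,
        scaleFactor=1.16,
        minNeighbors=35,
        minSize=(25, 25),
    )
```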
↧
I am trying to run YOLO in OpenCV using darknet on CPU. The code works fine, but the camera feed is not displayed. [Solved]
The code is here for you to see:
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time

option = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolov2.weights',
    'threshold': 0.13,
}
tfnet = TFNet(option)
#camera_port=0
capture = cv2.VideoCapture(0)
colors = [tuple(255 * np.random.rand(3)) for i in range(5)]

while (capture.isOpened()):
    stime = time.time()
    ret, frame = capture.read()
    result = tfnet.return_predict(frame)
    if ret:
        for colors, result in zip(colors, result):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            frame = cv2.rectangle(frame, tl, br, (0, 255, 0), 7)
            frame = cv2.putText(frame, label, br, cv2.FONT_HERSHEY_COMPLEX, 10, (0, 0, 0), 2)
        cv2.imshow('frame', frame)
        print('FPS{:.1f}'.format(1 / (time.time() - stime)))
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    else:
        capture.release()
        cv2.destroyAllWindows()
        break
The error is:
[ WARN:0] global C:\projects\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (674) SourceReaderCB::~SourceReaderCB terminating async callback
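Not an authoritative fix, but the MSMF warning above is often just the capture backend shutting down on Windows; a more common cause of a missing window is running prediction and drawing before checking that a frame was actually grabbed, or the camera not opening at all. A minimal sketch of the usual guard, reusing names from the code above:

```python
import cv2

capture = cv2.VideoCapture(0)
if not capture.isOpened():
    raise RuntimeError("camera 0 could not be opened")

while True:
    ret, frame = capture.read()
    if not ret:          # no frame grabbed: stop instead of predicting on None
        break
    # ... run tfnet.return_predict(frame) and draw boxes here ...
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
```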
↧
Use custom classifier in ERFilter with python
I want to use a custom classifier with the Python OpenCV API (3.x) for ERFilter.
I wonder why the `eval` method is never called; from what I found in the sources, this is the method that classifies the regions and that I need to implement.
I appreciate any help; it seems to be a very uncommon situation, since I cannot find examples for this...
Thank you all!
Below is a minimum example (also hosted [here](https://colab.research.google.com/drive/1SF_D1MXMKekY5Aw9w9Q0pbE5HfZhuc-H))
import cv2
import numpy as np
import ctypes

# needs opencv-contrib
class customClassifier(cv2.text_ERFilter_Callback):
    def eval(self, stat) -> ctypes.c_double:
        print("!!!!!! did run", stat)
        return 0.0

erc1 = customClassifier()
print("running manually: ")
erc1.eval(1)
er1 = cv2.text.createERFilterNM1(erc1, 16, 0.00015, 0.13, 0.2, True, 0.1)
#erc2 = cv2.text.loadClassifierNM2('/Users/bongo/Downloads/trained_classifierNM2.xml')
#er2 = cv2.text.createERFilterNM2(erc2,0.5)
print("should run through detectRegions:")
img = np.zeros((10,10,3), dtype=np.uint8)
regions = cv2.text.detectRegions(img, er1, None)
output:
running manually:
!!!!!! did run 1
should run through detectRegions:
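It may simply be that the Python bindings do not route the virtual Callback::eval back into a Python subclass (worth confirming against your OpenCV version). For comparison, the stock pretrained classifiers load and run through the same pipeline; a minimal sketch, assuming opencv-contrib-python is installed and the trained_classifierNM1/NM2 XML files from the opencv_contrib text module samples are available locally:

```python
import cv2

# paths to the XML files shipped with opencv_contrib/modules/text/samples (assumed local)
erc1 = cv2.text.loadClassifierNM1('trained_classifierNM1.xml')
er1 = cv2.text.createERFilterNM1(erc1, 16, 0.00015, 0.13, 0.2, True, 0.1)
erc2 = cv2.text.loadClassifierNM2('trained_classifierNM2.xml')
er2 = cv2.text.createERFilterNM2(erc2, 0.5)

img = cv2.imread('scenetext01.jpg')            # any test image
channels = cv2.text.computeNMChannels(img)
for channel in channels:
    regions = cv2.text.detectRegions(channel, er1, er2)
    print(len(regions), 'regions')
```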
↧
No module named 'cv2'
I have installed OpenCV using pip install opencv-python; it installs, but when I try to run the code, the error "No module named 'cv2'" pops up. The code is:
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time

option = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolov2.weights',
    'threshold': 0.13,
}
tfnet = TFNet(option)

colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]

capture = cv2.VideoCapture('me.mp4')
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 512)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 512)

while True:
    stime = time.time()
    ret, frame = capture.read()
    if ret:
        results = tfnet.return_predict(frame)
        for color, result in zip(colors, results):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            confidence = result['confidence']
            text = '{}: {:.0f}%'.format(label, confidence * 100)
            frame = cv2.rectangle(frame, tl, br, color, 5)
            frame = cv2.putText(
                frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        cv2.imshow('frame', frame)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

capture.release()
cv2.destroyAllWindows()
The error is
>>> RESTART: C:\Users\Raja kashif\Desktop\project\darkflow-master\video test.py
Traceback (most recent call last):
  File "C:\Users\Raja kashif\Desktop\project\darkflow-master\video test.py", line 1, in <module>
    import cv2
ModuleNotFoundError: No module named 'cv2'
>>>
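This error usually means the interpreter running the script is not the one pip installed opencv-python into (IDLE here may be tied to a different Python than the pip on PATH). A quick way to check, and to install into exactly the interpreter that runs the script:

```python
# run these two lines with the same interpreter/IDE that runs "video test.py"
import sys
print(sys.executable)   # the Python actually executing the script

# then, from a shell, install OpenCV for exactly that interpreter, e.g.:
#   "C:\path\to\that\python.exe" -m pip install opencv-python
```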
↧
Trouble with imdecode: always returns None. Why?
I wrote UDP streaming in Python, applying JPEG compression, with send and receive scripts as below.
send.py
import socket
import numpy as np
import cv2 as cv
import sys

addr = ("127.0.0.1", 65534)
buf = 512
width = 640
height = 480
cap = cv.VideoCapture('test.mp4')
cap.set(3, width)
cap.set(4, height)
code = 'start'
code = ('start' + (buf - len(code)) * 'a').encode('utf-8')
np.set_printoptions(threshold=sys.maxsize)
lenght = bytearray(3)
encode_param = [int(cv.IMWRITE_JPEG_QUALITY), 90]
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

frame = np.array([[[255, 255, 216],   # i use a small array instead of frame from test video to debug
                   [255, 255, 215],
                   [255, 255, 210],
                   [255, 255, 210],
                   [255, 255, 209],
                   [255, 255, 209],
                   [255, 255, 213],
                   [255, 255, 213],
                   [255, 255, 208],
                   [255, 255, 208]]])
print('original')
print(frame)
result, frame = cv.imencode('.jpg', frame, encode_param)
print('frame after encode')
print(frame)
print('length')
print(len(frame))
frame2 = cv.imdecode(frame, cv.IMREAD_COLOR)
print('frame2 test decode before sending')
print(frame2)
lenght[0] = len(frame) // (256**2)
lenght[1] = (len(frame) % (256**2)) // 256
lenght[2] = (len(frame) % (256**2)) % 256
data = lenght + bytearray(frame)
for i in range(0, len(data), buf):
    s.sendto(data[i:i+buf], addr)
cv.imshow('send', frame2)
rec.py
import socket
import numpy as np
import cv2 as cv
import sys

np.set_printoptions(threshold=sys.maxsize)
addr = ("127.0.0.1", 65534)
buf = 512
width = 640
height = 480
code = b'start'
num_of_chunks = width * height * 3 / buf
data = b""

if __name__ == '__main__':
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(addr)
    while True:
        while len(data) < 3:
            data += s.recvfrom(buf)[0]
        length = data[0]*(256**2) + data[1]*256 + data[2]
        data = data[3:]
        #print("msg_size: {}".format(length))
        while len(data) < length:
            data += s.recvfrom(buf)[0]
        frame_data = data[:length]
        data = data[length:]
        frame = np.zeros((len(frame_data), 1), dtype=int)
        for j in range(len(frame_data)):
            frame[j] = frame_data[j]
        print('frame collected before decode')
        print(frame)
        print('length')
        print(len(frame))
        frame2 = cv.imdecode(frame, cv.IMREAD_COLOR)
        cv.imshow('recv', frame2)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    s.close()
    cv.destroyAllWindows()
Now the problem: in rec.py, frame2 is None after decoding. I thought my transmission was dropping data, but I checked "frame collected before decode" in rec.py against "frame after encode" printed in send.py and they are identical, which means the data is sent and collected properly.
Any opinion on why decode gives None in my case?
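One detail worth checking (an observation, not a guaranteed fix): cv.imdecode expects a buffer of dtype uint8, but rec.py rebuilds the received bytes into an array with dtype=int (64-bit), so the underlying buffer no longer looks like a valid JPEG stream and decoding yields None. A minimal sketch of decoding straight from the received bytes:

```python
import numpy as np
import cv2 as cv

# frame_data is the reassembled payload (bytes/bytearray) from the socket loop in rec.py
buf_arr = np.frombuffer(bytes(frame_data), dtype=np.uint8)
frame2 = cv.imdecode(buf_arr, cv.IMREAD_COLOR)
print(frame2 is None)   # should be False once the full JPEG payload arrived intact
```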
↧
Option: BUILD_opencv_python3 missing from cmake-gui
I've downloaded versions of OpenCV 4.1.2 a few days apart.
The earlier version has a cmake-gui option:
BUILD_opencv_python3:BOOL=ON
It is not present in today's version (2019 Nov 04). It's not that it's switched off; the option does not appear at all. I'm not sure of its exact function (anyone?), but since it had defaulted to ON in the previous download, it sounded significant.
Another new flag is present in today's build:
OPENCV_ENABLE_MEMALIGN:BOOL=ON
I'm not as concerned about that, but it does indicate that some things have changed.
↧
late fusion over color and texture descriptors
Any idea on how to do late fusion over color and texture descriptors?
I am doing a texture matching project. I want to merge a color descriptor (I used calcHist()) and a texture descriptor (I used LBP). From a paper I learned that late fusion is a way to do it; they mention a sum or product of scores, but unfortunately I did not understand it.
Right now I am comparing the color and LBP histograms between the test image and the database images and computing the average of the two.
Any idea on how to do this late fusion on Color descriptor and texture descriptors?
Thanks!
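Late fusion here generally means: compute one matching score per descriptor, normalise the scores to a common range, then combine them with a weighted sum (or a product). A minimal sketch assuming both descriptors are float32 histograms compared with cv2.compareHist; the weights are arbitrary and would need tuning:

```python
import cv2

def late_fusion_score(color_hist_q, color_hist_db, lbp_hist_q, lbp_hist_db,
                      w_color=0.5, w_texture=0.5):
    # per-descriptor similarity scores (correlation: higher = more similar, range [-1, 1])
    s_color = cv2.compareHist(color_hist_q, color_hist_db, cv2.HISTCMP_CORREL)
    s_texture = cv2.compareHist(lbp_hist_q, lbp_hist_db, cv2.HISTCMP_CORREL)
    # normalise both scores to [0, 1] so they are comparable
    s_color = (s_color + 1) / 2
    s_texture = (s_texture + 1) / 2
    # late fusion: weighted sum (a product of the normalised scores also works)
    return w_color * s_color + w_texture * s_texture
```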
↧
Get video input when device not on uvc input
I am using an OpenMV H7 camera as the camera for a stereo vision application. The OpenMV camera shows up on a /dev/ttyACMx port on the machine, as it isn't UVC enabled. Because of this, I am unable to capture frames onto my computer using cv2.VideoCapture(x). Does anyone know a workaround for this?
I have already tried the UVC firmware on the OpenMV camera. That works, but the quality of the video output is very bad.
↧
How to draw contours on squares drawn with pencil (Python)
Code:
import cv2

img = cv2.imread('media/multi-rec.jpg')
img = cv2.resize(img, (414, 732))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges_high_thresh = cv2.Canny(image=gray, threshold1=60, threshold2=120)
contours, _ = cv2.findContours(image=edges_high_thresh,
                               mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    area = cv2.contourArea(cnt)
    peri = cv2.arcLength(curve=cnt, closed=True)
    approx = cv2.approxPolyDP(curve=cnt,
                              epsilon=0.01*peri,
                              closed=True)
    if area > 100 and len(approx) == 4:
        cv2.drawContours(image=img, contours=[approx],
                         contourIdx=-1, color=(0, 255, 0), thickness=1)
cv2.imshow('Image edges', edges_high_thresh)
cv2.waitKey(0)
cv2.imshow('Image', img)
cv2.waitKey(0)
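Pencil strokes often produce broken Canny edges, so the four-point approximation can fail on gaps. Not the only option, but closing the edge map morphologically before findContours usually reconnects the strokes; a minimal sketch (the kernel size and iteration count are guesses to tune):

```python
import cv2
import numpy as np

img = cv2.imread('media/multi-rec.jpg')          # same input as in the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 120)

# close small gaps in the pencil strokes so each square becomes one closed edge
kernel = np.ones((3, 3), np.uint8)
edges_closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel, iterations=2)

contours, _ = cv2.findContours(edges_closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
```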


↧
Any implementation of LineSegmentDetector for python ?
I see that the cv2.createLineSegmentDetector implementation has been removed due to license issues with the original code. Are there any workarounds that don't involve downgrading to an older version?
Thanks a lot!
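One workaround, assuming opencv-contrib-python is installed, is the FastLineDetector in the ximgproc module, which offers a similar detect/draw interface; a minimal sketch:

```python
import cv2

img = cv2.imread('building.jpg')              # any test image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

fld = cv2.ximgproc.createFastLineDetector()
lines = fld.detect(gray)                      # Nx1x4 array of (x1, y1, x2, y2), or None
if lines is not None:
    img = fld.drawSegments(img, lines)

cv2.imshow('segments', img)
cv2.waitKey(0)
```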
↧
Draw a line connecting overhangs (ridges)
I want to connect the overhang (ridge) points of an image to create a separation between left and right.
 
The final output should look like this. Then I can identify them as 4 separate inner contours.

Is there a method to get the coordinates of the end points of overhangs (ridges) in order to draw a vertical line connecting them? Or is there a better way to achieve this?
↧
GCR and UCR - python Possible
GCR (gray component replacement) and under color removal (UCR): can we use OpenCV for these functions in Python?
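There is no dedicated OpenCV function for GCR/UCR, but both are straightforward to express with NumPy on a CMY(K) conversion. A minimal sketch of full (100%) UCR; the removal fraction, and any GCR curve, are assumptions you would tune for your workflow:

```python
import cv2
import numpy as np

img = cv2.imread('input.jpg')                     # BGR, uint8
bgr = img.astype(np.float32) / 255.0

# naive RGB -> CMY conversion
cmy = 1.0 - bgr[..., ::-1]                        # channels are now C, M, Y

# the gray component is the amount of ink common to all three channels
k = cmy.min(axis=2, keepdims=True)

ucr_fraction = 1.0                                # 100% removal; lower for partial UCR/GCR
cmyk = np.concatenate([cmy - ucr_fraction * k, k], axis=2)
cmyk = np.clip(cmyk, 0.0, 1.0)                    # C', M', Y', K in [0, 1]
```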
↧
Syncing multiple cameras with the multiprocessing library
I have 4 USB cameras and I was interested in using VideoCapture.grab to synchronize them.
I need 4 processes, one for each camera, to distribute the load across the 4 cores of the RPi 4B.
I tried using a separate process to grab the images and another to VideoCapture.retrieve, but retrieve returns True and a zero matrix.
import cv2
import time
import multiprocessing

def derp(capp):
    print(cv2.VideoCapture.grab(capp))
    time.sleep(2)
    print(cv2.VideoCapture.retrieve(capp))

cap = cv2.VideoCapture(0)
process = multiprocessing.Process(target=derp, args=(cap,))
process.start()
time.sleep(1)
print(cv2.VideoCapture.retrieve(cap))
time.sleep(2)
print(cv2.VideoCapture.retrieve(cap))
The retrieve call in derp() returns an image, but the other two in "main" return zero matrices.
Can anyone tell me why that is the case?
Is that maybe not a good approach to the problem of synchronizing the cameras?
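A VideoCapture handle generally cannot be meaningfully shared across processes: the child receives its own copy of the object, so a grab() there does not populate the capture seen by the parent, which would explain the zero matrices in the main process. A sketch of an alternative, assuming each camera is opened inside the process that uses it and a multiprocessing.Barrier lines up the grab() calls:

```python
import cv2
import multiprocessing

def camera_worker(index, barrier, n_frames=100):
    cap = cv2.VideoCapture(index)          # open the device inside the process that uses it
    for _ in range(n_frames):
        barrier.wait()                     # all workers grab at (roughly) the same moment
        cap.grab()
        ret, frame = cap.retrieve()
        if not ret:
            break
        # ... process or queue the frame here ...
    cap.release()

if __name__ == '__main__':
    indices = [0, 1, 2, 3]
    barrier = multiprocessing.Barrier(len(indices))
    workers = [multiprocessing.Process(target=camera_worker, args=(i, barrier))
               for i in indices]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
```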
↧
No module named 'cv2' but opencv is installed
Hey Guys,
I've installed opencv-python using pip and tried to run code that uses OpenCV in Visual Studio Code. I can't run the code; every time I try to run it, I get this error message:
Traceback (most recent call last):
  File "c:/Users/Marius/Entwicklung/local_light_absorber/opencv_test.py", line 1, in <module>
    import cv2
ModuleNotFoundError: No module named 'cv2'
I know there are already questions in this forum dealing with this problem but none of them helped me.
Can you please help me so I can run my code?
↧
Difference in output of minAreaRect in python and c++
I am doing text skew correction, and when I wrote the same code in Python and C++, the results for the box angle are different.
Here is the Python version
```
import numpy as np
import cv2 as cv
import sys

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image", required=True, help="path to input image file")
    args = vars(parser.parse_args())

    image = cv.imread(cv.samples.findFile(args["image"]))
    if image is None:
        print("can't read image " + args["image"])
        sys.exit(-1)

    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    gray = cv.bitwise_not(gray)
    # threshold the image, setting all foreground pixels to
    # 255 and all background pixels to 0
    thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)[1]
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv.minAreaRect(coords)[-1]
    print(cv.minAreaRect(coords))
    #print(coords)
    print(angle)
    # the `cv.minAreaRect` function returns values in the
    # range [-90, 0); as the rectangle rotates clockwise the
    # returned angle trends to 0 -- in this special case we
    # need to add 90 degrees to the angle
    if angle < -45:
        angle = -(90 + angle)
    # otherwise, just take the inverse of the angle to make
    # it positive
    else:
        angle = -angle

    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv.warpAffine(image, M, (w, h), flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)
    cv.putText(rotated, "Angle: {:.2f} degrees".format(angle), (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the output image
    print("[INFO] angle: {:.3f}".format(angle))
    cv.imshow("Input", image)
    cv.imshow("Rotated", rotated)
    cv.waitKey(0)

if __name__ == "__main__":
    main()
```
C++ code
```
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
Mat image=imread(samples::findFile(imageName));
if(image.empty()){
cout<<"Cannot load the image "+imageName<coords;
findNonZero(thresh,coords);
cout<
```
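One concrete difference between the two versions (not necessarily the whole story): np.where yields coordinates in (row, col) = (y, x) order, while C++ findNonZero yields Point(x, y), so the two minAreaRect calls see transposed point sets and report different angles. A small sketch of feeding the Python version (x, y)-ordered points, reusing thresh from the code above:

```python
import numpy as np
import cv2 as cv

# thresh is the Otsu-thresholded image from the Python code above
ys, xs = np.where(thresh > 0)
coords_xy = np.column_stack((xs, ys)).astype(np.float32)   # (x, y) order, like findNonZero
print(cv.minAreaRect(coords_xy))
```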
↧
CUDA: Has anyone tried passing GpuMats to other Python CUDA modules?
I am trying to accelerate some image processing that requires tasks not currently available in any one python CUDA module. For example, [`cupy`](https://cupy.chainer.org/) has a lot of `numpy`/`scipy` functions that are not available in `OpenCV`. I'm curious if anyone has had success passing the data on the GPU between different code bases, instead of spending a lot of compute time passing back and forth between CPU and GPU to do the conversions? I'm specifically interested in `cupy` but could use any experiences with other codes as a start.
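For reference, the usual zero-copy direction is to wrap the GpuMat's device pointer in a cupy UnownedMemory. A rough sketch; GpuMat.cudaPtr() and the step attribute are assumptions that only hold for newer OpenCV 4.x Python bindings (verify against your build), and the row stride must be respected because GpuMat rows can be padded:

```python
import cv2
import cupy as cp
import numpy as np

# upload a test image to the GPU with OpenCV
cpu = np.random.randint(0, 255, (480, 640), dtype=np.uint8)
gm = cv2.cuda_GpuMat()
gm.upload(cpu)

w, h = gm.size()                 # GpuMat.size() returns (width, height)
step = gm.step                   # bytes per row (assumption: exposed by your bindings)

# wrap the existing device allocation; gm is passed as owner so it is not freed early
mem = cp.cuda.UnownedMemory(gm.cudaPtr(), step * h, gm)
ptr = cp.cuda.MemoryPointer(mem, 0)
arr = cp.ndarray((h, w), dtype=cp.uint8, memptr=ptr, strides=(step, 1))

# cupy operations now run on the same buffer, with no CPU round trip
print(float(arr.mean()))
```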
↧
How to get the date/time of each frame when using cv2.VideoCapture("ip camera")?
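As far as I know, OpenCV does not expose the camera's own wall-clock timestamp for a live IP/RTSP stream (CAP_PROP_POS_MSEC is a position within the stream, not a date). A common approximation is to record the host time at the moment each frame is read; a minimal sketch with a hypothetical stream URL:

```python
import cv2
from datetime import datetime

cap = cv2.VideoCapture("rtsp://user:pass@camera-address/stream")   # hypothetical URL
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_time = datetime.now()          # host time when the frame was received
    print(frame_time.isoformat())
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```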

↧
How to use cudacodec + blobFromImage
The problem is that I can't pass my frame from cv2.cudacodec directly to cv2.dnn.blobFromImage.
Here is part of my implementation (I skipped many parts of the code, because they are not necessary):
...
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
vs2 = cv2.cudacodec.createVideoReader(stream_name)
while True:
    (grabbed, frame1) = vs2.nextFrame()
    frame = cv2.cuda.resize(frame1, (416, 416))
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
...
Separately, cudacodec and blobFromImage (with VideoCapture) work fine. Also, if I do
frame2 = frame_res.download()
frame = frame2[:, :, :3]
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
it still works fine. However, if I pass the frame to blobFromImage directly, without `.download()`, an error occurs:
Traceback (most recent call last):
  File "yolo_recog.py", line 559, in <module>
    main()
  File "yolo_recog.py", line 552, in main
    args.one_video
  File "yolo_recog.py", line 455, in testsys
    dir_to_images)
  File "yolo_recog.py", line 181, in video_processing
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
TypeError: Expected Ptr for argument 'image'
After that I changed blobFromImage to blobFromImages and got another error:
Traceback (most recent call last):
  File "yolo_recog.py", line 559, in <module>
    main()
  File "yolo_recog.py", line 552, in main
    args.one_video
  File "yolo_recog.py", line 455, in testsys
    dir_to_images)
  File "yolo_recog.py", line 181, in video_processing
    blob = cv2.dnn.blobFromImages(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
SystemError: returned NULL without setting an error
How can I transfer a frame from cudacodec to blobFromImage without downloading it to the CPU?
↧
How to remove some black parts from image
This is my [frame](https://i.stack.imgur.com/pvWJS.jpg). I would like to remove some black parts. I tried filtering those parts with HSV values, but I couldn't, because the HSV values are close to each other and some parts of my object get deleted. How can I remove the parts I have indicated on the image?
↧
Laplacian Pyramid not matching
Hi,
I am trying to adapt the code [here](https://pysource.com/2018/03/16/image-pyramids-blending-and-reconstruction-opencv-3-4-with-python-3-tutorial-24/), but I get this error when I try to reconstruct the image from the layers and blend them together at the end of the code:
faces_reconstructed = cv2.pyrUp(faces_reconstructed, dstsize=size)
cv2.error: OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\imgproc\src\pyramids.cpp:880: error: (-215:Assertion failed) std::abs(dsize.width - ssize.width*2) == dsize.width % 2 && std::abs(dsize.height - ssize.height*2) == dsize.height % 2 in function 'cv::pyrUp_'
I asked on another forum and someone suggested deleting the dstsize argument, but then I get an error at faces_reconstructed = cv2.add(faces_pyramid[i], faces_reconstructed) since the sizes do not match (I suspect, even though the images all have the same size). Any help is appreciated.
My code:
import cv2
import numpy as np

width = 800
height = 200
img1 = cv2.imread("head.jpg")
img1 = cv2.resize(img1, (width, height))
img2 = cv2.imread("eye.jpg")
img2 = cv2.resize(img2, (width, height))
img3 = cv2.imread("nose.jpg")
img3 = cv2.resize(img3, (width, height))
img4 = cv2.imread("mouth.jpg")
img4 = cv2.resize(img4, (width, height))

facestack = np.vstack((img1[:,:], img2[:,:], img3[:,:], img4[:,:]))
cv2.imshow("faces", facestack)

# Gaussian Pyramid 1
layer = img1.copy()
gaussian_pyramid = [layer]
for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid.append(layer)

# Laplacian Pyramid 1
layer = gaussian_pyramid[5]
laplacian_pyramid = [layer]
for i in range(5, 0, -1):
    size = (gaussian_pyramid[i - 1].shape[1], gaussian_pyramid[i - 1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid[i], dstsize=size)
    laplacian = cv2.subtract(gaussian_pyramid[i - 1], gaussian_expanded)
    laplacian_pyramid.append(laplacian)
    #cv2.imshow(str(i), laplacian)

# Gaussian Pyramid 2
layer = img2.copy()
gaussian_pyramid2 = [layer]
for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid2.append(layer)

# Laplacian Pyramid 2
layer = gaussian_pyramid2[5]
laplacian_pyramid2 = [layer]
for i in range(5, 0, -1):
    size = (gaussian_pyramid2[i - 1].shape[1], gaussian_pyramid2[i - 1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid2[i], dstsize=size)
    laplacian = cv2.subtract(gaussian_pyramid2[i - 1], gaussian_expanded)
    laplacian_pyramid2.append(laplacian)

# Gaussian Pyramid 3
layer = img3.copy()
gaussian_pyramid3 = [layer]
for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid3.append(layer)

# Laplacian Pyramid 3
layer = gaussian_pyramid3[5]
laplacian_pyramid3 = [layer]
for i in range(5, 0, -1):
    size = (gaussian_pyramid3[i - 1].shape[1], gaussian_pyramid3[i - 1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid3[i], dstsize=size)
    laplacian = cv2.subtract(gaussian_pyramid3[i - 1], gaussian_expanded)
    laplacian_pyramid3.append(laplacian)

# Gaussian Pyramid 4
layer = img4.copy()
gaussian_pyramid4 = [layer]
for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid4.append(layer)

# Laplacian Pyramid 4
layer = gaussian_pyramid4[5]
laplacian_pyramid4 = [layer]
for i in range(5, 0, -1):
    size = (gaussian_pyramid4[i - 1].shape[1], gaussian_pyramid4[i - 1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid4[i], dstsize=size)
    laplacian = cv2.subtract(gaussian_pyramid4[i - 1], gaussian_expanded)
    laplacian_pyramid4.append(laplacian)

# Laplacian Pyramid Footbase_ball
faces_pyramid = []
n = 0
for img1_lap, img2_lap, img3_lap, img4_lap in zip(laplacian_pyramid, laplacian_pyramid2, laplacian_pyramid3, laplacian_pyramid4):
    n += 1
    laplacian = np.vstack((img1_lap, img2_lap, img3_lap, img4_lap))
    #cv2.imshow(str(n), img2_lap)
    faces_pyramid.append(laplacian)

# Reconstructed Faces
faces_reconstructed = faces_pyramid[0]
for i in range(1, 6):
    size = (faces_pyramid[i].shape[1], faces_pyramid[i].shape[0])
    faces_reconstructed = cv2.pyrUp(faces_reconstructed, dstsize=size)
    faces_reconstructed = cv2.add(faces_pyramid[i], faces_reconstructed)

cv2.imshow("Faces reconstructed", faces_reconstructed)
cv2.imshow("Faces", facestack)
#cv2.imshow("img1", img1)
#cv2.imshow("img2", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
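A likely cause, worth verifying: with height 200 the per-image pyramid heights are 200, 100, 50, 25, 13, 7, so each individual pyrUp is allowed to be off by one pixel; but the stacked pyramid multiplies that error by four (4*7 = 28 rows doubled gives 56, while the requested level height is 4*13 = 52), which violates the assertion. Two ways around it: pick dimensions divisible by 2**6 (for example a height of 256 instead of 200), or reconstruct each image's pyramid separately and only stack at the end. A sketch of the latter, reusing the per-image pyramids from the code above:

```python
def reconstruct(laplacian_pyr):
    # laplacian_pyr[0] is the smallest Gaussian level, the rest are Laplacian layers
    out = laplacian_pyr[0]
    for layer in laplacian_pyr[1:]:
        size = (layer.shape[1], layer.shape[0])
        out = cv2.pyrUp(out, dstsize=size)   # per-image sizes stay within the tolerance
        out = cv2.add(layer, out)
    return out

faces_reconstructed = np.vstack([
    reconstruct(laplacian_pyramid),
    reconstruct(laplacian_pyramid2),
    reconstruct(laplacian_pyramid3),
    reconstruct(laplacian_pyramid4),
])
```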
↧