Components and supplies
Arduino UNO
Apps and platforms
Crazyflie Python Client
Project description
Code
Code snippet #10
python
# Face and eye detection with Haar cascades (written for Python 2 and an OpenCV 2.x-era install).
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('F:/Program Files/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('F:/Program Files/opencv/sources/data/haarcascades/haarcascade_eye.xml')

cap = cv2.VideoCapture(0)
while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.5, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        # Look for eyes only inside the detected face region
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    print "found " + str(len(faces)) + " face(s)"
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
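Note that the snippet above targets Python 2 and the OpenCV 2.x layout (hence the print statement and the F:/ cascade paths). On Python 3 with a current opencv-python package, a roughly equivalent detection loop might look like the sketch below; the cv2.data.haarcascades path and the explicit ret check are assumptions on my part, not part of the original project.

# Sketch: the same face + eye detection loop on Python 3 / OpenCV 4.
import cv2

# opencv-python ships the Haar cascades; cv2.data.haarcascades points at them.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')

cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    print("found " + str(len(faces)) + " face(s)")
    cv2.imshow('img', img)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()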
Code snippet #14
python
# Live face recognition with the trained LBPH model (Python 2 / OpenCV 2.x API).
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('F:/Program Files/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
rec = cv2.createLBPHFaceRecognizer()
rec.load("F:/Program Files/projects/face_rec/faceREC/trainingdata.yml")
id = 0
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 5, 1, 0, 4)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.5, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Predict the numeric label of the face region, then map it to a name
        id, conf = rec.predict(gray[y:y + h, x:x + w])
        if id == 1 or id == 2:
            id = "alok"
        elif id == 3:
            id = "anjali"
        elif id == 4:
            id = "Gaurav"
        elif id == 5:
            id = "rahul"
        elif id == 6:
            id = "akshay"
        cv2.cv.PutText(cv2.cv.fromarray(img), str(id), (x, y + h), font, 255)
    cv2.imshow('img', img)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
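cv2.createLBPHFaceRecognizer and the cv2.cv text-drawing helpers were removed in OpenCV 3; in current builds the recognizer lives in the cv2.face module of opencv-contrib-python and text is drawn with cv2.putText. A minimal Python 3 sketch under those assumptions (the relative trainingdata.yml path and the font scale are placeholders, not the author's values):

# Sketch: LBPH recognition loop on Python 3 (requires opencv-contrib-python).
import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainingdata.yml")   # assumed path to the trained model
names = {1: "alok", 2: "alok", 3: "anjali", 4: "Gaurav", 5: "rahul", 6: "akshay"}

cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.5, 5):
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        label, conf = recognizer.predict(gray[y:y + h, x:x + w])
        name = names.get(label, str(label))
        cv2.putText(img, name, (x, y + h), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 255, 255), 2)
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()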
Code snippet #11
python
# Capture about 20 face samples for one user and save them for training (Python 2 / OpenCV 2.x).
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('F:/Program Files/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)

id = raw_input('enter user id')
sampleN = 0

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        sampleN = sampleN + 1
        # File names follow User.<id>.<sample>.jpg; the training script reads the id back from the name
        cv2.imwrite("F:/Program Files/projects/face_rec/facesData/User." + str(id) + "." + str(sampleN) + ".jpg", gray[y:y + h, x:x + w])
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.waitKey(100)
    cv2.imshow('img', img)
    cv2.waitKey(1)
    if sampleN > 20:
        break

cap.release()
cv2.destroyAllWindows()
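For a Python 3 run of the same sampling step, raw_input becomes input and the output folder can be created on the fly. The relative facesData folder below is an assumed placeholder for the F:/ path used above:

# Sketch: capture ~20 face samples for one user on Python 3 (paths are placeholders).
import os
import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)

user_id = input('enter user id: ')
out_dir = 'facesData'                 # assumed output folder
os.makedirs(out_dir, exist_ok=True)

sample_n = 0
while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):
        sample_n += 1
        # The User.<id>.<n>.jpg pattern is what the training script parses later
        cv2.imwrite(os.path.join(out_dir, "User.%s.%d.jpg" % (user_id, sample_n)), gray[y:y + h, x:x + w])
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.waitKey(100)
    cv2.imshow('img', img)
    cv2.waitKey(1)
    if sample_n > 20:
        break

cap.release()
cv2.destroyAllWindows()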
Code snippet #13
python
# Train the LBPH face recognizer on the saved samples (Python 2 / OpenCV 2.x).
import os
import numpy as np
import cv2
from PIL import Image  # used to read the sample images; for recognition we use the LBPH Face Recognizer

recognizer = cv2.createLBPHFaceRecognizer()
path = "F:/Program Files/projects/face_rec/facesData"

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        # Read the image and convert it to grayscale
        facesImg = Image.open(imagePath).convert('L')
        faceNP = np.array(facesImg, 'uint8')
        # Get the label from the file name (User.<id>.<sample>.jpg)
        ID = int(os.path.split(imagePath)[-1].split(".")[1])
        faces.append(faceNP)
        IDs.append(ID)
        cv2.imshow("Adding faces for training", faceNP)
        cv2.waitKey(10)
    return np.array(IDs), faces

Ids, faces = getImagesWithID(path)
recognizer.train(faces, Ids)
recognizer.save("F:/Program Files/projects/face_rec/faceREC/trainingdata.yml")
cv2.destroyAllWindows()
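With opencv-contrib-python, the trainer is created through cv2.face and the model is saved with write() instead of save(). A minimal Python 3 sketch, assuming the same User.<id>.<sample>.jpg naming and a relative facesData folder:

# Sketch: train an LBPH recognizer from the captured samples (requires opencv-contrib-python).
import os
import numpy as np
import cv2
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()
path = "facesData"                    # assumed sample folder

faces, ids = [], []
for image_path in [os.path.join(path, f) for f in os.listdir(path)]:
    img = Image.open(image_path).convert('L')                       # grayscale sample
    faces.append(np.array(img, 'uint8'))
    ids.append(int(os.path.split(image_path)[-1].split(".")[1]))    # id from User.<id>.<n>.jpg

recognizer.train(faces, np.array(ids))
recognizer.write("trainingdata.yml")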
Code snippet #9
python
# Open an input video and an XVID-encoded output writer, then check that both opened correctly.
import cv2

cap = cv2.VideoCapture("input_video.mp4")
print cap.isOpened()   # True = the input video was opened successfully, False = it was not

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("output_video.avi", fourcc, 20.0, (640, 360))
print out.isOpened()   # True = the output writer was opened successfully, False = it was not

cap.release()
out.release()
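As written, this snippet only verifies that the reader and writer opened and then releases them; no frames are ever copied. To actually produce the output file you would also read, resize, and write each frame, roughly as sketched below (the file names and the 640x360 frame size are simply the values used above):

# Sketch: copy frames from the input clip into the XVID-encoded output file.
import cv2

cap = cv2.VideoCapture("input_video.mp4")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("output_video.avi", fourcc, 20.0, (640, 360))

while True:
    ret, frame = cap.read()
    if not ret:                              # end of video (or read error)
        break
    frame = cv2.resize(frame, (640, 360))    # writer expects exactly this frame size
    out.write(frame)

cap.release()
out.release()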