This project uses Python and OpenCV to implement real-time eye tracking. No high-end hardware is needed; an ordinary webcam is enough. The effect is shown below.
For a video demonstration of the project, see: /video/av75181965/
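Before running the full program, it is worth confirming that OpenCV can actually read frames from your camera. The snippet below is a minimal sanity-check sketch, not part of the project code; it assumes the default camera sits at index 0:

import cv2

# Minimal webcam check: show frames from the default camera until 'q' is pressed.
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("camera test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()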
The main program of the project is as follows:
import sys

import cv2
import numpy as np
import process
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
from PyQt5.QtGui import QPixmap, QImage


class Window(QMainWindow):
    def __init__(self):
        super(Window, self).__init__()
        # NOTE: the file names ('GUI.ui', 'style.css') and the widget names used
        # below (startButton, stopButton, baseImage, leftEyeBox, rightEyeBox,
        # leftEyeCheckbox, rightEyeCheckbox, leftEyeThreshold, rightEyeThreshold,
        # pupilsCheckbox) must match the Qt Designer project files.
        loadUi('GUI.ui', self)
        with open("style.css", "r") as css:
            self.setStyleSheet(css.read())
        self.face_detector, self.eye_detector, self.detector = process.init_cv()
        self.startButton.clicked.connect(self.start_webcam)
        self.stopButton.clicked.connect(self.stop_webcam)
        self.camera_is_running = False
        self.previous_right_keypoints = None
        self.previous_left_keypoints = None
        self.previous_right_blob_area = None
        self.previous_left_blob_area = None

    def start_webcam(self):
        if not self.camera_is_running:
            self.capture = cv2.VideoCapture(cv2.CAP_DSHOW)  # VideoCapture(0) sometimes drops error #-1072875772
            if not self.capture.isOpened():
                self.capture = cv2.VideoCapture(0)
            self.camera_is_running = True
            self.timer = QTimer(self)
            self.timer.timeout.connect(self.update_frame)
            self.timer.start(2)

    def stop_webcam(self):
        if self.camera_is_running:
            self.capture.release()
            self.timer.stop()
            self.camera_is_running = not self.camera_is_running

    def update_frame(self):  # logic of the main loop
        _, base_image = self.capture.read()
        self.display_image(base_image)

        processed_image = cv2.cvtColor(base_image, cv2.COLOR_BGR2GRAY)  # webcam frames are BGR

        face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position, _, _ = process.detect_face(
            base_image, processed_image, self.face_detector)

        if face_frame is not None:
            left_eye_frame, right_eye_frame, left_eye_frame_gray, right_eye_frame_gray = process.detect_eyes(
                face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position,
                self.eye_detector)

            if right_eye_frame is not None:
                if self.rightEyeCheckbox.isChecked():
                    right_eye_threshold = self.rightEyeThreshold.value()
                    right_keypoints, self.previous_right_keypoints, self.previous_right_blob_area = self.get_keypoints(
                        right_eye_frame, right_eye_frame_gray, right_eye_threshold,
                        previous_area=self.previous_right_blob_area,
                        previous_keypoint=self.previous_right_keypoints)
                    process.draw_blobs(right_eye_frame, right_keypoints)

                right_eye_frame = np.require(right_eye_frame, np.uint8, 'C')
                self.display_image(right_eye_frame, window='right')

            if left_eye_frame is not None:
                if self.leftEyeCheckbox.isChecked():
                    left_eye_threshold = self.leftEyeThreshold.value()
                    left_keypoints, self.previous_left_keypoints, self.previous_left_blob_area = self.get_keypoints(
                        left_eye_frame, left_eye_frame_gray, left_eye_threshold,
                        previous_area=self.previous_left_blob_area,
                        previous_keypoint=self.previous_left_keypoints)
                    process.draw_blobs(left_eye_frame, left_keypoints)

                left_eye_frame = np.require(left_eye_frame, np.uint8, 'C')
                self.display_image(left_eye_frame, window='left')

        if self.pupilsCheckbox.isChecked():  # draws keypoints on pupils on main window
            self.display_image(base_image)

    def get_keypoints(self, frame, frame_gray, threshold, previous_keypoint, previous_area):
        keypoints = process.process_eye(frame_gray, threshold, self.detector, prevArea=previous_area)
        if keypoints:
            previous_keypoint = keypoints
            previous_area = keypoints[0].size
        else:
            keypoints = previous_keypoint  # fall back to the last detection
        return keypoints, previous_keypoint, previous_area

    def display_image(self, img, window='main'):  # Makes OpenCV images displayable on PyQt, displays them
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:  # RGBA
                qformat = QImage.Format_RGBA8888
            else:  # RGB
                qformat = QImage.Format_RGB888

        out_image = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        out_image = out_image.rgbSwapped()  # BGR to RGB
        if window == 'main':  # main window
            self.baseImage.setPixmap(QPixmap.fromImage(out_image))
            self.baseImage.setScaledContents(True)
        if window == 'left':  # left eye window
            self.leftEyeBox.setPixmap(QPixmap.fromImage(out_image))
            self.leftEyeBox.setScaledContents(True)
        if window == 'right':  # right eye window
            self.rightEyeBox.setPixmap(QPixmap.fromImage(out_image))
            self.rightEyeBox.setScaledContents(True)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Window()
    window.setWindowTitle("GUI")
    window.show()
    sys.exit(app.exec_())
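The main program depends on a Qt Designer layout and its widgets. To try the processing pipeline without the GUI, the same process module (listed in the next section) can be driven from a plain OpenCV loop. The sketch below is only an illustration under assumptions that differ from the project: the pupil threshold is fixed at 42 instead of being read from the GUI sliders, and the result is shown in an OpenCV window:

import cv2
import process

# Headless variant of update_frame(): same pipeline, no Qt GUI.
face_detector, eye_detector, blob_detector = process.init_cv()
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face, face_gray, lest, rest, _, _ = process.detect_face(frame, gray, face_detector)
    if face is not None:
        left, right, left_g, right_g = process.detect_eyes(face, face_gray, lest, rest, eye_detector)
        for eye, eye_g in ((left, left_g), (right, right_g)):
            if eye is not None:
                keypoints = process.process_eye(eye_g, 42, blob_detector)  # fixed threshold, no slider
                if keypoints:
                    process.draw_blobs(eye, keypoints)
    cv2.imshow("eye tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Because the eye frames are NumPy views into the base frame, drawing blobs on them also marks the pupils in the full camera image.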
The face and eye detection module (the process module imported by the main program) is as follows:
import os

import cv2
import numpy as np


def init_cv():
    """Loads all of the cv2 tools: face cascade, eye cascade, blob detector."""
    face_detector = cv2.CascadeClassifier(
        os.path.join("Classifiers", "haar", "haarcascade_frontalface_default.xml"))
    eye_detector = cv2.CascadeClassifier(
        os.path.join("Classifiers", "haar", "haarcascade_eye.xml"))
    detector_params = cv2.SimpleBlobDetector_Params()
    detector_params.filterByArea = True
    detector_params.maxArea = 1500
    detector = cv2.SimpleBlobDetector_create(detector_params)

    return face_detector, eye_detector, detector


def detect_face(img, img_gray, cascade):
    """
    Detects all faces; if multiple are found, works with the biggest. Returns the following parameters:
    1. The face frame
    2. A gray version of the face frame
    3. Estimated left eye coordinates range
    4. Estimated right eye coordinates range
    5. X of the face frame
    6. Y of the face frame
    """
    coords = cascade.detectMultiScale(img, 1.3, 5)

    if len(coords) > 1:
        biggest = (0, 0, 0, 0)
        for i in coords:
            if i[3] > biggest[3]:
                biggest = i
        biggest = np.array([biggest], np.int32)  # keep the biggest face, not the last one found
    elif len(coords) == 1:
        biggest = coords
    else:
        return None, None, None, None, None, None

    for (x, y, w, h) in biggest:
        frame = img[y:y + h, x:x + w]
        frame_gray = img_gray[y:y + h, x:x + w]
        lest = (int(w * 0.1), int(w * 0.45))  # the left eye should lie in this x-range of the face
        rest = (int(w * 0.55), int(w * 0.9))  # the right eye should lie in this x-range
        X = x
        Y = y

    return frame, frame_gray, lest, rest, X, Y


def detect_eyes(img, img_gray, lest, rest, cascade):
    """
    :param img: image frame
    :param img_gray: gray image frame
    :param lest: left eye estimated position, needed to filter out nostrils and tell which eye was found
    :param rest: right eye estimated position
    :param cascade: Haar cascade
    :return: colored and grayscale versions of the eye frames
    """
    leftEye = None
    rightEye = None
    leftEyeG = None
    rightEyeG = None
    coords = cascade.detectMultiScale(img_gray, 1.3, 5)

    if coords is None or len(coords) == 0:
        pass
    else:
        for (x, y, w, h) in coords:
            eyecenter = int(float(x) + (float(w) / float(2)))
            if lest[0] < eyecenter and eyecenter < lest[1]:
                leftEye = img[y:y + h, x:x + w]
                leftEyeG = img_gray[y:y + h, x:x + w]
                leftEye, leftEyeG = cut_eyebrows(leftEye, leftEyeG)
            elif rest[0] < eyecenter and eyecenter < rest[1]:
                rightEye = img[y:y + h, x:x + w]
                rightEyeG = img_gray[y:y + h, x:x + w]
                rightEye, rightEyeG = cut_eyebrows(rightEye, rightEyeG)
            else:
                pass  # nostril

    return leftEye, rightEye, leftEyeG, rightEyeG


def process_eye(img, threshold, detector, prevArea=None):
    """
    :param img: eye frame
    :param threshold: threshold value for the threshold function
    :param detector: blob detector
    :param prevArea: area of the previous keypoint (used for filtering)
    :return: keypoints
    """
    _, img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
    img = cv2.erode(img, None, iterations=2)
    img = cv2.dilate(img, None, iterations=4)
    img = cv2.medianBlur(img, 5)
    keypoints = detector.detect(img)
    if keypoints and prevArea and len(keypoints) > 1:
        tmp = 1000
        for keypoint in keypoints:  # filter out odd blobs
            if abs(keypoint.size - prevArea) < tmp:
                ans = keypoint
                tmp = abs(keypoint.size - prevArea)
        keypoints = [ans]  # keep only the blob closest in size to the previous one
    return keypoints


def cut_eyebrows(img, imgG):
    height, width = img.shape[:2]
    img = img[15:height, 0:width]  # cut eyebrows out (15 px)
    imgG = imgG[15:height, 0:width]

    return img, imgG


def draw_blobs(img, keypoints):
    """Draws blobs"""
    cv2.drawKeypoints(img, keypoints, img, (0, 0, 255),
                      cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
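The core of process_eye() is the threshold-then-morphology chain: binarizing turns the dark pupil into a solid blob, the erode/dilate passes clean up noise in the binary mask, and the median blur smooths the blob's outline before detection. When tuning the threshold, it helps to inspect each intermediate stage. The sketch below does this for a saved eye crop; the file name eye.jpg and the threshold value 42 are placeholders, not values from the project:

import cv2

# Visualize the intermediate stages of the process_eye() pipeline on one image.
img = cv2.imread("eye.jpg", cv2.IMREAD_GRAYSCALE)  # placeholder path
_, binary = cv2.threshold(img, 42, 255, cv2.THRESH_BINARY)
eroded = cv2.erode(binary, None, iterations=2)
dilated = cv2.dilate(eroded, None, iterations=4)
blurred = cv2.medianBlur(dilated, 5)
for name, stage in (("threshold", binary), ("erode", eroded),
                    ("dilate", dilated), ("blur", blurred)):
    cv2.imshow(name, stage)
cv2.waitKey(0)
cv2.destroyAllWindows()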
That concludes this article.