# import the necessary packages
import imutils
import cv2


class BasicMotionDetector:
    """Detect motion by differencing each frame against a running average.

    Frames are accumulated into a weighted running average; pixels whose
    absolute difference from that average exceeds ``deltaThresh`` are
    thresholded, dilated, and grouped into contours.  Contours whose area
    exceeds ``minArea`` are reported as motion locations.
    """

    def __init__(self, accumWeight=0.5, deltaThresh=5, minArea=5000):
        # accumWeight: weight of the newest frame in the running average
        # deltaThresh: fixed threshold applied to the frame-delta image
        # minArea: minimum contour area (px^2) for "motion" to be reported
        # isv2 is kept for backward compatibility with existing callers,
        # but contour unpacking below no longer relies on it (it broke
        # under OpenCV 4.x, which reverted to a 2-tuple return).
        self.isv2 = imutils.is_cv2()
        self.accumWeight = accumWeight
        self.deltaThresh = deltaThresh
        self.minArea = minArea

        # running-average image; lazily initialized on the first update
        self.avg = None

    def update(self, image):
        """Update the motion model with a grayscale frame.

        Parameters
        ----------
        image : single-channel image (grayscale) of the current frame.

        Returns
        -------
        list of contours (one per motion region larger than ``minArea``);
        empty on the very first call, which only seeds the average.
        """
        # initialize the list of locations containing motion
        locs = []

        # if the average image is None, seed it and report no motion yet
        if self.avg is None:
            self.avg = image.astype("float")
            return locs

        # accumulate the weighted average between the current frame and
        # the previous frames, then compute the pixel-wise differences
        # between the current frame and the running average
        cv2.accumulateWeighted(image, self.avg, self.accumWeight)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))

        # threshold the delta image and apply a series of dilations
        # to help fill in holes
        thresh = cv2.threshold(frameDelta, self.deltaThresh, 255,
            cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        # find contours in the thresholded image.  cv2.findContours
        # returns (contours, hierarchy) in OpenCV 2.x and 4.x but
        # (image, contours, hierarchy) in 3.x, so select by tuple
        # length rather than by the cached version flag
        cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]

        # keep only the contours large enough to count as motion
        for c in cnts:
            if cv2.contourArea(c) > self.minArea:
                locs.append(c)

        # return the set of locations
        return locs
# USAGE
# python multi_cam_motion.py

# import the necessary packages
from __future__ import print_function
# NOTE: this patch adds basicmotiondetector.py at the repository root,
# so import it as a sibling module (the original referenced a
# `pyimagesearch` package that this patch does not create)
from basicmotiondetector import BasicMotionDetector
from imutils.video import VideoStream
import numpy as np
import datetime
import imutils
import time
import cv2


def main():
    """Run per-camera motion detection on a USB webcam and a Pi camera.

    Reads frames from both streams, feeds each through its own
    BasicMotionDetector, draws a bounding box around detected motion
    plus a timestamp, and displays one window per camera until the
    `q` key is pressed.
    """
    # initialize the video streams and allow their sensors to warm up
    print("[INFO] starting cameras...")
    webcam = VideoStream(src=0).start()
    picam = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)

    # one motion detector per camera, plus the total frames processed
    camMotion = BasicMotionDetector()
    piMotion = BasicMotionDetector()
    total = 0

    # loop over frames from the video streams
    while True:
        # frames processed this iteration, in (webcam, picam) order
        frames = []

        # loop over the streams and their respective motion detectors
        for (stream, motion) in zip((webcam, picam), (camMotion, piMotion)):
            # read the next frame and resize it to a max width of 400px
            frame = stream.read()
            frame = imutils.resize(frame, width=400)

            # convert to grayscale, blur slightly, then update the
            # motion detector with the preprocessed frame
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            locs = motion.update(gray)

            # let the detector's running average stabilize over the
            # first 32 frames before reporting any motion
            if total < 32:
                frames.append(frame)
                continue

            # otherwise, check to see if motion was detected
            if len(locs) > 0:
                # accumulate the min/max (x, y)-coordinates over all
                # motion contours to form one enclosing bounding box
                (minX, minY) = (np.inf, np.inf)
                (maxX, maxY) = (-np.inf, -np.inf)

                for l in locs:
                    (x, y, w, h) = cv2.boundingRect(l)
                    (minX, maxX) = (min(minX, x), max(maxX, x + w))
                    (minY, maxY) = (min(minY, y), max(maxY, y + h))

                # draw the bounding box
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                    (0, 0, 255), 3)

            # update the frames list
            frames.append(frame)

        # increment the total number of frames read and grab the
        # current timestamp
        total += 1
        timestamp = datetime.datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")

        # draw the timestamp on each frame and display it
        for (frame, name) in zip(frames, ("Webcam", "Picamera")):
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
            cv2.imshow(name, frame)

        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # do a bit of cleanup
    print("[INFO] cleaning up...")
    cv2.destroyAllWindows()
    webcam.stop()
    picam.stop()


if __name__ == "__main__":
    main()