I am working on a project based on a motion detection program.
It detects changes in the background as "motion", so I would like a way to re-capture a new first frame every few minutes to replace the current one and work around this (I have put a rough sketch of the idea after the code below).
I am using a Raspberry Pi 2B and a Logitech webcam.
The code I am using is based on PyImageSearch.
Here is my version of the code:
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
import numpy as np
import cv2
import imutils
from imutils import contours
from skimage import measure
import datetime
import time

# capture feed from webcam
cap = cv2.VideoCapture(0)

# initialize the first frame of the video stream
firstframe = None
avg = None

# loop over the frames of the video/stream
while cap.isOpened():
    # grab the current frame and initialize the occupied/unoccupied text
    (grabbed, frame) = cap.read()
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end of the video
    if not grabbed:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    gray = cv2.medianBlur(gray, 5)

    # if the first frame is None, initialize it
    if firstframe is None:
        firstframe = gray
        continue

    # if the running average is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        continue

    # update the running average, then compute the absolute difference
    # between the current frame and the first frame
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(firstframe, gray)
    thresh = cv2.threshold(frameDelta, 70, 255, cv2.THRESH_BINARY)[1]

    # perform a series of erosions and dilations to remove any small
    # blobs of noise from the thresholded image,
    # then find contours in the thresholded image
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)
    (_, cnts, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for (i, c) in enumerate(cnts):
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < 500:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # update the text, and print the time of the detection
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, "#{}".format(i + 1), (x, y - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        text = "Occupied"
        print("Object detected at " + time.strftime("%I:%M:%S"))

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and the intermediate images
    cv2.imshow('frame', frame)
    cv2.imshow('Thresh', thresh)
    cv2.imshow('FrameDelta', frameDelta)

    # if 'q' is pressed, break from the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# cleanup the capture and close any open windows
cap.release()
cv2.destroyAllWindows()
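
What I have in mind is something like the rough, untested sketch below. RESET_INTERVAL is just a value I picked (5 minutes), and I have only kept the parts that would change: the reference frame is simply reassigned on a timer and the rest of the detection stays as in my code above. Is reassigning firstframe like this a reasonable way to do it, or is there a better approach?

import time
import cv2
import imutils

RESET_INTERVAL = 300  # seconds between background resets (5 minutes, arbitrary)

cap = cv2.VideoCapture(0)
firstframe = None
last_reset = time.time()

while cap.isOpened():
    (grabbed, frame) = cap.read()
    if not grabbed:
        break

    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # (re-)capture the reference frame on the first pass, and again
    # whenever RESET_INTERVAL seconds have passed since the last reset
    if firstframe is None or (time.time() - last_reset) > RESET_INTERVAL:
        firstframe = gray
        last_reset = time.time()
        continue

    frameDelta = cv2.absdiff(firstframe, gray)
    # ... rest of the detection code stays the same as above ...

cap.release()
cv2.destroyAllWindows()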