# This is a proof of concept for motion tracking of the vernier, in very early stages.
# TODO: stabilize the tracker, connect the plumbing via OSC to the SuperCollider app,
# and feed the stream to the Open Stage Control GUI for calibration.

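# Dependencies: opencv-python, python-osc, flask (inferred from the imports).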
import cv2
import sys
from pythonosc.udp_client import SimpleUDPClient
from flask import Flask, render_template, Response
import threading
import argparse

outputFrame = None
lock = threading.Lock()

app = Flask(__name__)

ip = "127.0.0.1"
port = 57120

client = SimpleUDPClient(ip, port) # Create client
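# Note: 127.0.0.1:57120 is the default address and port on which sclang (the
# SuperCollider language) listens, so these messages reach the SuperCollider
# app without extra configuration.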

# Read video (eventually this will be the live capture from the camera).
# Earlier local path, kept for reference:
#video = cv2.VideoCapture("/home/mwinter/Sketches/a_history_of_the_domino_problem/recs/a_history_of_the_domino_problem_final_documentation_hq.mp4")
video = cv2.VideoCapture("/home/mwinter/Portfolio/a_history_of_the_domino_problem/a_history_of_the_domino_problem/recs/a_history_of_the_domino_problem_final_documentation_hq.mp4")

# Exit if video not opened.
if not video.isOpened():
    print("Could not open video")
    sys.exit()

# Read first frame.
video.set(cv2.CAP_PROP_POS_FRAMES, 5000)
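# (CAP_PROP_POS_FRAMES seeks the capture to frame 5000, skipping the opening
# of the recording before grabbing a reference frame.)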
#ok, frame = video.read()  # superseded by the initFrame read below
ok, initFrame = video.read()
if not ok:
    print('Cannot read video file')
    sys.exit()

# Define an initial bounding box
#bbox = (287, 23, 86, 320)

#frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
#frame = cv2.GaussianBlur(frame,(5,5),cv2.BORDER_DEFAULT)

# Interactive ROI selection from the earlier revision, kept for reference:
#r1 = cv2.selectROI('Tracking', frame)
#r2 = cv2.selectROI('Tracking', frame)
#r = (606, 448, 35, 177);
#cv2.destroyWindow('select')
#print(r)
#crop1 = frame[int(r1[1]):int(r1[1]+r1[3]), int(r1[0]):int(r1[0]+r1[2])]
#crop2 = frame[int(r2[1]):int(r2[1]+r2[3]), int(r2[0]):int(r2[0]+r2[2])]

# All of this is for selecting the ROIs interactively; the chosen values are
# hard-coded below.
#xROI = cv2.selectROI('Tracking', initFrame)
#yROI = cv2.selectROI('Tracking', initFrame)
#print(xROI)
#print(yROI)
#xFine = (xROI[0], xROI[1], xROI[2], xROI[3] / 2)
#xCourse = (xROI[0], xROI[1] + (xROI[3] / 2), xROI[2], xROI[3] / 2)
#yFine = (yROI[0], yROI[1], yROI[2] / 2, yROI[3])
#yCourse = (yROI[0] + (yROI[2] / 2), yROI[1], yROI[2] / 2, yROI[3])
#print(xFine)
#print(yFine)

xFine = (848, 187, 225, 21.0)
yFine = (604, 402, 20.5, 276)
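# Each ROI is an (x, y, width, height) tuple in pixels, the format returned by
# cv2.selectROI; these values were presumably read off an earlier interactive run.
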
def track(frame, ROI, centroid, update):
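    """Locate the thresholded-blob centroid inside ROI and draw it on frame.

    ROI is an (x, y, w, h) tuple and centroid is a two-element list that is
    updated in place, so the caller keeps the last known position whenever
    update is False.
    """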
    if update:
        crop = frame[int(ROI[1]):int(ROI[1]+ROI[3]), int(ROI[0]):int(ROI[0]+ROI[2])]
        crop = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
        crop = cv2.GaussianBlur(crop, (7, 7), cv2.BORDER_DEFAULT)

        #ret, thresh = cv2.threshold(crop, 100, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        ret, thresh = cv2.threshold(crop, 50, 255, 0)
        M = cv2.moments(thresh)

        # Superseded two-ROI contour pipeline from the earlier revision, kept
        # for reference:
        #crop1 = frame[int(r1[1]):int(r1[1]+r1[3]), int(r1[0]):int(r1[0]+r1[2])]
        #crop1 = cv2.cvtColor(crop1, cv2.COLOR_RGB2GRAY)
        #crop1 = cv2.GaussianBlur(crop1,(5,5),cv2.BORDER_DEFAULT)
        #crop2 = frame[int(r2[1]):int(r2[1]+r2[3]), int(r2[0]):int(r2[0]+r2[2])]
        #crop2 = cv2.cvtColor(crop2, cv2.COLOR_RGB2GRAY)
        #crop2 = cv2.GaussianBlur(crop2,(5,5),cv2.BORDER_DEFAULT)
        #ret1, thresh1 = cv2.threshold(crop1, 230, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        #cnts1 = cv2.findContours(thresh1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #cnts1 = cnts1[1]  # index 1 is the contour list under OpenCV 3.x; use 0 under 4.x
        #ret2, thresh2 = cv2.threshold(crop2, 230, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        #cnts2 = cv2.findContours(thresh2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #cnts2 = cnts2[1]
        #center = None
        #for c in cnts1[0:2]:
        #    # calculate moments for each contour
        #    M = cv2.moments(c)

        # calculate x,y coordinate of center
if M["m00"] != 0:
|
|
|
|
|
cX = int(M["m10"] / M["m00"])
|
|
|
|
|
cY = int(M["m01"] / M["m00"])
|
|
|
|
|
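            # Centroid from image moments: cx = m10/m00, cy = m01/m00.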
centroid[0] = int(M["m10"] / M["m00"])
|
|
|
|
|
centroid[1] = int(M["m01"] / M["m00"])
|
|
|
|
|
#else:
|
|
|
|
|
# cX, cY = 0, 0
|
|
|
|
|
#print(cY)
|
|
|
|
|
        #cv2.circle(frame, (int(r1[0]) + cX, int(r1[1]) + cY), 5, (255, 255, 255), -1)
    cv2.circle(frame, (int(ROI[0]) + centroid[0], int(ROI[1]) + centroid[1]), 5, (255, 255, 255), -1)


def detect_motion():
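    """Track both ROIs on the video, send positions over OSC, and publish
    annotated frames for the Flask stream."""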
    # grab global references to the video stream, output frame, and
    # lock variables
    global video, outputFrame, lock

    frameCountMod = 0
    centroidX = [0, 0]
    centroidY = [0, 0]

    # Main tracking loop over the video frames.
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        if frameCountMod == 0:
            # Re-detect both centroids and send the normalized position,
            # mapped from ROI-relative pixels to [-1, 1] on each axis.
            track(frame, xFine, centroidX, True)
            track(frame, yFine, centroidY, True)
            xPos = (centroidX[0] / xFine[2]) * 2 - 1
            yPos = (centroidY[1] / yFine[3]) * 2 - 1
            client.send_message("/trackerpos", [xPos, yPos])
        else:
            # Otherwise just redraw the last known centroids.
            track(frame, xFine, centroidX, False)
            track(frame, yFine, centroidY, False)
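
        # For testing the OSC side without SuperCollider, a minimal python-osc
        # receiver could look like this (a sketch, not part of this script;
        # run it in place of sclang on the same port):
        #
        #   from pythonosc.dispatcher import Dispatcher
        #   from pythonosc.osc_server import BlockingOSCUDPServer
        #   dispatcher = Dispatcher()
        #   dispatcher.map("/trackerpos", lambda addr, x, y: print(addr, x, y))
        #   BlockingOSCUDPServer((ip, port), dispatcher).serve_forever()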

        # Full re-detection (and an OSC send) only happens every 10th frame.
        frameCountMod = (frameCountMod + 1) % 10

        # Draw the two ROIs.
        cv2.rectangle(frame, (int(xFine[0]), int(xFine[1])), (int(xFine[0] + xFine[2]), int(xFine[1] + xFine[3])), (255, 255, 255), 5)
        cv2.rectangle(frame, (int(yFine[0]), int(yFine[1])), (int(yFine[0] + yFine[2]), int(yFine[1] + yFine[3])), (255, 255, 255), 5)

        # Display result
        #cv2.imshow("Tracking", frame)
        #cv2.imshow("Crop", crop)

        # Superseded minimum-enclosing-circle tracking from the earlier
        # revision, kept for reference:
        ## only proceed if at least one contour was found
        #if len(cnts2) > 0:
        #    # find the largest contour in the mask, then use it to compute
        #    # the minimum enclosing circle and centroid
        #    c = max(cnts2, key=cv2.contourArea)
        #    ((x, y), radius) = cv2.minEnclosingCircle(c)
        #    M = cv2.moments(c)
        #    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        #
        #    # only proceed if the radius meets a minimum size
        #    if radius > 5:
        #        # draw the circle and centroid on the frame,
        #        # then update the list of tracked points
        #        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        #        cv2.circle(frame, center, 5, (0, 0, 255), -1)

        # Hand the annotated frame to the Flask streaming generator.
        with lock:
            outputFrame = frame.copy()

        # Exit if ESC pressed
        #k = cv2.waitKey(1) & 0xff
        #if k == 27:
        #    cv2.destroyWindow('Tracking')
        #    break


@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')

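# (render_template looks for templates/index.html relative to this script;
# that page is expected to embed <img src="/video_feed">, per the route below.)
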
def generate():
    """Video streaming generator function."""
    # grab global references to the output frame and lock variables
    global outputFrame, lock

    # loop over frames from the output stream
    while True:
        # wait until the lock is acquired
        with lock:
            # check if the output frame is available, otherwise skip
            # this iteration of the loop
            if outputFrame is None:
                continue
# encode the frame in JPEG format
|
|
|
|
|
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
|
|
|
|
|
|
|
|
|
|
# ensure the frame was successfully encoded
|
|
|
|
|
if not flag:
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
|
|
        # yield the output frame in the byte format
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(encodedImage) + b'\r\n')

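# Each yielded chunk is one JPEG frame in a multipart/x-mixed-replace stream;
# the 'frame' boundary here must match the boundary declared in the Response
# mimetype below.
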
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    # Run the tracker in a background daemon thread so Flask can serve the
    # MJPEG stream from the main thread.
    t = threading.Thread(target=detect_motion)
    t.daemon = True
    t.start()
    app.run(host='10.0.0.5', threaded=True)
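    # Note: 10.0.0.5 is a LAN-specific bind address; swap in '0.0.0.0' or the
    # host's own IP as appropriate for the setup.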