Here you go B
3 .gitignore vendored Normal file
@@ -0,0 +1,3 @@
.vscode
*.tsv
__pycache__
14 README.md Normal file
@@ -0,0 +1,14 @@
To install dependencies:
```
pip install -r requirements.txt
```

To process a video and generate TSV data:
```
python video.py
```

To process the TSV data:
```
python data.py
```
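video.py writes out.tsv with three tab-separated columns (time in seconds, the raw motion count, and the rolling-average count), and data.py reads them back through SignalProcess as times, counts, and ave_counts. A minimal sketch for peeking at those columns, assuming out.tsv has already been generated by `python video.py`:

```
import numpy as np

# out.tsv layout (written by video.py): time [s], raw motion count, rolling-average count
data = np.loadtxt("out.tsv", delimiter="\t")
times, counts, ave_counts = data[:, 0], data[:, 1], data[:, 2]
print(times[:5], counts[:5], ave_counts[:5])
```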
58 data.py Normal file
@@ -0,0 +1,58 @@
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

from signalprocess import SignalProcess

# Read data from the video processing pipeline
sig = SignalProcess(path="out.tsv")
# Uncomment to see the FFT plot
# sig.fft()

# Apply the Butterworth filter from the SignalProcess class
data = sig.butter_filter()


plt.plot(sig.times, data)
plt.plot(sig.times, sig.ave_counts)
# Uncomment to see the raw waveform
# plt.plot(sig.times, sig.counts)

# Peak finding algorithm provided by the scipy package
peaks, descript = signal.find_peaks((-1) * data, prominence=0.2, distance=30)

# Plot the calculated peaks
plt.scatter(
    sig.times[peaks],
    data[peaks],
)

# Array of peak-to-peak distances
distances = []
# Array of the peak heights as calculated in the plot
heights = []
for i in range(len(peaks)):
    heights.append(data[peaks[i]])

for i in range(1, len(peaks)):
    distance = (peaks[i] - peaks[i - 1]) / 30.0  # frames -> seconds at 30 fps
    distances.append(distance)

# Basic stats
print("distances: median={}, std={}".format(np.median(distances), np.std(distances)))
print("heights: median={}, std={}".format(np.median(heights), np.std(heights)))

# Reject outlier peaks using the median absolute deviation of the peak heights
d = np.abs(np.asarray(heights) - np.median(heights))
mdev = np.median(d)
s = d / mdev if mdev else np.zeros_like(d)
peaks = peaks[s < 3.0]

# Plot the surviving peaks on top of the old peak data
plt.scatter(
    sig.times[peaks],
    data[peaks],
)

# Show the plot
plt.show()
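For intuition about the prominence and distance arguments passed to scipy's find_peaks above, here is a standalone sketch on a synthetic signal; the sine wave and its 0.5 Hz frequency are illustrative values, not repository data:

```
import numpy as np
from scipy import signal

# 10 s of a 0.5 Hz sine sampled at 30 fps, so one dip roughly every 2 s (made-up test signal)
t = np.arange(0, 10, 1.0 / 30.0)
x = np.sin(2 * np.pi * 0.5 * t)

# Same call shape as data.py: peaks of -x are the dips of x;
# distance=30 forces detected dips to be at least 1 s (30 samples) apart
peaks, props = signal.find_peaks(-x, prominence=0.2, distance=30)
print(t[peaks])               # dip times in seconds
print(np.diff(peaks) / 30.0)  # spacing between dips in seconds
```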
4 requirements.txt Normal file
@@ -0,0 +1,4 @@
opencv-python
numpy
matplotlib
scipy
44 signalprocess.py Normal file
@@ -0,0 +1,44 @@
import numpy as np
from scipy import signal
from numpy.fft import fft, ifft
import matplotlib.pyplot as plt


# Filtering things I have mainly stolen from the internet. I really need to learn more so I can do something competent.
class SignalProcess:
    def __init__(self, path):
        self.data = np.loadtxt(path, delimiter="\t", dtype=np.float64)
        self.times = self.data[:, 0]
        self.counts = self.data[:, 1]
        self.ave_counts = self.data[:, 2]

    def band_pass(self):
        fs = 30.0  # sample rate (video frame rate)
        lowcut = 1.5 * 1.0 / 30.0  # passband edges in Hz
        highcut = 2.5 * 1.0 / 30.0
        nyqs = 0.5 * fs  # Nyquist frequency
        low = lowcut / nyqs  # normalized cut-offs for signal.butter
        high = highcut / nyqs
        b, a = signal.butter(2, [low, high], "bandpass", analog=False)
        filtered_counts = signal.filtfilt(b, a, self.ave_counts, axis=0, padlen=150)
        return filtered_counts

    def butter_filter(self):
        # Second-order low-pass Butterworth filter with a normalized cut-off of 0.02
        b, a = signal.butter(2, 0.02)
        filtered_counts = signal.filtfilt(b, a, self.ave_counts, padlen=150)
        return filtered_counts

    def fft(self):
        sr = 30  # sample rate (video frame rate)
        ts = 1.0 / sr
        X = fft(self.counts)
        N = len(X)
        n = np.arange(N)
        T = N / sr
        freq = n / T
        plt.stem(freq, np.abs(X), "b", markerfmt=" ", basefmt="-b")
        plt.xlabel("Freq (Hz)")
        plt.ylabel("FFT Amplitude |X(freq)|")
        plt.xlim(0, 10)
        plt.show()
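The bare 0.02 passed to signal.butter in butter_filter is a cut-off expressed as a fraction of the Nyquist frequency; at the 30 fps sample rate used throughout, that works out to 0.02 * 15 Hz = 0.3 Hz. A small sketch of the same low-pass design written with the frequency in Hz (the fs keyword is assumed to be available, as in recent scipy releases):

```
from scipy import signal

fs = 30.0                                  # video frame rate, i.e. the sample rate
cutoff_hz = 0.02 * (fs / 2.0)              # 0.3 Hz, equivalent to the normalized 0.02
b, a = signal.butter(2, cutoff_hz, fs=fs)  # same coefficients as signal.butter(2, 0.02)
```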
BIN squat_cut_7.mp4 Normal file
Binary file not shown.
14 video.py Normal file
@@ -0,0 +1,14 @@
from videoprocess import VideoProcess

# Process the video using the VideoProcess class
proc = VideoProcess(path="squat_cut_7.mp4")

# Output a TSV for later processing
out = open("out.tsv", "w")
result = True
while result is True:
    result = proc.processFrame()

for i in range(len(proc.counts)):
    out.write("{}\t{}\t{}\n".format(proc.times[i], proc.counts[i], proc.aveCounts[i]))
out.close()
104 videoprocess.py Normal file
@@ -0,0 +1,104 @@
import cv2
import numpy as np
import matplotlib.pyplot as plt


class VideoProcess:
    def __init__(self, path):
        # Used for the running average calculation
        self.alpha = 0.9
        # Unused for now. Working on ideas for other potential processing algos
        self.bucketSize = 2
        # Unused for now. Working on ideas for other potential processing algos
        self.frames = []
        # Frame counter
        self.frame = 0
        # OpenCV capture object
        self.cap = cv2.VideoCapture(path)
        # Amount to reduce the video resolution by
        self.scalingFactor = 4
        # Rolling average of the frame to smooth out the comparison and random passers-by
        self.frameAverage = None
        # Array of the counts of diff pixels, which represents motion
        self.counts = []
        # Rolling average of the counts
        self.aveCount = 0.0
        # Used to determine the rolling average size
        self.aveCountAlpha = 1.0 - 1.0 / 10.0
        # Place to store the rolling average
        self.aveCounts = []
        # Place to store calculated times given the FPS
        self.times = []
        # The FPS of the camera and thus the signal rate for the sampler
        self.FPS = 30.0

    # Single step in the process, producing one motion count
    def processFrame(self):
        # Make sure there are frames left in the video
        if self.cap.isOpened():
            # Read a frame
            ret, src = self.cap.read()
            # Bail if no frame was read
            if not ret:
                return False

            # Get the height and width of the video
            h, w, c = src.shape
            # Scale
            src = cv2.resize(
                src, (int(w / self.scalingFactor), int(h / self.scalingFactor))
            )
            # Convert to grayscale
            gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
            # Populate the frame average with the first frame
            if self.frameAverage is None:
                self.frameAverage = gray

            # Wait until there are at least a few frames to compare
            if len(self.frames) < self.bucketSize:
                self.frames.append(gray)
            else:
                self.frames.append(gray)
                self.frames.pop(0)
            # Rolling average calculation
            self.frameAverage = self.frameAverage * self.alpha + self.frames[0] * (
                1.0 - self.alpha
            )
            self.frameAverage = np.uint8(self.frameAverage)
            # Calculate the absolute difference between the current frame and the rolling average
            # This is the magic
            diff = cv2.absdiff(gray, self.frameAverage)
            # Normalize the count to the maximum pixel value
            count = np.sum(diff) / (w * h * 255 / self.scalingFactor) * 100.0

            # Show the result
            cv2.imshow("src", src)
            cv2.imshow("diff", diff)
            # Unused: live charting of the results
            # if self.frame % 10 == 0:
            #     self.drawPlot()

            # Capture the rolling average, non-averaged count, and timestamps for analysis
            self.counts.append(count)
            self.aveCount = self.aveCount * self.aveCountAlpha + count * (
                1.0 - self.aveCountAlpha
            )
            self.aveCounts.append(self.aveCount)
            self.times.append(self.frame / self.FPS)

            # Loop control
            key = cv2.waitKey(1)
            if key == 27:
                return False
            # Increment the frame counter
            self.frame += 1
        else:
            plt.show(block=True)
            return False
        return True

    def drawPlot(self):
        plt.cla()
        plt.plot(self.times, self.counts)
        plt.plot(self.times, self.aveCounts)
        plt.pause(0.0001)
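Both frameAverage and aveCount above are exponential moving averages of the form avg = avg * alpha + new * (1 - alpha); with aveCountAlpha = 1 - 1/10 this behaves roughly like a 10-frame window. A tiny standalone sketch of that update, with made-up input values, to make the smoothing concrete:

```
def ema(values, alpha):
    # Exponential moving average, seeded with the first value
    avg = values[0]
    out = []
    for v in values:
        avg = avg * alpha + v * (1.0 - alpha)
        out.append(avg)
    return out

# A step from 0 to 10 eases in over roughly 1 / (1 - alpha) = 10 samples
print(ema([0, 0, 10, 10, 10, 10, 10, 10], alpha=1.0 - 1.0 / 10.0))
```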