diff --git a/arduino/ws2812_controller/ws2812_controller.ino b/arduino/ws2812_controller/ws2812_controller.ino index b4e4caa..925f34b 100644 --- a/arduino/ws2812_controller/ws2812_controller.ino +++ b/arduino/ws2812_controller/ws2812_controller.ino @@ -9,7 +9,8 @@ #define BUFFER_LEN 1024 // Wifi and socket settings -const char* ssid = "LAWSON-LINK-2.4"; +//const char* ssid = "LAWSON-LINK-2.4"; +const char* ssid = "led_strip"; const char* password = "felixlina10"; unsigned int localPort = 7777; char packetBuffer[BUFFER_LEN]; @@ -19,11 +20,16 @@ static WS2812 ledstrip; static Pixel_t pixels[NUM_LEDS]; WiFiUDP port; +// Network information +IPAddress ip(192, 168, 1, 150); +IPAddress gateway(192, 168, 1, 1); +IPAddress subnet(255, 255, 255, 0); + void setup() { Serial.begin(115200); + WiFi.config(ip, gateway, subnet); WiFi.begin(ssid, password); Serial.println(""); - // Connect to wifi and print the IP address over serial while (WiFi.status() != WL_CONNECTED) { delay(500); @@ -59,4 +65,4 @@ void loop() { // Always update strip to improve temporal dithering performance ledstrip.show(pixels); -} \ No newline at end of file +} diff --git a/python/config.py b/python/config.py index 0ce0242..090a0a4 100644 --- a/python/config.py +++ b/python/config.py @@ -1,13 +1,16 @@ """Settings for audio reactive LED strip""" +from __future__ import print_function +from __future__ import division import os -N_PIXELS = 240 +N_PIXELS = 60 """Number of pixels in the LED strip (must match ESP8266 firmware)""" GAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy') """Location of the gamma correction table""" -UDP_IP = '192.168.0.100' +UDP_IP = '192.168.0.101' +#UDP_IP = '192.168.137.28' """IP address of the ESP8266""" UDP_PORT = 7777 @@ -16,7 +19,7 @@ UDP_PORT = 7777 MIC_RATE = 44100 """Sampling frequency of the microphone in Hz""" -FPS = 66 +FPS = 50 """Desired LED strip update rate in frames (updates) per second This is the desired update rate of the LED strip. The actual refresh rate of @@ -28,33 +31,33 @@ the duration of the short-time Fourier transform. This can negatively affect low frequency (bass) response. """ -ENERGY_THRESHOLD = 5.5 +ENERGY_THRESHOLD = 14.0 """Energy threshold for determining whether a beat has been detected One aspect of beat detection is comparing the current energy of a frequency -subband to the average energy of the subband over some time interval. Beats +subband to the average energy of the subband over some time interval. Beats are often associated with large spikes in energy relative to the recent average energy. ENERGY_THRESHOLD is the threshold used to determine if the energy spike is sufficiently large to be considered a beat. -For example, if ENERGY_THRESHOLD = 2, then a beat is detected if the current +For example, if ENERGY_THRESHOLD = 2, then a beat is detected if the current frequency subband energy is more than 2 times the recent average energy. """ -VARIANCE_THRESHOLD = 10.0 +VARIANCE_THRESHOLD = 0.0 """Variance threshold for determining whether a beat has been detected Beat detection is largely determined by the ENERGY_THRESHOLD, but we can also require frequency bands to have a certain minimum variance over some past -time interval before a beat can be detected. +time interval before a beat can be detected. One downside to using a variance threshold is that it is an absolute threshold which is affected by the current volume. 
""" -N_SUBBANDS = 128 +N_SUBBANDS = 40 # 240 #48 """Number of frequency bins to use for beat detection More subbands improve beat detection sensitivity but it may become more @@ -64,7 +67,7 @@ Fewer subbands reduces signal processing time at the expense of beat detection sensitivity. """ -N_HISTORY = int(1.2 * FPS) +N_HISTORY = int(0.8 * FPS) """Number of previous samples to consider when doing beat detection Beats are detected by comparing the most recent audio recording to a collection @@ -75,10 +78,18 @@ For example, setting N_HISTORY = int(1.0 * config.FPS) means that one second of previous audio recordings will be used for beat detection. Smaller values reduces signal processing time but values too small may reduce -beat detection accuracy. Larger values increase signal processing time and -values too large can also reduce beat detection accuracy. Roughly one second +beat detection accuracy. Larger values increase signal processing time and +values too large can also reduce beat detection accuracy. Roughly one second of previous data tends to work well. """ GAMMA_CORRECTION = True -"""Whether to correct LED brightness for nonlinear brightness perception""" \ No newline at end of file +"""Whether to correct LED brightness for nonlinear brightness perception""" + + +N_CURVES = 4 +"""Number of curves to plot in the visualization window""" + + +N_ROLLING_HISTORY = 2 +"""Number of past audio frames to include in the rolling window""" \ No newline at end of file diff --git a/python/dsp.py b/python/dsp.py index de79a41..efe051e 100644 --- a/python/dsp.py +++ b/python/dsp.py @@ -1,19 +1,58 @@ from __future__ import print_function -from __future__ import division +#from __future__ import division import numpy as np from scipy.interpolate import interp1d -import matplotlib -matplotlib.use('TkAgg') -import matplotlib.pylab as plt -plt.style.use('lawson') -import microphone as mic +import scipy.fftpack import config + +class ExponentialFilter: + """Simple exponential smoothing filter""" + def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5): + """Small rise / decay factors = more smoothing""" + assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor' + assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor' + self.alpha_decay = alpha_decay + self.alpha_rise = alpha_rise + self.value = val + + def update(self, value): + if not isinstance(self.value, (int, long, float)): + alpha = value - self.value + alpha[alpha > 0.0] = self.alpha_rise + alpha[alpha <= 0.0] = self.alpha_decay + else: + alpha = self.alpha_rise if value > self.value else self.alpha_decay + self.value = alpha * value + (1.0 - alpha) * self.value + return self.value + + # FFT statistics for a few previous updates -_ys_historical_energy = np.zeros(shape=(config.N_SUBBANDS, config.N_HISTORY)) +_ys_historical_energy = np.tile(1.0, (config.N_SUBBANDS, config.N_HISTORY)) def beat_detect(ys): + """Detect beats using an energy and variance theshold + + Parameters + ---------- + ys : numpy.array + Array containing the magnitudes for each frequency bin of the + fast fourier transformed audio data. + + Returns + ------- + has_beat : numpy.array + Array of booleans indicating a beat was detected in each of the + frequency bins of ys. + current_energy / mean_energy : numpy.array + Array containing the ratios of the energies relative to the + historical average energy for each of the frequency bins. 
The energies + are calculated as the square of the real FFT magnitudes + ys_variance : numpy.array + The historical variance of the energies associated with each frequency + bin in ys. + """ global _ys_historical_energy # Beat energy criterion current_energy = ys * ys @@ -26,29 +65,126 @@ def beat_detect(ys): has_beat_variance = ys_variance > config.VARIANCE_THRESHOLD # Combined energy + variance detection has_beat = has_beat_energy * has_beat_variance - return has_beat + return has_beat, current_energy / mean_energy, ys_variance -def fft(data): - """Returns |fft(data)|""" - yL, yR = np.split(np.abs(np.fft.fft(data)), 2) - ys = np.add(yL, yR[::-1]) - xs = np.arange(int(config.MIC_RATE / config.FPS) / 2, dtype=float) - xs *= float(config.MIC_RATE) / int(config.MIC_RATE / config.FPS) +def wrap_phase(phase): + """Converts phases in the range [0, 2 pi] to [-pi, pi]""" + return (phase + np.pi) % (2 * np.pi) - np.pi + + +ys_prev = None +phase_prev = None +dphase_prev = None + + +def onset(yt): + """Detects onsets in the given audio time series data + + Onset detection is performed using an ensemble of three onset detection + functions. + + The first onset detection function uses the rectified spectral flux (SF) + of successive FFT data frames. + The second onset detection function uses the normalized weighted phase + difference (NWPD) of successive FFT data frames. + The third is a rectified complex domain onset detection function. + + The product of these three functions forms an ensemble onset detection + function that returns continuous valued onset detection estimates. + + Parameters + ---------- + yt : numpy.array + Array of time series data to perform onset detection on + + Returns + ------- + SF : numpy.array + Array of rectified spectral flux values + NWPD : numpy.array + Array of normalized weighted phase difference values + RCD : numpy.array + Array of rectified complex domain values + + References + ---------- + Dixon, Simon "Onset Detection Revisited" + """ + global ys_prev, phase_prev, dphase_prev + xs, ys = fft_log_partition(yt, + subbands=config.N_SUBBANDS, + window=np.hamming, + fmin=1, + fmax=14000) + #ys = music_fft(yt) + magnitude = np.abs(ys) + phase = wrap_phase(np.angle(ys)) + # Special case for initialization + if ys_prev is None: + ys_prev = ys + phase_prev = phase + dphase_prev = phase + # Rectified spectral flux + SF = np.abs(ys) - np.abs(ys_prev) + SF[SF < 0.0] = 0.0 + # First difference of phase + dphase = wrap_phase(phase - phase_prev) + # Second difference of phase + ddphase = wrap_phase(dphase - dphase_prev) + # Normalized weighted phase deviation + NWPD = np.abs(ddphase * magnitude) / magnitude + # Rectified complex domain onset detection function + RCD = np.abs(ys - ys_prev * dphase_prev) + RCD[RCD < 0.0] = 0.0 + RCD = RCD + # Update previous values + ys_prev = ys + phase_prev = phase + dphase_prev = dphase + # Replace NaN values with zero + SF = np.nan_to_num(SF) + NWPD = np.nan_to_num(NWPD) + RCD = np.nan_to_num(RCD) + return SF, NWPD, RCD + + +def rfft(data, window=None): + if window is None: + window = 1.0 + else: + window = window(len(data)) + ys = np.abs(np.fft.rfft(data*window)) + xs = np.fft.rfftfreq(len(data), 1.0 / config.MIC_RATE) return xs, ys -# def fft(data): -# """Returns |fft(data)|""" -# yL, yR = np.split(np.abs(np.fft.fft(data)), 2) -# ys = np.add(yL, yR[::-1]) -# xs = np.arange(mic.CHUNK / 2, dtype=float) * float(mic.RATE) / mic.CHUNK -# return xs, ys - - -def fft_log_partition(data, fmin=30, fmax=20000, subbands=64): +def 
rfft_log_partition(data, fmin=30, fmax=20000, subbands=64, window=None): """Returns FFT partitioned into subbands that are logarithmically spaced""" - xs, ys = fft(data) + xs, ys = rfft(data, window=window) + xs_log = np.logspace(np.log10(fmin), np.log10(fmax), num=subbands * 32) + f = interp1d(xs, ys) + ys_log = f(xs_log) + X, Y = [], [] + for i in range(0, subbands * 32, 32): + X.append(np.mean(xs_log[i:i + 32])) + Y.append(np.mean(ys_log[i:i + 32])) + return np.array(X), np.array(Y) + + +def fft(data, window=None): + if window is None: + window = 1.0 + else: + window = window(len(data)) + ys = np.fft.fft(data*window) + xs = np.fft.fftfreq(len(data), 1.0 / config.MIC_RATE) + return xs, ys + + +def fft_log_partition(data, fmin=30, fmax=20000, subbands=64, window=None): + """Returns FFT partitioned into subbands that are logarithmically spaced""" + xs, ys = fft(data, window=window) xs_log = np.logspace(np.log10(fmin), np.log10(fmax), num=subbands * 32) f = interp1d(xs, ys) ys_log = f(xs_log) diff --git a/python/led.py b/python/led.py index b38128a..9baf7fd 100644 --- a/python/led.py +++ b/python/led.py @@ -1,5 +1,4 @@ from __future__ import print_function -import time import socket import numpy as np import config @@ -8,7 +7,7 @@ _sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) _gamma = np.load('gamma_table.npy') _prev_pixels = np.tile(0, (config.N_PIXELS, 3)) -pixels = np.tile(0, (config.N_PIXELS, 3)) +pixels = np.tile(1, (config.N_PIXELS, 3)) """Array containing the pixel values for the LED strip""" @@ -24,70 +23,11 @@ def update(): g = _gamma[pixels[i][1]] if config.GAMMA_CORRECTION else pixels[i][1] b = _gamma[pixels[i][2]] if config.GAMMA_CORRECTION else pixels[i][2] m += chr(i) + chr(r) + chr(g) + chr(b) - _prev_pixels = pixels + _prev_pixels = np.copy(pixels) _sock.sendto(m, (config.UDP_IP, config.UDP_PORT)) - -# def set_all(R, G, B): -# for i in range(config.N_PIXELS): -# set_pixel(i, R, G, B) -# update_pixels() - - -# def autocolor(x, speed=1.0): -# dt = 2.0 * np.pi / config.N_PIXELS -# t = time.time() * speed -# def r(t): return (np.sin(t + 0.0) + 1.0) * 1.0 / 2.0 -# def g(t): return (np.sin(t + (2.0 / 3.0) * np.pi) + 1.0) * 1.0 / 2.0 -# def b(t): return (np.sin(t + (4.0 / 3.0) * np.pi) + 1.0) * 1.0 / 2.0 -# for n in range(config.N_PIXELS): -# set_pixel(N=n, -# R=r(n * dt + t) * x[n], -# G=g(n * dt + t) * x[n], -# B=b(n * dt + t) * x[n], -# gamma_correction=True) -# update_pixels() - - -# def set_pixel(N, R, G, B, gamma_correction=True): -# global _m -# r = int(min(max(R, 0), 255)) -# g = int(min(max(G, 0), 255)) -# b = int(min(max(B, 0), 255)) -# if gamma_correction: -# r = _gamma_table[r] -# g = _gamma_table[g] -# b = _gamma_table[b] -# if _m is None: -# _m = chr(N) + chr(r) + chr(g) + chr(b) -# else: -# _m += chr(N) + chr(r) + chr(g) + chr(b) - - -# def update_pixels(): -# global _m -# _sock.sendto(_m, (config.UDP_IP, config.UDP_PORT)) -# _m = None - - -# def rainbow(brightness=255.0, speed=1.0, fps=10): -# offset = 132 -# dt = 2.0 * np.pi / config.N_PIXELS -# def r(t): return (np.sin(t + 0.0) + 1.0) * brightness / 2.0 + offset -# def g(t): return (np.sin(t + (2.0 / 3.0) * np.pi) + 1.0) * brightness / 2.0 + offset -# def b(t): return (np.sin(t + (4.0 / 3.0) * np.pi) + 1.0) * brightness / 2.0 + offset -# while True: -# t = time.time() * speed -# for n in range(config.N_PIXELS): -# T = t + n * dt -# set_pixel(N=n, R=r(T), G=g(T), B=b(T)) -# update_pixels() -# time.sleep(1.0 / fps) - - if __name__ == '__main__': - while True: - update() - #set_all(0, 0, 0) - # 
rainbow(speed=0.025, fps=40, brightness=0) + + pixels += 0.0 + update() diff --git a/python/microphone.py b/python/microphone.py index abb7158..a27e417 100644 --- a/python/microphone.py +++ b/python/microphone.py @@ -1,7 +1,6 @@ import pyaudio import config -CHUNK = int(config.MIC_RATE / config.FPS) def start_stream(callback): p = pyaudio.PyAudio() diff --git a/python/sandbox.py b/python/sandbox.py new file mode 100644 index 0000000..4332c72 --- /dev/null +++ b/python/sandbox.py @@ -0,0 +1,457 @@ +from __future__ import print_function +from __future__ import division +import time +import numpy as np +from pyqtgraph.Qt import QtGui +import pyqtgraph as pg +import config +import microphone +import dsp +import led + + + +def rainbow(length, speed=1.0 / 5.0): + """Returns a rainbow colored array with desired length + + Returns a rainbow colored array with shape (length, 3). + Each row contains the red, green, and blue color values between 0 and 1. + + Parameters + ---------- + length : int + The length of the rainbow colored array that should be returned + + speed : float + Value indicating the speed that the rainbow colors change. + If speed > 0, then successive calls to this function will return + arrays with different colors assigned to the indices. + If speed == 0, then this function will always return the same colors. + + Returns + ------- + x : numpy.array + np.ndarray with shape (length, 3). + Columns denote the red, green, and blue color values respectively. + Each color is a float between 0 and 1. + + """ + dt = 2.0 * np.pi / length + t = time.time() * speed + def r(t): return (np.sin(t + 0.0) + 1.0) * 1.0 / 2.0 + def g(t): return (np.sin(t + (2.0 / 3.0) * np.pi) + 1.0) * 1.0 / 2.0 + def b(t): return (np.sin(t + (4.0 / 3.0) * np.pi) + 1.0) * 1.0 / 2.0 + x = np.tile(0.0, (length, 3)) + for i in range(length): + x[i][0] = r(i * dt + t) + x[i][1] = g(i * dt + t) + x[i][2] = b(i * dt + t) + return x + + +_time_prev = time.time() * 1000.0 +"""The previous time that the frames_per_second() function was called""" + +_fps = dsp.ExponentialFilter(val=config.FPS, alpha_decay=0.01, alpha_rise=0.01) +"""The low-pass filter used to estimate frames-per-second""" + +def frames_per_second(): + """Return the estimated frames per second + + Returns the current estimate for frames-per-second (FPS). + FPS is estimated by measuring the amount of time that has elapsed since + this function was previously called. The FPS estimate is low-pass filtered + to reduce noise. + + This function is intended to be called one time for every iteration of + the program's main loop. + + Returns + ------- + fps : float + Estimated frames-per-second. This value is low-pass filtered + to reduce noise. + """ + global _time_prev, _fps + time_now = time.time() * 1000.0 + dt = time_now - _time_prev + _time_prev = time_now + if dt == 0.0: + return _fps.value + return _fps.update(1000.0 / dt) + + +def update_plot_1(x, y): + """Updates pyqtgraph plot 1 + + Parameters + ---------- + x : numpy.array + 1D array containing the X-axis values that should be plotted. + There should only be one X-axis array. + + y : numpy.ndarray + Array containing each of the Y-axis series that should be plotted. + Each row of y corresponds to a Y-axis series. The columns in each row + are the values that should be plotted. 
+ + Returns + ------- + None + """ + global curves, p1 + colors = rainbow(config.N_CURVES) * 255.0 + for i in range(config.N_CURVES): + curves[i].setPen((colors[i][0], colors[i][1], colors[i][2])) + curves[i].setData(x=x, y=y[i]) + p1.autoRange() + p1.setRange(yRange=(0, 2.0)) + + +_EA_norm = dsp.ExponentialFilter(np.tile(1e-4, config.N_PIXELS), 0.005, 0.25) +"""Onset energy per-bin normalization constants + +This filter is responsible for individually normalizing the onset bin energies. +This is used to obtain per-bin automatic gain control. +""" + +_EA_smooth = dsp.ExponentialFilter(np.tile(1.0, config.N_PIXELS), 0.15, 0.80) +"""Asymmetric exponential low-pass filtered onset energies + +This filter is responsible for smoothing the displayed onset energies. +Asymmetric rise and fall constants allow the filter to quickly respond to +increases in onset energy, while slowly responding to decreases. +""" + +def interpolate(y, new_length): + """Intelligently resizes the array by linearly interpolating the values + + Parameters + ---------- + y : np.array + Array that should be resized + + new_length : int + The length of the new interpolated array + + Returns + ------- + z : np.array + New array with length of new_length that contains the interpolated + values of y. + """ + x_old = np.linspace(0, 1, len(y)) + x_new = np.linspace(0, 1, new_length) + z = np.interp(x_new, x_old, y) + return z + + +# Individually normalized energy spike method +# Works well with GAMMA_CORRECTION = True +# This is one of the best visualizations, but doesn't work for everything +def update_leds_6(y): + """Visualization using per-bin normalized onset energies + + Visualizes onset energies by normalizing each frequency bin individually. + The normalized bins are then processed and displayed onto the LED strip. + + This function visualizes the onset energies by individually normalizing + each onset energy bin. The normalized onset bins are then scaled and + displayed on the LED strip. + + Parameters + ---------- + y : numpy.array + Array containing the onset energies that should be visualized. 
+ """ + + # Scale y to emphasize large spikes and attenuate small changes + # Exponents < 1.0 emphasize small changes and penalize large spikes + # Exponents > 1.0 emphasize large spikes and penalize small changes + y = np.copy(y) ** 1.5 + + # Use automatic gain control to normalize bin values + # Update normalization constants and then normalize each bin + _EA_norm.update(y) + y /= _EA_norm.value + + """Force saturated pixels to leak brightness into neighbouring pixels""" + + def smooth(): + for n in range(1, len(y) - 1): + excess = y[n] - 1.0 + if excess > 0.0: + y[n] = 1.0 + y[n - 1] += excess / 2.0 + y[n + 1] += excess / 2.0 + + # Several iterations because the adjacent pixels could also be saturated + for i in range(6): + smooth() + + # Update the onset energy low-pass filter and discard values that are too dim + _EA_smooth.update(y) + _EA_smooth.value[_EA_smooth.value < .1] = 0.0 + + # If some pixels are too bright, allow saturated pixels to become white + color = rainbow(config.N_PIXELS) * 255.0 + for i in range(config.N_PIXELS): + # Update LED strip pixel + led.pixels[i, :] = np.round(color[i, :] * _EA_smooth.value[i]**1.5) + # Leak excess red + excess_red = max(led.pixels[i, 0] - 255, 0) + led.pixels[i, 1] += excess_red + led.pixels[i, 2] += excess_red + # Leak excess green + excess_green = max(led.pixels[i, 1] - 255, 0) + led.pixels[i, 0] += excess_green + led.pixels[i, 2] += excess_green + # Leak excess blue + excess_blue = max(led.pixels[i, 2] - 255, 0) + led.pixels[i, 0] += excess_blue + led.pixels[i, 1] += excess_blue + led.update() + + +_prev_energy = 0.0 +_energy_flux = dsp.ExponentialFilter(1.0, alpha_decay=0.05, alpha_rise=0.9) +_EF_norm = dsp.ExponentialFilter(np.tile(1.0, config.N_PIXELS), 0.05, 0.9) +_EF_smooth = dsp.ExponentialFilter(np.tile(1.0, config.N_PIXELS), 0.1, 0.9) + + +# Individually normalized energy flux +def update_leds_5(y): + global _prev_energy + # Scale y + y = np.copy(y) + y = y ** 0.5 + + # Calculate raw energy flux + # Update previous energy + # Rectify energy flux + # Update the normalization constants + # Normalize the individual energy flux values + # Smooth the result using another smoothing filter + EF = y - _prev_energy + _prev_energy = np.copy(y) + EF[EF < 0] = 0.0 + _EF_norm.update(EF) + EF /= _EF_norm.value + _EF_smooth.update(EF) + # Cutoff values below 0.1 + _EF_smooth.value[_EF_smooth.value < 0.1] = 0.0 + + color = rainbow(config.N_PIXELS) * 255.0 + for i in range(config.N_PIXELS): + led.pixels[i, :] = np.round(color[i, :] * _EF_smooth.value[i]) + # Share excess red + excess_red = max(led.pixels[i, 0] - 255, 0) + led.pixels[i, 1] += excess_red + led.pixels[i, 2] += excess_red + # Share excess green + excess_green = max(led.pixels[i, 1] - 255, 0) + led.pixels[i, 0] += excess_green + led.pixels[i, 2] += excess_green + # Share excess blue + excess_blue = max(led.pixels[i, 2] - 255, 0) + led.pixels[i, 0] += excess_blue + led.pixels[i, 1] += excess_blue + led.update() + + +# Modulate brightness of the entire strip with no individual addressing +def update_leds_4(y): + y = np.copy(y) + energy = np.sum(y * y) + _energy_flux.update(energy) + energy /= _energy_flux.value + led.pixels = np.round((color * energy)).astype(int) + led.update() + + +# Energy flux based motion across the LED strip +def update_leds_3(y): + global pixels, color, _prev_energy, _energy_flux + y = np.copy(y) + # Calculate energy flux + energy = np.sum(y) + energy_flux = max(energy - _prev_energy, 0) + _prev_energy = energy + # Normalize energy flux + 
_energy_flux.update(energy_flux) + # Update pixels + pixels = np.roll(pixels, 1) + color = np.roll(color, 1, axis=0) + pixels *= 0.99 + pixels[0] = energy_flux + + led.pixels = np.copy(np.round((color.T * pixels).T).astype(int)) + for i in range(config.N_PIXELS): + # Share excess red + excess_red = max(led.pixels[i, 0] - 255, 0) + led.pixels[i, 1] += excess_red + led.pixels[i, 2] += excess_red + # Share excess green + excess_green = max(led.pixels[i, 1] - 255, 0) + led.pixels[i, 0] += excess_green + led.pixels[i, 2] += excess_green + # Share excess blue + excess_blue = max(led.pixels[i, 2] - 255, 0) + led.pixels[i, 0] += excess_blue + led.pixels[i, 1] += excess_blue + # Update LEDs + led.update() + + +# Energy based motion across the LED strip +def update_leds_2(y): + global pixels, color + y = np.copy(y) + # Calculate energy + energy = np.sum(y**2.0) + onset_energy.update(energy) + energy /= onset_energy.value + # Update pixels + pixels = np.roll(pixels, 1) + color = np.roll(color, 1, axis=0) + pixels *= 0.99 + pixels[pixels < 0.0] = 0.0 + pixels[0] = energy + pixels -= 0.005 + pixels[pixels < 0.0] = 0.0 + led.pixels = np.copy(np.round((color.T * pixels).T).astype(int)) + for i in range(config.N_PIXELS): + # Share excess red + excess_red = max(led.pixels[i, 0] - 255, 0) + led.pixels[i, 1] += excess_red + led.pixels[i, 2] += excess_red + # Share excess green + excess_green = max(led.pixels[i, 1] - 255, 0) + led.pixels[i, 0] += excess_green + led.pixels[i, 2] += excess_green + # Share excess blue + excess_blue = max(led.pixels[i, 2] - 255, 0) + led.pixels[i, 0] += excess_blue + led.pixels[i, 1] += excess_blue + # Update LEDs + led.update() + + + +def update_leds_1(y): + """Display the raw onset spectrum on the LED strip""" + y = np.copy(y) + y = y ** 0.5 + color = rainbow(config.N_PIXELS) * 255.0 + + led.pixels = np.copy(np.round((color.T * y).T).astype(int)) + for i in range(config.N_PIXELS): + # Share excess red + excess_red = max(led.pixels[i, 0] - 255, 0) + led.pixels[i, 1] += excess_red + led.pixels[i, 2] += excess_red + # Share excess green + excess_green = max(led.pixels[i, 1] - 255, 0) + led.pixels[i, 0] += excess_green + led.pixels[i, 2] += excess_green + # Share excess blue + excess_blue = max(led.pixels[i, 2] - 255, 0) + led.pixels[i, 0] += excess_blue + led.pixels[i, 1] += excess_blue + led.update() + + +def microphone_update(stream): + global y_roll, median, onset, SF_peak, NWPD_peak, RCD_peak, onset_peak + # Retrieve new audio samples and construct the rolling window + y = np.fromstring(stream.read(samples_per_frame), dtype=np.int16) + y = y / 2.0**15 + y_roll = np.roll(y_roll, -1, axis=0) + y_roll[-1, :] = np.copy(y) + y_data = np.concatenate(y_roll, axis=0) + # Calculate onset detection functions + SF, NWPD, RCD = dsp.onset(y_data) + # Update and normalize peak followers + SF_peak.update(np.max(SF)) + NWPD_peak.update(np.max(NWPD)) + RCD_peak.update(np.max(RCD)) + SF /= SF_peak.value + NWPD /= NWPD_peak.value + RCD /= RCD_peak.value + # Normalize and update onset spectrum + onset = SF * NWPD * RCD + onset_peak.update(np.max(onset)) + onset /= onset_peak.value + onsets.update(onset) + # Update the LED strip and resize if necessary + if len(onsets.value) != config.N_PIXELS: + onset_values = interpolate(onsets.value, config.N_PIXELS) + else: + onset_values = np.copy(onsets.value) + led_visualization(onset_values) + # Plot the onsets + plot_x = np.array(range(1, len(onsets.value) + 1)) + plot_y = [onsets.value**i for i in np.linspace(1, 0.25, config.N_CURVES)] + 
update_plot_1(plot_x, plot_y) + app.processEvents() + print('{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}'.format(SF_peak.value, + NWPD_peak.value, + RCD_peak.value, + onset_peak.value, + frames_per_second())) + + +# Create plot and window +app = QtGui.QApplication([]) +win = pg.GraphicsWindow('Audio Visualization') +win.resize(800, 600) +win.setWindowTitle('Audio Visualization') +# Create plot 1 containing config.N_CURVES +p1 = win.addPlot(title='Onset Detection Function') +p1.setLogMode(x=False) +curves = [] +colors = rainbow(config.N_CURVES) * 255.0 +for i in range(config.N_CURVES): + curve = p1.plot(pen=(colors[i][0], colors[i][1], colors[i][2])) + curves.append(curve) + + +# Pixel values for each LED +pixels = np.tile(0.0, config.N_PIXELS) +# Used to colorize the LED strip +color = rainbow(config.N_PIXELS) * 255.0 + +# Tracks average onset spectral energy +onset_energy = dsp.ExponentialFilter(1.0, alpha_decay=0.1, alpha_rise=0.99) + + +# Tracks the location of the spectral median +median = dsp.ExponentialFilter(val=config.N_SUBBANDS / 2.0, + alpha_decay=0.1, alpha_rise=0.1) +# Smooths the decay of the onset detection function +onsets = dsp.ExponentialFilter(val=np.tile(0.0, (config.N_SUBBANDS)), + alpha_decay=0.05, alpha_rise=0.75) + +# Peak followers used for normalization +SF_peak = dsp.ExponentialFilter(1.0, alpha_decay=0.01, alpha_rise=0.99) +NWPD_peak = dsp.ExponentialFilter(1.0, alpha_decay=0.01, alpha_rise=0.99) +RCD_peak = dsp.ExponentialFilter(1.0, alpha_decay=0.01, alpha_rise=0.99) +onset_peak = dsp.ExponentialFilter(0.1, alpha_decay=0.002, alpha_rise=0.1) + +# Number of audio samples to read every time frame +samples_per_frame = int(config.MIC_RATE / config.FPS) +# Array containing the rolling audio sample window +y_roll = np.random.rand(config.N_ROLLING_HISTORY, samples_per_frame) / 100.0 + +# Which LED visualization to use +# update_leds_1 = raw onset spectrum without normalization (GAMMA = True) +# update_leds_2 = energy average chase effect (GAMMA = True) +# update_leds_3 = energy flux chase effect (GAMMA = True) +# update_leds_4 = brightness modulation effect (GAMMA = True) +# update_leds_5 = energy flux normalized per-bin spectrum (GAMMA = True) +# update_leds_6 = energy average normalized per-bin spectrum (GAMMA = True) +led_visualization = update_leds_6 + +if __name__ == '__main__': + led.update() + microphone.start_stream(microphone_update) diff --git a/python/visualize.py b/python/visualize.py index 2c89b49..f2dadf1 100644 --- a/python/visualize.py +++ b/python/visualize.py @@ -24,7 +24,7 @@ class Beat: self.pixels = np.roll(self.pixels, roll, axis=0) self.pixels[:roll] *= 0.0 - # Apply Gaussian blur to create a dispersion effect + # Apply Gaussian blur to create a dispersion effect # Dispersion increases in strength over time sigma = (2. * .14 * self.iteration / (config.N_PIXELS * self.speed))**4. self.pixels = gaussian_filter1d(self.pixels, sigma, mode='constant') @@ -35,11 +35,13 @@ class Beat: self.pixels = np.round(self.pixels, decimals=2) self.pixels = np.clip(self.pixels, 0, 255) + self.speed *= np.exp(2. 
* np.log(.8) / config.N_PIXELS) + def finished(self): return np.array_equal(self.pixels, self.pixels * 0.0) -def rainbow(speed=1.0 / 5.0): +def rainbow(speed=10.0 / 5.0): # Note: assumes array is N_PIXELS / 2 long dt = np.pi / config.N_PIXELS t = time.time() * speed @@ -54,84 +56,70 @@ def rainbow(speed=1.0 / 5.0): return x -def radiate(beats, beat_speed=1.0, max_length=26, min_beats=1): - N_beats = len(beats[beats == True]) - # Add new beat if beats were detected - if N_beats > 0 and N_beats >= min_beats: - # Beat properties - beat_power = float(N_beats) / config.N_SUBBANDS - beat_brightness = min(beat_power * 40.0, 255.0) - beat_brightness = max(beat_brightness, 40) - beat_length = int(np.sqrt(beat_power) * max_length) - beat_length = max(beat_length, 2) - # Beat pixels - beat_pixels = np.zeros(config.N_PIXELS / 2) - beat_pixels[:beat_length] = beat_brightness - # Create and add the new beat - beat = Beat(beat_pixels, beat_speed) - radiate.beats = np.append(radiate.beats, beat) - # Pixels that will be displayed on the LED strip - pixels = np.zeros(config.N_PIXELS / 2) - if len(radiate.beats): - pixels += sum([b.pixels for b in radiate.beats]) - for b in radiate.beats: - b.update_pixels() - # Only keep the beats that are still visible on the strip - radiate.beats = [b for b in radiate.beats if not b.finished()] - pixels = np.append(pixels[::-1], pixels) - pixels = np.clip(pixels, 0.0, 255.0) - pixels = (pixels * rainbow().T).T - pixels = np.round(pixels).astype(int) - led.pixels = pixels - led.update() - - -def radiate2(beats, beat_speed=0.8, max_length=26, min_beats=1): +def radiate(beats, energy, beat_speed=1.0, max_length=7, min_beats=1): N_beats = len(beats[beats == True]) if N_beats > 0 and N_beats >= min_beats: index_to_color = rainbow() # Beat properties beat_power = float(N_beats) / config.N_SUBBANDS + # energy = np.copy(energy) + # energy -= np.min(energy) + # energy /= (np.max(energy) - np.min(energy)) beat_brightness = np.round(256.0 / config.N_SUBBANDS) beat_brightness *= np.sqrt(config.N_SUBBANDS / N_beats) + beat_brightness *= 1.3 beat_length = int(np.sqrt(beat_power) * max_length) beat_length = max(beat_length, 2) beat_pixels = np.tile(0.0, (config.N_PIXELS / 2, 3)) for i in range(len(beats)): if beats[i]: - beat_color = np.round(index_to_color[i] * beat_brightness) + beat_color = np.round(index_to_color[i] * beat_brightness * energy[i] / 2.0) beat_pixels[:beat_length] += beat_color beat_pixels = np.clip(beat_pixels, 0.0, 255.0) beat = Beat(beat_pixels, beat_speed) - radiate2.beats = np.append(radiate2.beats, beat) + radiate.beats = np.append(radiate.beats, beat) # Pixels that will be displayed on the LED strip pixels = np.zeros((config.N_PIXELS / 2, 3)) - if len(radiate2.beats): - pixels += sum([b.pixels for b in radiate2.beats]) - for b in radiate2.beats: + if len(radiate.beats): + pixels += sum([b.pixels for b in radiate.beats]) + for b in radiate.beats: b.update_pixels() - radiate2.beats = [b for b in radiate2.beats if not b.finished()] + radiate.beats = [b for b in radiate.beats if not b.finished()] pixels = np.append(pixels[::-1], pixels, axis=0) pixels = np.clip(pixels, 0.0, 255.0) - pixels = np.round(pixels).astype(int) - led.pixels = pixels + led.pixels = np.round(pixels).astype(int) led.update() +# Number of audio samples to read every time frame +samples_per_frame = int(config.MIC_RATE / config.FPS) +# Array containing the rolling audio sample window +y_roll = np.random.rand(config.N_ROLLING_HISTORY, samples_per_frame) / 100.0 + def microphone_update(stream): - 
frames_per_buffer = int(config.MIC_RATE / config.FPS) - data = np.fromstring(stream.read(frames_per_buffer), dtype=np.int16) - data = data / 2.0**15 - xs, ys = dsp.fft_log_partition(data=data, subbands=config.N_SUBBANDS) - beats = dsp.beat_detect(ys) - radiate2(beats) + global y_roll + # Read new audio data + y = np.fromstring(stream.read(samples_per_frame), dtype=np.int16) + y = y / 2.0**15 + # Construct rolling window of audio data + y_roll = np.roll(y_roll, -1, axis=0) + y_roll[-1, :] = np.copy(y) + y_data = np.concatenate(y_roll, axis=0) + # Take the real FFT with logarithmic bin spacing + xs, ys = dsp.rfft_log_partition(y_data, + subbands=config.N_SUBBANDS, + window=np.hamming, + fmin=1, + fmax=14000) + # Visualize the result + beats, energy, variance = dsp.beat_detect(ys) + radiate(beats, energy) # Initial values for the radiate effect radiate.beats = np.array([]) -radiate2.beats = np.array([]) if __name__ == "__main__": mic.start_stream(microphone_update)
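
Reviewer note on the new dsp.ExponentialFilter: a minimal usage sketch, not part of the diff, showing the peak-follower pattern that sandbox.py applies to its SF, NWPD, RCD, and onset values. The constants are copied from sandbox.py's onset_peak filter; the random data is only a stand-in, and the snippet assumes the repo's Python 2 environment since ExponentialFilter.update() references long.

# Illustrative only -- normalize one frame of onset values with an
# asymmetric peak follower (rises quickly toward new peaks, decays slowly).
import numpy as np
import dsp

onset_peak = dsp.ExponentialFilter(val=0.1, alpha_decay=0.002, alpha_rise=0.1)
onset = np.random.rand(40)        # stand-in for one frame of onset estimates
onset_peak.update(np.max(onset))  # track the recent peak value
onset /= onset_peak.value         # roughly normalized into [0, 1]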
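Also for reference, a small sketch (not part of the diff) of the beat criterion that the config.py docstrings and dsp.beat_detect() describe: a frequency bin registers a beat when its current energy exceeds ENERGY_THRESHOLD times its recent mean energy and the recent variance of that bin exceeds VARIANCE_THRESHOLD. The history array mirrors the (N_SUBBANDS, N_HISTORY) shape of dsp._ys_historical_energy; the data values are made up.

# Illustrative only -- the energy/variance beat test on fake history data.
import numpy as np

ENERGY_THRESHOLD = 14.0    # thresholds from the updated config.py
VARIANCE_THRESHOLD = 0.0
N_SUBBANDS, N_HISTORY = 40, 40

history = np.random.rand(N_SUBBANDS, N_HISTORY)  # past per-bin energies
current = np.random.rand(N_SUBBANDS) * 20.0      # current per-bin energies
has_beat = ((current > ENERGY_THRESHOLD * history.mean(axis=1)) &
            (history.var(axis=1) > VARIANCE_THRESHOLD))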