Added experimental support for Raspberry Pi devices

This commit adds support for the Raspberry Pi, which allows users to
build a completely standalone music visualization system. The Raspberry
Pi is connected directly to a ws2812b LED strip: a PWM-capable GPIO pin
should be connected to the strip's data line, and a USB audio input
device should be connected to one of the Raspberry Pi's USB ports.
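
For orientation, driving the strip directly from the Pi looks roughly
like the sketch below. This is a minimal, hypothetical example using the
rpi_ws281x Python bindings (the neopixel module); the pin and timing
values mirror the defaults added to config.py by this commit.

    import neopixel  # Python bindings for the rpi_ws281x library

    N_PIXELS = 60         # number of pixels on the physical strip
    LED_PIN = 18          # PWM-capable GPIO pin wired to the strip's data line
    LED_FREQ_HZ = 800000  # ws2812b signal frequency
    LED_DMA = 5           # DMA channel used to generate the PWM signal
    LED_INVERT = True     # the commit's default assumes an inverting level shifter
    BRIGHTNESS = 255

    strip = neopixel.Adafruit_NeoPixel(N_PIXELS, LED_PIN, LED_FREQ_HZ,
                                       LED_DMA, LED_INVERT, BRIGHTNESS)
    strip.begin()
    for i in range(N_PIXELS):
        strip.setPixelColor(i, neopixel.Color(0, 0, 255))  # all pixels blue
    strip.show()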

It is recommended that the GUI and FPS output be disabled when running
the visualization on the Raspberry Pi, since both features can further
degrade performance on its already limited hardware.
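
In practice this means the following settings in config.py when running
headless on the Pi (values taken directly from the defaults introduced
by this commit):

    DEVICE = 'pi'
    USE_GUI = False      # skip the PyQtGraph plot window
    DISPLAY_FPS = False  # skip the per-frame FPS printout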
Scott Lawson, 2017-01-03 16:33:28 -08:00
commit b10a7d0396 (parent 4b6f9a807b)
3 changed files with 195 additions and 121 deletions

config.py

@@ -3,19 +3,43 @@ from __future__ import print_function
from __future__ import division
import os
DEVICE = 'pi'
"""Device used to control LED strip. Must be 'pi' or 'esp8266'"""
if DEVICE == 'esp8266':
UDP_IP = '192.168.137.150'
"""IP address of the ESP8266. Must match IP in ws2812_controller.ino"""
UDP_PORT = 7777
"""Port number used for socket communication between Python and ESP8266"""
if DEVICE == 'pi':
LED_PIN = 18
"""GPIO pin connected to the LED strip pixels (must support PWM)"""
LED_FREQ_HZ = 800000
"""LED signal frequency in Hz (usually 800kHz)"""
LED_DMA = 5
"""DMA channel used for generating PWM signal (try 5)"""
BRIGHTNESS = 255
"""Brightness of LED strip between 0 and 255"""
LED_INVERT = True
"""Set True if using an inverting logic level converter"""
USE_GUI = False
"""Whether or not to display a PyQtGraph GUI plot of visualization"""
DISPLAY_FPS = False
"""Whether to display the FPS when running (can reduce performance)"""
N_PIXELS = 60
"""Number of pixels in the LED strip (must match ESP8266 firmware)"""
GAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy')
"""Location of the gamma correction table"""
UDP_IP = '192.168.137.150'
"""IP address of the ESP8266. Must match IP in ws2812_controller.ino"""
GAMMA_CORRECTION = True
"""Whether to correct LED brightness for nonlinear brightness perception"""
UDP_PORT = 7777
"""Port number used for socket communication between Python and ESP8266"""
MIC_RATE = 48000
MIC_RATE = 44100
"""Sampling frequency of the microphone in Hz"""
FPS = 60
@@ -43,7 +67,7 @@ MIN_FREQUENCY = 200
MAX_FREQUENCY = 12000
"""Frequencies above this value will be removed during audio processing"""
N_FFT_BINS = 30
N_FFT_BINS = 15
"""Number of frequency bins to use when transforming audio to frequency domain
Fast Fourier transforms are used to transform time-domain audio data to the
@@ -58,9 +82,6 @@ number of bins.
There is no point using more bins than there are pixels on the LED strip.
"""
GAMMA_CORRECTION = True
"""Whether to correct LED brightness for nonlinear brightness perception"""
N_ROLLING_HISTORY = 2
"""Number of past audio frames to include in the rolling window"""

led.py

@@ -1,27 +1,59 @@
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import socket
import numpy as np
import config
_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# ESP8266 uses WiFi communication
if config.DEVICE == 'esp8266':
import socket
_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Raspberry Pi controls the LED strip directly
elif config.DEVICE == 'pi':
import neopixel
strip = neopixel.Adafruit_NeoPixel(config.N_PIXELS, config.LED_PIN,
config.LED_FREQ_HZ, config.LED_DMA,
config.LED_INVERT, config.BRIGHTNESS)
strip.begin()
_gamma = np.load(config.GAMMA_TABLE_PATH)
"""Gamma lookup table used for nonlinear brightness correction"""
_prev_pixels = np.tile(253, (3, config.N_PIXELS))
"""Pixel values that were most recently displayed on the LED strip"""
pixels = np.tile(1, (3, config.N_PIXELS))
"""Array containing the pixel values for the LED strip"""
"""Pixel values for the LED strip"""
def update():
def _update_esp8266():
"""Sends UDP packets to ESP8266 to update LED strip values
The ESP8266 will receive and decode the packets to determine what values
to display on the LED strip. The communication protocol supports LED strips
with a maximum of 256 LEDs.
The packet encoding scheme is:
|i|r|g|b|
where
i (0 to 255): Index of LED to change (zero-based)
r (0 to 255): Red value of LED
g (0 to 255): Green value of LED
b (0 to 255): Blue value of LED
"""
global pixels, _prev_pixels
pixels = np.clip(pixels, 0, 255).astype(int)
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(long)
# Optionally apply gamma correction
p = _gamma[pixels] if config.GAMMA_CORRECTION else np.copy(pixels)
# Send UDP packets when using ESP8266
m = []
for i in range(config.N_PIXELS):
# Ignore pixels if they haven't changed (saves bandwidth)
if np.array_equal(p[:, i], _prev_pixels[:, i]):
continue
# Byte
m.append(i) # Index of pixel to change
m.append(p[0][i]) # Pixel red value
m.append(p[1][i]) # Pixel green value
@@ -30,6 +62,42 @@ def update():
_sock.sendto(bytes(m), (config.UDP_IP, config.UDP_PORT))
def _update_pi():
"""Writes new LED values to the Raspberry Pi's LED strip
Raspberry Pi uses the rpi_ws281x library to control the LED strip directly.
This function updates the LED strip with new values.
"""
global pixels, _prev_pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(long)
# Optional gamma correction
p = _gamma[pixels] if config.GAMMA_CORRECTION else np.copy(pixels)
# Encode 24-bit LED values in 32-bit integers
r = np.left_shift(p[0][:].astype(int), 8)
g = np.left_shift(p[1][:].astype(int), 16)
b = p[2][:].astype(int)
rgb = np.bitwise_or(np.bitwise_or(r, g), b)
# Update the pixels
for i in range(config.N_PIXELS):
# Ignore pixels if they haven't changed (saves bandwidth)
if np.array_equal(p[:, i], _prev_pixels[:, i]):
continue
strip._led_data[i] = rgb[i]
_prev_pixels = np.copy(p)
strip.show()
def update():
"""Updates the LED strip values"""
if config.DEVICE == 'esp8266':
_update_esp8266()
elif config.DEVICE == 'pi':
_update_pi()
else:
raise ValueError('Invalid device selected')
# Execute this file to run an LED strand test
# If everything is working, you should see a red, green, and blue pixel scroll
# across the LED strip continuously
@@ -44,4 +112,4 @@ if __name__ == '__main__':
while True:
pixels = np.roll(pixels, 1, axis=1)
update()
time.sleep(0.2)
time.sleep(0.01)
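
The |i|r|g|b| packet format documented in _update_esp8266 above is
simple to reproduce by hand. The sketch below encodes and sends one full
frame as a standalone, hypothetical example (IP and port taken from
config.py; unlike the real update function, it does not skip unchanged
pixels):

    import socket
    import numpy as np

    pixels = np.zeros((3, 60), dtype=int)
    pixels[0, 0] = 255  # first pixel full red

    m = []
    for i in range(pixels.shape[1]):
        m.append(i)              # i: zero-based LED index (0 to 255)
        m.append(pixels[0][i])   # r: red value
        m.append(pixels[1][i])   # g: green value
        m.append(pixels[2][i])   # b: blue value

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(bytes(bytearray(m)), ('192.168.137.150', 7777))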

visualization.py

@@ -7,7 +7,8 @@ import config
import microphone
import dsp
import led
import gui
if config.USE_GUI:
import gui
_time_prev = time.time() * 1000.0
"""The previous time that the frames_per_second() function was called"""
@@ -68,77 +69,40 @@ def interpolate(y, new_length):
r_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.08, alpha_rise=0.99)
alpha_decay=0.04, alpha_rise=0.4)
g_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.15, alpha_rise=0.99)
b_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.25, alpha_rise=0.99)
p_filt = dsp.ExpFilter(np.tile(1, (3, config.N_PIXELS // 2)),
alpha_decay=0.05, alpha_rise=0.8)
alpha_decay=0.2, alpha_rise=0.99)
p = np.tile(1.0, (3, config.N_PIXELS // 2))
gain = dsp.ExpFilter(np.tile(0.01, config.N_FFT_BINS),
alpha_decay=0.001, alpha_rise=0.99)
def largest_indices(ary, n):
"""Returns indices of the n largest values in the given a numpy array"""
flat = ary.flatten()
indices = np.argpartition(flat, -n)[-n:]
indices = indices[np.argsort(-flat[indices])]
return np.unravel_index(indices, ary.shape)
def visualize_max(y):
"""Experimental sandbox effect. Not recommended for use"""
y = np.copy(interpolate(y, config.N_PIXELS // 2)) * 255.0
ind = largest_indices(y, 15)
y[ind] *= -1.0
y[y > 0] = 0.0
y[ind] *= -1.0
# Blur the color channels with different strengths
r = gaussian_filter1d(y, sigma=0.25)
g = gaussian_filter1d(y, sigma=0.10)
b = gaussian_filter1d(y, sigma=0.00)
b = np.roll(b, 1)
b[0] = b[1]
r_filt.update(r)
g_filt.update(g)
b_filt.update(b)
# Pixel values
pixel_r = np.concatenate((r_filt.value[::-1], r_filt.value))
pixel_g = np.concatenate((g_filt.value[::-1], g_filt.value))
pixel_b = np.concatenate((b_filt.value[::-1], b_filt.value))
# Update the LED strip values
led.pixels[0, :] = pixel_r
led.pixels[1, :] = pixel_g
led.pixels[2, :] = pixel_b
led.update()
# Update the GUI plots
GUI.curve[0][0].setData(y=pixel_r)
GUI.curve[0][1].setData(y=pixel_g)
GUI.curve[0][2].setData(y=pixel_b)
def visualize_scroll(y):
"""Effect that originates in the center and scrolls outwards"""
global p
y = gaussian_filter1d(y, sigma=1.0)**3.0
y = np.copy(y)
y = np.copy(y)**1.0
gain.update(y)
y /= gain.value
y *= 255.0
r = int(max(y[:len(y) // 3]))
g = int(max(y[len(y) // 3: 2 * len(y) // 3]))
b = int(max(y[2 * len(y) // 3:]))
# Scrolling effect window
p = np.roll(p, 1, axis=1)
p *= 0.98
p = gaussian_filter1d(p, sigma=0.2)
# p = gaussian_filter1d(p, sigma=0.2)
# Create new color originating at the center
p[0, 0] = r
p[1, 0] = g
p[2, 0] = b
# Update the LED strip
led.pixels = np.concatenate((p[:, ::-1], p), axis=1)
led.update()
if config.USE_GUI:
# Update the GUI plots
GUI.curve[0][0].setData(y=np.concatenate((p[0, :][::-1], p[0, :])))
GUI.curve[0][1].setData(y=np.concatenate((p[1, :][::-1], p[1, :])))
@@ -148,13 +112,17 @@ def visualize_scroll(y):
def visualize_energy(y):
"""Effect that expands from the center with increasing sound energy"""
global p
y = gaussian_filter1d(y, sigma=1.0)**3.0
y = np.copy(y)**2.0
gain.update(y)
y /= gain.value
y *= (config.N_PIXELS // 2) - 1
r = int(np.mean(y[:len(y) // 3]))
g = int(np.mean(y[len(y) // 3: 2 * len(y) // 3]))
b = int(np.mean(y[2 * len(y) // 3:]))
# Scale by the width of the LED strip
y *= float((config.N_PIXELS // 2) - 1)
# Map color channels according to energy in the different freq bands
scale = 0.9
r = int(np.mean(y[:len(y) // 3]**scale))
g = int(np.mean(y[len(y) // 3: 2 * len(y) // 3]**scale))
b = int(np.mean(y[2 * len(y) // 3:]**scale))
# Assign color to different frequency regions
p[0, :r] = 255.0
p[0, r:] = 0.0
p[1, :g] = 255.0
@@ -163,12 +131,14 @@ def visualize_energy(y):
p[2, b:] = 0.0
p_filt.update(p)
p = np.round(p_filt.value)
# Apply substantial blur to smooth the edges
p[0, :] = gaussian_filter1d(p[0, :], sigma=4.0)
p[1, :] = gaussian_filter1d(p[1, :], sigma=4.0)
p[2, :] = gaussian_filter1d(p[2, :], sigma=4.0)
# Update LED pixel arrays
# Set the new pixel value
led.pixels = np.concatenate((p[:, ::-1], p), axis=1)
led.update()
if config.USE_GUI:
# Update the GUI plots
GUI.curve[0][0].setData(y=np.concatenate((p[0, :][::-1], p[0, :])))
GUI.curve[0][1].setData(y=np.concatenate((p[1, :][::-1], p[1, :])))
@@ -177,23 +147,25 @@ def visualize_energy(y):
def visualize_spectrum(y):
"""Effect that maps the Mel filterbank frequencies onto the LED strip"""
y = np.copy(interpolate(y, config.N_PIXELS // 2)) * 255.0
y = np.copy(interpolate(y, config.N_PIXELS // 2))
# Blur the color channels with different strengths
r = gaussian_filter1d(y, sigma=0.25)
g = gaussian_filter1d(y, sigma=0.10)
b = gaussian_filter1d(y, sigma=0.00)
r = gaussian_filter1d(y, sigma=1., order=0)
g = gaussian_filter1d(y, sigma=1., order=0)
b = gaussian_filter1d(y, sigma=1., order=0)
# Update temporal filters
r_filt.update(r)
g_filt.update(g)
b_filt.update(b)
# Pixel values
pixel_r = np.concatenate((r_filt.value[::-1], r_filt.value))
pixel_g = np.concatenate((g_filt.value[::-1], g_filt.value))
pixel_b = np.concatenate((b_filt.value[::-1], b_filt.value))
pixel_r = np.concatenate((r_filt.value[::-1], r_filt.value)) * 255.0
pixel_g = np.concatenate((g_filt.value[::-1], g_filt.value)) * 255.0
pixel_b = np.concatenate((b_filt.value[::-1], b_filt.value)) * 255.0
# Update the LED strip values
led.pixels[0, :] = pixel_r
led.pixels[1, :] = pixel_g
led.pixels[2, :] = pixel_b
led.update()
if config.USE_GUI:
# Update the GUI plots
GUI.curve[0][0].setData(y=pixel_r)
GUI.curve[0][1].setData(y=pixel_g)
@@ -205,12 +177,22 @@ mel_gain = dsp.ExpFilter(np.tile(1e-1, config.N_FFT_BINS),
volume = dsp.ExpFilter(config.MIN_VOLUME_THRESHOLD,
alpha_decay=0.02, alpha_rise=0.02)
# Keeps track of the number of buffer overflows
# Lots of buffer overflows could mean that FPS is set too high
buffer_overflows = 1
def microphone_update(stream):
global y_roll, prev_rms, prev_exp
# Retrieve and normalize the new audio samples
y = np.fromstring(stream.read(samples_per_frame,
exception_on_overflow=False), dtype=np.int16)
try:
y = np.fromstring(stream.read(samples_per_frame), dtype=np.int16)
# exception_on_overflow=False), dtype=np.int16)
except IOError:
y = y_roll[config.N_ROLLING_HISTORY - 1, :]
global buffer_overflows
print('Buffer overflows: {0}'.format(buffer_overflows))
buffer_overflows += 1
# Normalize samples between 0 and 1
y = y / 2.0**15
# Construct a rolling window of audio samples
y_roll = np.roll(y_roll, -1, axis=0)
@@ -239,7 +221,9 @@ def microphone_update(stream):
mel = mel / mel_gain.value
# Visualize the filterbank output
visualization_effect(mel)
if config.USE_GUI:
GUI.app.processEvents()
if config.DISPLAY_FPS:
print('FPS {:.0f} / {:.0f}'.format(frames_per_second(), config.FPS))
@@ -249,10 +233,11 @@ samples_per_frame = int(config.MIC_RATE / config.FPS)
# Array containing the rolling audio sample window
y_roll = np.random.rand(config.N_ROLLING_HISTORY, samples_per_frame) / 1e16
visualization_effect = visualize_spectrum
visualization_effect = visualize_energy
"""Visualization effect to display on the LED strip"""
if __name__ == '__main__':
if config.USE_GUI:
import pyqtgraph as pg
# Create GUI plot for visualizing LED strip output
GUI = gui.GUI(width=800, height=400, title='Audio Visualization')
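
dsp.ExpFilter itself is not touched by this commit, but the many
alpha_decay/alpha_rise pairs above are easier to read with a sketch of
the asymmetric exponential smoothing filter they presumably configure.
This is an illustration consistent with how the filters are used here,
not the project's actual implementation:

    import numpy as np

    class ExpFilter:
        """One-pole smoothing filter with separate rise and decay rates."""
        def __init__(self, val, alpha_decay, alpha_rise):
            self.value = np.asarray(val, dtype=float)
            self.alpha_decay = alpha_decay  # smoothing factor when the input falls
            self.alpha_rise = alpha_rise    # smoothing factor when the input rises

        def update(self, new_value):
            new_value = np.asarray(new_value, dtype=float)
            alpha = np.where(new_value > self.value, self.alpha_rise, self.alpha_decay)
            self.value = alpha * new_value + (1.0 - alpha) * self.value
            return self.value

    # Example: a filter that rises quickly and decays slowly
    f = ExpFilter(np.zeros(4), alpha_decay=0.05, alpha_rise=0.99)
    f.update([1.0, 1.0, 0.0, 0.0])  # jumps up almost immediately
    f.update([0.0, 0.0, 0.0, 0.0])  # falls back slowly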