From 03075d861935191d6e305f8b754eeb0398d5d550 Mon Sep 17 00:00:00 2001 From: gtoombs Date: Mon, 4 Jan 2021 12:53:02 -0500 Subject: [PATCH 1/4] Remove pycache --- .gitignore | 5 +++-- __pycache__/mic_read.cpython-36.pyc | Bin 1398 -> 0 bytes 2 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 __pycache__/mic_read.cpython-36.pyc diff --git a/.gitignore b/.gitignore index 463fb73..a381481 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ -env/* +env/ *.swp - +__pycache__/ +.idea/ diff --git a/__pycache__/mic_read.cpython-36.pyc b/__pycache__/mic_read.cpython-36.pyc deleted file mode 100644 index e38ad26ebb19681664894b56c2ec16f8fef72ed7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1398 zcmZWpTW{Mo6ecBFl9f1@E*Og4z+Eu|ZLwm&1{6b4FY}gSi5JA)!vq6@D$)rZ-A2)E zUD&_??_n?dA^SIb*b`s(viyZTEIUUoG8dD;k4HQddCvD8^3zTy{QVgncYH$rBpZ(l z_9JMu01zZ1f(j?25p8-Wa+{t_TahQ+$d_~)L~X&MP_&{gxh=PZcR-~7CRx#_BYYA3 zOrjmp79sSzVoPj8zb86k2l{>C3`uwQ#UEj=(pbwx+^go{As9vq{^gwaGWk3ygw*_p zM8nX>$>ki4N9TE>vU_Qncf;^RrMj#x%0lwAEQ~I*41UN!q*+1aM&;7M&&ZBHzqt)^(@KMi^Qn1;HE6IF#JkZ62d7`CF=*gnkO@% z$~(N6<<*=+pgd1Zm6aw_Pr@tUj||jL4+*)VU@F1^{sMke^x)$_1OCY&wE6=8L#A{^ zmSjPuj&Th$EkTVZoL^f@x}Xa(LB8CZGgMPF!By2ihmLs zP>(vz8&JImBPp_h4}%6Qtyy^s3}Ay8x}r!0iE>#nbF!GH}iRn%3IN{~odW>hAv3u)WVuy=BL+>ej@gKtJh z)~!_mI@0Hs&i|Kko?OcK{wJ58!-{nPK Date: Mon, 4 Jan 2021 13:38:30 -0500 Subject: [PATCH 2/4] PEP8 refactoring, and a fix-up for the main loop. This must not ever have worked, because variable `im` was unavailable in the plotting method. It runs, now, although it still isn't "live" - it doesn't update. --- mic_read.py | 91 ++++++++++++++------------- requirements.txt | 14 +---- run_specgram.py | 156 ++++++++++++++++++++++++----------------------- 3 files changed, 129 insertions(+), 132 deletions(-) diff --git a/mic_read.py b/mic_read.py index b74d9fe..43ec220 100644 --- a/mic_read.py +++ b/mic_read.py @@ -7,63 +7,62 @@ Dependencies: pyaudio, numpy and matplotlib """ -############### Import Libraries ############### +from typing import Tuple + import pyaudio import numpy as np import matplotlib.pyplot as plt -############### Constants ############### -#RATE = 44100 #sample rate -RATE = 16000 -FORMAT = pyaudio.paInt16 #conversion format for PyAudio stream -CHANNELS = 1 #microphone audio channels -CHUNK_SIZE = 8192 #number of samples to take per read -SAMPLE_LENGTH = int(CHUNK_SIZE*1000/RATE) #length of each sample in ms -############### Functions ############### -""" -open_mic: -creates a PyAudio object and initializes the mic stream -inputs: none -ouputs: stream, PyAudio object -""" -def open_mic(): +# RATE = 44100 #sample rate +RATE = 16_000 +FORMAT = pyaudio.paInt16 # conversion format for PyAudio stream +CHANNELS = 1 # microphone audio channels +CHUNK_SIZE = 8_192 # number of samples to take per read +SAMPLE_LENGTH = int(CHUNK_SIZE*1_000/RATE) # length of each sample in ms + + +def open_mic() -> Tuple[pyaudio.Stream, pyaudio.PyAudio]: + """ + creates a PyAudio object and initializes the mic stream + inputs: none + ouputs: stream, PyAudio object + """ pa = pyaudio.PyAudio() - stream = pa.open(format = FORMAT, - channels = CHANNELS, - rate = RATE, - input = True, - frames_per_buffer = CHUNK_SIZE) - return stream,pa + stream = pa.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK_SIZE) + return stream, pa -""" -get_data: -reads from the audio stream for a constant length of time, converts it to data -inputs: stream, PyAudio object -outputs: int16 data 
array -""" -def get_data(stream,pa): + +def get_data(stream: pyaudio.Stream) -> np.ndarray: + """ + reads from the audio stream for a constant length of time, converts it to data + inputs: stream, PyAudio object + outputs: int16 data array + """ input_data = stream.read(CHUNK_SIZE) - data = np.fromstring(input_data,np.int16) + data = np.fromstring(input_data, np.int16) return data -############### Test Functions ############### -""" -make_10k: -creates a 10kHz test tone -""" + def make_10k(): - x = np.linspace(-2*np.pi,2*np.pi,21000) - x = np.tile(x,int(SAMPLE_LENGTH/(4*np.pi))) - y = np.sin(2*np.pi*5000*x) - return x,y + """ + creates a 10kHz test tone + """ + x = np.linspace(-2*np.pi, 2*np.pi, 21_000) + x = np.tile(x, int(SAMPLE_LENGTH/(4*np.pi))) + y = np.sin(2*np.pi*5_000*x) + return x, y + -""" -show_freq: -plots the test tone for a sanity check -""" def show_freq(): - x,y = make_10k() - plt.plot(x,y) + """ + show_freq: + plots the test tone for a sanity check + """ + x, y = make_10k() + plt.plot(x, y) plt.show() - diff --git a/requirements.txt b/requirements.txt index c963c20..901fd32 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,3 @@ -funcsigs==0.4 -matplotlib==1.4.3 -mock==1.3.0 -nose==1.3.7 -numpy==1.9.2 -pbr==1.4.0 -PyAudio==0.2.8 -pyparsing==2.0.3 -python-dateutil==2.4.2 -pytz==2015.4 -six==1.9.0 +matplotlib +numpy +PyAudio diff --git a/run_specgram.py b/run_specgram.py index 6ba9931..5fbe1a3 100644 --- a/run_specgram.py +++ b/run_specgram.py @@ -7,103 +7,109 @@ Dependencies: matplotlib, numpy and the mic_read.py module """ -############### Import Libraries ############### -from matplotlib.mlab import window_hanning,specgram +from typing import Tuple + +import pyaudio +from matplotlib.mlab import window_hanning, specgram import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.colors import LogNorm import numpy as np -############### Import Modules ############### import mic_read -############### Constants ############### -#SAMPLES_PER_FRAME = 10 #Number of mic reads concatenated within a single window +# SAMPLES_PER_FRAME = 10 # Number of mic reads concatenated within a single window SAMPLES_PER_FRAME = 4 -nfft = 1024#256#1024 #NFFT value for spectrogram -overlap = 1000#512 #overlap value for spectrogram -rate = mic_read.RATE #sampling rate +nfft = 1024 # 256#1024 # NFFT value for spectrogram +overlap = 1000 # 512 # overlap value for spectrogram +rate = mic_read.RATE # sampling rate -############### Functions ############### -""" -get_sample: -gets the audio data from the microphone -inputs: audio stream and PyAudio object -outputs: int16 array -""" -def get_sample(stream,pa): - data = mic_read.get_data(stream,pa) - return data -""" -get_specgram: -takes the FFT to create a spectrogram of the given audio signal -input: audio signal, sampling rate -output: 2D Spectrogram Array, Frequency Array, Bin Array -see matplotlib.mlab.specgram documentation for help -""" -def get_specgram(signal,rate): - arr2D,freqs,bins = specgram(signal,window=window_hanning, - Fs = rate,NFFT=nfft,noverlap=overlap) - return arr2D,freqs,bins -""" -update_fig: -updates the image, just adds on samples at the start until the maximum size is -reached, at which point it 'scrolls' horizontally by determining how much of the -data needs to stay, shifting it left, and appending the new data. 
-inputs: iteration number -outputs: updated image -""" -def update_fig(n): - data = get_sample(stream,pa) - arr2D,freqs,bins = get_specgram(data,rate) - im_data = im.get_array() - if n < SAMPLES_PER_FRAME: - im_data = np.hstack((im_data,arr2D)) - im.set_array(im_data) - else: - keep_block = arr2D.shape[1]*(SAMPLES_PER_FRAME - 1) - im_data = np.delete(im_data,np.s_[:-keep_block],1) - im_data = np.hstack((im_data,arr2D)) - im.set_array(im_data) - return im, - -def main(): - ############### Initialize Plot ############### - fig = plt.figure() +def get_sample(stream: pyaudio.Stream) -> np.ndarray: """ - Launch the stream and the original spectrogram + gets the audio data from the microphone + inputs: audio stream and PyAudio object + outputs: int16 array """ - stream,pa = mic_read.open_mic() - data = get_sample(stream,pa) - arr2D,freqs,bins = get_specgram(data,rate) + return mic_read.get_data(stream) + + +def get_specgram(signal: np.ndarray, rate: int) -> Tuple[ + np.ndarray, # 2D output array + np.ndarray, # Frequencies + np.ndarray, # Frequency bins +]: """ - Setup the plot paramters + takes the FFT to create a spectrogram of the given audio signal + input: audio signal, sampling rate + output: 2D Spectrogram Array, Frequency Array, Bin Array + see matplotlib.mlab.specgram documentation for help """ - extent = (bins[0],bins[-1]*SAMPLES_PER_FRAME,freqs[-1],freqs[0]) - im = plt.imshow(arr2D,aspect='auto',extent = extent,interpolation="none", - cmap = 'jet',norm = LogNorm(vmin=.01,vmax=1)) + arr2D, freqs, bins = specgram(signal, window=window_hanning, + Fs=rate, NFFT=nfft, noverlap=overlap) + return arr2D, freqs, bins + + +def make_plot( + stream: pyaudio.Stream, + arr2D: np.ndarray, + freqs: np.ndarray, + bins: np.ndarray, +): + # Initialize Plot + fig = plt.figure() + + # Set up the plot parameters + extent = (bins[0], bins[-1]*SAMPLES_PER_FRAME, freqs[-1], freqs[0]) + im = plt.imshow(arr2D, aspect='auto', extent=extent, interpolation='none', + cmap='jet', norm=LogNorm(vmin=.01, vmax=1)) plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') plt.title('Real Time Spectogram') plt.gca().invert_yaxis() - ##plt.colorbar() #enable if you want to display a color bar + # plt.colorbar() # enable if you want to display a color bar - ############### Animate ############### - anim = animation.FuncAnimation(fig,update_fig,blit = False, - interval=mic_read.CHUNK_SIZE/1000) + def update_fig(n: int): + """ + updates the image, just adds on samples at the start until the maximum size is + reached, at which point it 'scrolls' horizontally by determining how much of the + data needs to stay, shifting it left, and appending the new data. 
+ inputs: iteration number + outputs: updated image + """ + data = get_sample(stream) + arr2D, freqs, bins = get_specgram(data, rate) + im_data = im.get_array() + if n < SAMPLES_PER_FRAME: + im_data = np.hstack((im_data, arr2D)) + im.set_array(im_data) + else: + keep_block = arr2D.shape[1] * (SAMPLES_PER_FRAME - 1) + im_data = np.delete(im_data, np.s_[:-keep_block], 1) + im_data = np.hstack((im_data, arr2D)) + im.set_array(im_data) + return im, + + # Animate + animation.FuncAnimation(fig, update_fig, blit=False, + interval=mic_read.CHUNK_SIZE/1000) + + +def main(): + # Launch the stream and the original spectrogram + stream, pa = mic_read.open_mic() - try: + data = get_sample(stream) + arr2D, freqs, bins = get_specgram(data, rate) + + make_plot(stream, arr2D, freqs, bins) plt.show() - except: - print("Plot Closed") + finally: + stream.stop_stream() + stream.close() + pa.terminate() - ############### Terminate ############### - stream.stop_stream() - stream.close() - pa.terminate() - print("Program Terminated") -if __name__ == "__main__": +if __name__ == '__main__': main() From 3e1d67bfec695d6f685c5af1137e4483e746bba7 Mon Sep 17 00:00:00 2001 From: gtoombs Date: Mon, 4 Jan 2021 15:08:13 -0500 Subject: [PATCH 3/4] Animation runs, though it is jerky --- mic_read.py | 7 ++- run_specgram.py | 118 +++++++++++++++++++++++++----------------------- 2 files changed, 64 insertions(+), 61 deletions(-) mode change 100644 => 100755 run_specgram.py diff --git a/mic_read.py b/mic_read.py index 43ec220..4c95ec0 100644 --- a/mic_read.py +++ b/mic_read.py @@ -7,6 +7,7 @@ Dependencies: pyaudio, numpy and matplotlib """ + from typing import Tuple import pyaudio @@ -14,8 +15,7 @@ import matplotlib.pyplot as plt -# RATE = 44100 #sample rate -RATE = 16_000 +RATE = 16_000 # sample rate FORMAT = pyaudio.paInt16 # conversion format for PyAudio stream CHANNELS = 1 # microphone audio channels CHUNK_SIZE = 8_192 # number of samples to take per read @@ -48,7 +48,7 @@ def get_data(stream: pyaudio.Stream) -> np.ndarray: return data -def make_10k(): +def make_10k() -> Tuple[np.ndarray, np.ndarray]: """ creates a 10kHz test tone """ @@ -60,7 +60,6 @@ def make_10k(): def show_freq(): """ - show_freq: plots the test tone for a sanity check """ x, y = make_10k() diff --git a/run_specgram.py b/run_specgram.py old mode 100644 new mode 100755 index 5fbe1a3..1287db4 --- a/run_specgram.py +++ b/run_specgram.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + """ run_specgram.py Created By Alexander Yared (akyared@gmail.com) @@ -7,22 +9,21 @@ Dependencies: matplotlib, numpy and the mic_read.py module """ -from typing import Tuple -import pyaudio -from matplotlib.mlab import window_hanning, specgram import matplotlib.pyplot as plt -import matplotlib.animation as animation -from matplotlib.colors import LogNorm import numpy as np +import pyaudio +from matplotlib.animation import FuncAnimation +from matplotlib.colors import LogNorm +from matplotlib.image import AxesImage +from matplotlib.mlab import window_hanning, specgram +from typing import Tuple -import mic_read +from mic_read import CHUNK_SIZE, RATE, get_data, open_mic -# SAMPLES_PER_FRAME = 10 # Number of mic reads concatenated within a single window -SAMPLES_PER_FRAME = 4 -nfft = 1024 # 256#1024 # NFFT value for spectrogram -overlap = 1000 # 512 # overlap value for spectrogram -rate = mic_read.RATE # sampling rate +SAMPLES_PER_FRAME = 4 # Number of mic reads concatenated within a single window +N_FFT = 1_024 # NFFT value for spectrogram +OVERLAP = 1_000 # overlap value for spectrogram 
def get_sample(stream: pyaudio.Stream) -> np.ndarray: @@ -31,10 +32,10 @@ def get_sample(stream: pyaudio.Stream) -> np.ndarray: inputs: audio stream and PyAudio object outputs: int16 array """ - return mic_read.get_data(stream) + return get_data(stream) -def get_specgram(signal: np.ndarray, rate: int) -> Tuple[ +def get_specgram(signal: np.ndarray) -> Tuple[ np.ndarray, # 2D output array np.ndarray, # Frequencies np.ndarray, # Frequency bins @@ -45,65 +46,68 @@ def get_specgram(signal: np.ndarray, rate: int) -> Tuple[ output: 2D Spectrogram Array, Frequency Array, Bin Array see matplotlib.mlab.specgram documentation for help """ - arr2D, freqs, bins = specgram(signal, window=window_hanning, - Fs=rate, NFFT=nfft, noverlap=overlap) - return arr2D, freqs, bins + return specgram(signal, window=window_hanning, + Fs=RATE, NFFT=N_FFT, noverlap=OVERLAP) -def make_plot( - stream: pyaudio.Stream, - arr2D: np.ndarray, - freqs: np.ndarray, - bins: np.ndarray, -): +def update_fig(frame: int, im: AxesImage, stream: pyaudio.Stream) -> Tuple[AxesImage]: + """ + updates the image, just adds on samples at the start until the maximum size is + reached, at which point it 'scrolls' horizontally by determining how much of the + data needs to stay, shifting it left, and appending the new data. + inputs: iteration number + outputs: updated image + """ + data = get_sample(stream) + arr_2d, freqs, bins = get_specgram(data) + im_data = im.get_array() + + if frame < SAMPLES_PER_FRAME: + im_data = np.hstack((im_data, arr_2d)) + im.set_array(im_data) + else: + keep_block = arr_2d.shape[1] * (SAMPLES_PER_FRAME - 1) + im_data = np.delete(im_data, np.s_[:-keep_block], 1) + im_data = np.hstack((im_data, arr_2d)) + im.set_array(im_data) + + return im, + + +def make_plot(stream: pyaudio.Stream) -> FuncAnimation: # Initialize Plot fig = plt.figure() + ax = fig.gca() + + # Data for first frame + data = get_sample(stream) + arr_2d, freqs, bins = get_specgram(data) # Set up the plot parameters extent = (bins[0], bins[-1]*SAMPLES_PER_FRAME, freqs[-1], freqs[0]) - im = plt.imshow(arr2D, aspect='auto', extent=extent, interpolation='none', - cmap='jet', norm=LogNorm(vmin=.01, vmax=1)) - plt.xlabel('Time (s)') - plt.ylabel('Frequency (Hz)') - plt.title('Real Time Spectogram') - plt.gca().invert_yaxis() - # plt.colorbar() # enable if you want to display a color bar - - def update_fig(n: int): - """ - updates the image, just adds on samples at the start until the maximum size is - reached, at which point it 'scrolls' horizontally by determining how much of the - data needs to stay, shifting it left, and appending the new data. 
- inputs: iteration number - outputs: updated image - """ - data = get_sample(stream) - arr2D, freqs, bins = get_specgram(data, rate) - im_data = im.get_array() - if n < SAMPLES_PER_FRAME: - im_data = np.hstack((im_data, arr2D)) - im.set_array(im_data) - else: - keep_block = arr2D.shape[1] * (SAMPLES_PER_FRAME - 1) - im_data = np.delete(im_data, np.s_[:-keep_block], 1) - im_data = np.hstack((im_data, arr2D)) - im.set_array(im_data) - return im, + im = ax.imshow(arr_2d, aspect='auto', extent=extent, interpolation='none', + cmap='jet', norm=LogNorm(vmin=.01, vmax=1)) + ax.set_xlabel('Time (s)') + ax.set_ylabel('Frequency (Hz)') + ax.set_title('Real-Time Spectogram') + ax.invert_yaxis() + # fig.colorbar() # enable if you want to display a color bar # Animate - animation.FuncAnimation(fig, update_fig, blit=False, - interval=mic_read.CHUNK_SIZE/1000) + return FuncAnimation( + fig, + func=update_fig, fargs=(im, stream), + interval=CHUNK_SIZE/1000, + blit=True, + ) def main(): # Launch the stream and the original spectrogram - stream, pa = mic_read.open_mic() + stream, pa = open_mic() try: - data = get_sample(stream) - arr2D, freqs, bins = get_specgram(data, rate) - - make_plot(stream, arr2D, freqs, bins) + animation = make_plot(stream) plt.show() finally: stream.stop_stream() From 9b33c8e70f6de84bfbe49fdc62f5024723df53a9 Mon Sep 17 00:00:00 2001 From: gtoombs Date: Mon, 4 Jan 2021 17:37:13 -0500 Subject: [PATCH 4/4] Runs without stutter, but there is still a seam between buffers --- mic_read.py | 8 ++++---- run_specgram.py | 36 ++++++++++++++++++++++-------------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/mic_read.py b/mic_read.py index 4c95ec0..7a3176c 100644 --- a/mic_read.py +++ b/mic_read.py @@ -29,11 +29,11 @@ def open_mic() -> Tuple[pyaudio.Stream, pyaudio.PyAudio]: ouputs: stream, PyAudio object """ pa = pyaudio.PyAudio() - stream = pa.open(format=FORMAT, + stream = pa.open(input=True, + format=FORMAT, channels=CHANNELS, rate=RATE, - input=True, - frames_per_buffer=CHUNK_SIZE) + frames_per_buffer=2*CHUNK_SIZE) return stream, pa @@ -44,7 +44,7 @@ def get_data(stream: pyaudio.Stream) -> np.ndarray: outputs: int16 data array """ input_data = stream.read(CHUNK_SIZE) - data = np.fromstring(input_data, np.int16) + data = np.frombuffer(input_data, np.int16) return data diff --git a/run_specgram.py b/run_specgram.py index 1287db4..a87f203 100755 --- a/run_specgram.py +++ b/run_specgram.py @@ -19,7 +19,7 @@ from matplotlib.mlab import window_hanning, specgram from typing import Tuple -from mic_read import CHUNK_SIZE, RATE, get_data, open_mic +from mic_read import SAMPLE_LENGTH, RATE, get_data, open_mic SAMPLES_PER_FRAME = 4 # Number of mic reads concatenated within a single window N_FFT = 1_024 # NFFT value for spectrogram @@ -36,9 +36,9 @@ def get_sample(stream: pyaudio.Stream) -> np.ndarray: def get_specgram(signal: np.ndarray) -> Tuple[ - np.ndarray, # 2D output array - np.ndarray, # Frequencies - np.ndarray, # Frequency bins + np.ndarray, # 2D spectrum + np.ndarray, # Frequency axis + np.ndarray, # Time axis ]: """ takes the FFT to create a spectrogram of the given audio signal @@ -46,8 +46,11 @@ def get_specgram(signal: np.ndarray) -> Tuple[ output: 2D Spectrogram Array, Frequency Array, Bin Array see matplotlib.mlab.specgram documentation for help """ - return specgram(signal, window=window_hanning, - Fs=RATE, NFFT=N_FFT, noverlap=OVERLAP) + return specgram( + signal, + Fs=RATE, NFFT=N_FFT, noverlap=OVERLAP, + window=window_hanning, + ) def update_fig(frame: int, im: 
AxesImage, stream: pyaudio.Stream) -> Tuple[AxesImage]: @@ -59,16 +62,21 @@ def update_fig(frame: int, im: AxesImage, stream: pyaudio.Stream) -> Tuple[AxesI outputs: updated image """ data = get_sample(stream) - arr_2d, freqs, bins = get_specgram(data) + arr_2d, freqs, times = get_specgram(data) im_data = im.get_array() + # frame cannot be relied upon: we're called multiple times with 0 before it + # starts to increment. + frame = im_data.shape[1] // len(times) + if frame < SAMPLES_PER_FRAME: im_data = np.hstack((im_data, arr_2d)) im.set_array(im_data) else: - keep_block = arr_2d.shape[1] * (SAMPLES_PER_FRAME - 1) - im_data = np.delete(im_data, np.s_[:-keep_block], 1) - im_data = np.hstack((im_data, arr_2d)) + im_data = np.hstack(( + im_data[:, len(times):], + arr_2d, + )) im.set_array(im_data) return im, @@ -81,23 +89,23 @@ def make_plot(stream: pyaudio.Stream) -> FuncAnimation: # Data for first frame data = get_sample(stream) - arr_2d, freqs, bins = get_specgram(data) + arr_2d, freqs, times = get_specgram(data) # Set up the plot parameters - extent = (bins[0], bins[-1]*SAMPLES_PER_FRAME, freqs[-1], freqs[0]) + extent = (times[0], times[-1]*SAMPLES_PER_FRAME, freqs[-1], freqs[0]) im = ax.imshow(arr_2d, aspect='auto', extent=extent, interpolation='none', cmap='jet', norm=LogNorm(vmin=.01, vmax=1)) ax.set_xlabel('Time (s)') ax.set_ylabel('Frequency (Hz)') ax.set_title('Real-Time Spectogram') ax.invert_yaxis() - # fig.colorbar() # enable if you want to display a color bar + # fig.colorbar(im) # enable if you want to display a color bar # Animate return FuncAnimation( fig, func=update_fig, fargs=(im, stream), - interval=CHUNK_SIZE/1000, + interval=SAMPLE_LENGTH, blit=True, )
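
A note on the seam mentioned in the last commit message: each update_fig call runs specgram over one isolated CHUNK_SIZE read, so no analysis window ever straddles the boundary between consecutive reads, and that discontinuity shows up as a vertical seam in the image. One way it could be smoothed out is to keep a rolling buffer of the last SAMPLES_PER_FRAME chunks of raw samples and recompute the spectrogram over the whole buffer on every frame. The sketch below uses only the constants and helpers from the patched mic_read.py; the standalone script, its update callback, and the rolling-buffer approach itself are illustrative assumptions, not part of these patches.

    #!/usr/bin/env python3
    # Illustrative sketch (not part of the patch series): seamless spectrogram
    # built on a rolling buffer of raw samples.
    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib.animation import FuncAnimation
    from matplotlib.colors import LogNorm
    from matplotlib.mlab import specgram, window_hanning

    from mic_read import CHUNK_SIZE, RATE, SAMPLE_LENGTH, get_data, open_mic

    SAMPLES_PER_FRAME = 4   # mic reads shown in the window, as in run_specgram.py
    N_FFT = 1_024           # NFFT value for spectrogram
    OVERLAP = 1_000         # overlap value for spectrogram


    def main() -> None:
        stream, pa = open_mic()
        try:
            # Rolling buffer of raw samples spanning the whole visible window.
            buffer = np.concatenate([get_data(stream)
                                     for _ in range(SAMPLES_PER_FRAME)])

            arr_2d, freqs, times = specgram(buffer, Fs=RATE, NFFT=N_FFT,
                                            noverlap=OVERLAP,
                                            window=window_hanning)

            fig, ax = plt.subplots()
            im = ax.imshow(arr_2d, aspect='auto', origin='lower',
                           extent=(times[0], times[-1], freqs[0], freqs[-1]),
                           cmap='jet', norm=LogNorm(vmin=.01, vmax=1))
            ax.set_xlabel('Time (s)')
            ax.set_ylabel('Frequency (Hz)')
            ax.set_title('Real-Time Spectrogram (rolling buffer)')

            def update(_frame: int):
                nonlocal buffer
                # Drop the oldest chunk, append the newest one, and recompute the
                # spectrogram over the full buffer so the analysis windows straddle
                # chunk boundaries, which is where the seam comes from.
                buffer = np.concatenate((buffer[CHUNK_SIZE:], get_data(stream)))
                arr_2d, _, _ = specgram(buffer, Fs=RATE, NFFT=N_FFT,
                                        noverlap=OVERLAP, window=window_hanning)
                im.set_array(arr_2d)
                return im,

            # Keep a reference so the animation isn't garbage-collected.
            anim = FuncAnimation(fig, update, interval=SAMPLE_LENGTH, blit=True)
            plt.show()
        finally:
            stream.stop_stream()
            stream.close()
            pa.terminate()


    if __name__ == '__main__':
        main()

The trade-off is compute: recomputing the full window costs roughly SAMPLES_PER_FRAME times the FFT work per frame compared with the incremental update in patch 4, in exchange for analysis windows that span chunk boundaries.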