From 11b133fba8a914b2ee5d81fa8d2fe3eb93e3409f Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Mon, 7 Jan 2019 13:42:48 -0700 Subject: [PATCH 1/9] style: linting eeg --- .pylintrc | 4 +- cebl/eeg/base.py | 4 +- cebl/eeg/eeg.py | 253 +++++++++++++++++++++++++++++++++-------------- 3 files changed, 183 insertions(+), 78 deletions(-) diff --git a/.pylintrc b/.pylintrc index 34cc454..adcda84 100644 --- a/.pylintrc +++ b/.pylintrc @@ -511,13 +511,13 @@ max-bool-expr=5 max-branches=12 # Maximum number of locals for function / method body -max-locals=30 +max-locals=35 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of public methods for a class (see R0904). -max-public-methods=20 +max-public-methods=30 # Maximum number of return / yield for function / method body max-returns=6 diff --git a/cebl/eeg/base.py b/cebl/eeg/base.py index d786ab5..24fa6e4 100644 --- a/cebl/eeg/base.py +++ b/cebl/eeg/base.py @@ -72,7 +72,7 @@ def getChanNames(self, chans=None): else: chanNames.append(None) else: - if c < self.nChan and c >= 0: + if 0 <= c < self.nChan: chanNames.append(self.chanNames[c]) else: chanNames.append(None) @@ -121,7 +121,7 @@ def getChanIndices(self, chans=None): else: chanIndices.append(None) else: - if c < self.nChan and c >= 0: + if 0 <= c < self.nChan: chanIndices.append(c) else: chanIndices.append(None) diff --git a/cebl/eeg/eeg.py b/cebl/eeg/eeg.py index 4a428e5..d7953cc 100644 --- a/cebl/eeg/eeg.py +++ b/cebl/eeg/eeg.py @@ -1,6 +1,5 @@ -"""Module for processing unsegmented eeg. +"""Loading and processing unsegmented / asynchronous EEG signals. """ - import pickle import json import matplotlib.pyplot as plt @@ -20,12 +19,20 @@ from . import seg +# pylint: disable=too-many-public-methods class EEG(EEGBase): - """Class for processing unsegmented eeg. + """An unsegmented / asynchronous EEG signal. """ def __init__(self, data, sampRate=256.0, chanNames=None, - markers=None, deviceName='', dtype=None, copy=False): - """Construct a new EEG instance. + markers=None, deviceName="", dtype=None, copy=False): + """Unsegmented / asynchronous EEG signal data. This class is used to + represent a single, contiguous EEG signal. EEG data should typically + be represented in this way for loading, preprocessing and analysis in + asynchronous settings, i.e., when the signals are not time-locked. + + For synchronous applications, i.e., when the signals are time-locked, + an EEG object is first typically created for loading and preprocessing + and then a SegmentedEEG object is created using the segment method. Args: data: A 2D numpy array of floats containing the eeg data. @@ -55,7 +62,7 @@ def __init__(self, data, sampRate=256.0, chanNames=None, the data argument. copy: If False (default) then data will not be copied if - possible. If True, then the data definitely be + possible. If True, then the data definitely be copied. Warning: If multiple EEG instances use the same un-copied data array, then modifying one EEG instance may lead to undefined behavior in @@ -69,7 +76,7 @@ def __init__(self, data, sampRate=256.0, chanNames=None, # if given an int, assume its the channel in the data if isinstance(markers, (int,)): markerChan = markers - markers = self.data[:,markerChan] + markers = self.data[:, markerChan] self.data = np.delete(self.data, markerChan, axis=1) # initialize the eeg base class @@ -80,6 +87,15 @@ def __init__(self, data, sampRate=256.0, chanNames=None, self.setMarkers(markers, copy=copy) def copy(self, dtype=None): + """Create a copy if this EEG object. 
The underlying signal data + will be duplicated. + + Args: + dtype: The data type used to store the signal. Must be + a floating point type, e.g., np.float32 or np.float64. + If None (default) the data type will be the same as + the EEG object being copied. + """ return EEG(data=self.data, sampRate=self.sampRate, chanNames=self.chanNames, markers=self.markers, deviceName=self.deviceName, dtype=dtype, copy=True) @@ -104,109 +120,188 @@ def setMarkers(self, markers, copy=False): """ if markers is None: self.markers = np.linspace(0.0, self.nSec, self.nObs) + else: self.markers = np.array(markers, copy=copy) self.markers = self.markers.astype(self.dtype, copy=False) if len(self.markers) != self.nObs: - raise RuntimeError('Length of markers ' + str(len(self.markers)) + \ - ' does not match number of observations ' + str(self.nObs)) + raise RuntimeError("Length of markers " + str(len(self.markers)) + \ + " does not match number of observations " + str(self.nObs)) return self def bandpass(self, lowFreq, highFreq, **kwargs): + """Apply a linear bandpass / highpass / lowpass / stopband filter. + See sig.BandpassFilter for more information. + + Args: + lowFreq: The frequency of the lower corner of the filter. + See sig.BandpassFilter for more information. + + highFreq: The frequency of the high corner of the filter. + See sig.BandpassFilter for more information. + + **kwargs: Additional keyword arguments to pass to + sig.BandpassFilter. + + Returns: + This function modifies the signal data and returns self. + """ bp = sig.BandpassFilter(lowFreq=lowFreq, highFreq=highFreq, sampRate=self.sampRate, dtype=self.dtype, **kwargs) self.data = bp.filter(self.data) return self def cap(self, level): - self.data[self.data > level] = level + """Cap the signal values so that |signal| < level. + + Args: + level: Signal value for capping. + + Returns: + This function modifies the signal data and returns self. + """ + self.data[self.data > level] = level self.data[self.data < -level] = -level return self def commonAverageReference(self, *args, **kwargs): + """Apply a common average reference. This subtracts the mean signal + value across all channels from all channels at each time step. + See sig.commonAverageReference for more information. + + Args: + *args, **kwargs: Additional arguments to pass to + sig.commonAverageReference. + + Returns: + This function modifies the signal data and returns self. + """ self.data = sig.commonAverageReference(self.data, *args, **kwargs) return self def car(self, *args, **kwargs): + """Apply a common average reference. This function is an alias + to self.commonAverageReference. + """ return self.commonAverageReference(*args, **kwargs) def meanSeparate(self, recover=False): + """Separate the signal mean (across channels) into a separate + chanel. The mean across all channels, at each time step, is + computed and then subtracted from all channels. This is + similar to a common average reference. Note, however, that + any channel can be reconstructed from the remaining channels + in a signal that has had the common average reference applied. + This function replaces the last channel with the signal means, + which has the channel name "mean." It is then possible to + recover the original signal. See sig.meanSeparate for more + information. + + Args: + recover: If False (default) then separate the mean into + a final channel named mean. Otherwise, recover + the original signal from a mean-separated signal. 
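+                        (See the usage sketch in the Example section
+                        below.)
+
+        Example:
+            A minimal usage sketch, assuming ``eeg`` is an EEG instance
+            created elsewhere:
+
+                eeg.meanSeparate()                # last channel becomes "mean"
+                eeg.meanSeparate(recover=True)    # original signal recovered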
+
+        Throws:
+            RuntimeError if trying to recover but the last channel is
+            not named mean.
+
+        Returns:
+            This function modifies the signal data and returns self.
+        """
+        if recover and self.chanNames[-1] != "mean":
+            raise RuntimeError("Not a mean separated signal!")
+
         self.data = sig.meanSeparate(self.data, recover=recover)

         if recover:
-            self.chanNames[-1] = 'recovered'
+            self.chanNames[-1] = "recovered"
+
         else:
-            self.chanNames[-1] = 'mean'
+            self.chanNames[-1] = "mean"

         return self

-    def EOGRegress(self, vChan1, vChan2, hChan1, hChan2, model=None):
-        vChan1, vChan2 = self.getChanIndices((vChan1, vChan2))
-        hChan1, hChan2 = self.getChanIndices((hChan1, hChan2))
+    # pylint: disable=invalid-name
+    def EOGRegress(self, vChan1, vChan2, hChan1, hChan2, bipolar=True, model=None):
+        """Use linear regression to remove ocular artifacts using channels
+        that measure electrooculography (EOG).
+
+        This approach uses four EOG channels as inputs to a multivariate
+        linear least-squares regression model that attempts to predict all
+        channels at each time step using only these EOG signals.  These
+        predictions are then subtracted from each channel in order to
+        attenuate EOG artifacts.  If we assume that EOG artifacts are linear
+        in this way and that our EOG signals do not contain any real brain
+        signals (assumptions which are likely both violated) then this
+        should remove contamination in the EEG signals caused by eye
+        movement.

-        # report which chan?  do this elsewhere?  XXX - idfah
-        if None in (vChan1, vChan2, hChan1, hChan2):
-            raise RuntimeError('Invalid channel.')
+        Args:
+            vChan1:     Vertical EOG channel; should be above the right eye.

-        veog = self.data[:,vChan1] - self.data[:,vChan2]
-        heog = self.data[:,hChan1] - self.data[:,hChan2]
-        eog = np.vstack((veog, heog)).T
+            vChan2:     Vertical EOG channel; should be below the right eye.

-        bp = sig.BandpassFilter(0.0, 20.0, order=2, sampRate=self.sampRate)
-        eogFilt = bp.filter(eog);
+            hChan1:     Horizontal EOG channel; should be outside the left eye.

-        if model is None:
-            model = ml.RidgeRegression(eogFilt, self.data)
+            hChan2:     Horizontal EOG channel; should be outside the right eye.

-        self.data -= model.eval(eogFilt)
+            bipolar:    If True (default) then the difference between the
+                        vertical channels (vChan1 - vChan2) and between the
+                        horizontal channels (hChan1 - hChan2) are used as
+                        inputs to the linear regression model.  If False, then
+                        all four channels are used as inputs to the model.

-        return self, model
+            model:      Linear regression model to use.  If None (default) then
+                        a new model will be constructed.  Note that the model
+                        is also returned.  This is useful if you want to use
+                        the same linear EOG model to preprocess multiple EEG
+                        objects.

-    def EOGRegress2(self, vChan1, vChan2, hChan1, hChan2, model=None):
+        Returns:
+            This function modifies the signal data and returns a tuple
+            containing self and the linear model used for regression.
+            The linear model returned can be passed as an argument to
+            process other signals in the same way, e.g., test data.
+        """
         vChan1, vChan2 = self.getChanIndices((vChan1, vChan2))
         hChan1, hChan2 = self.getChanIndices((hChan1, hChan2))

         # report which chan?  do this elsewhere? 
XXX - idfah if None in (vChan1, vChan2, hChan1, hChan2): - raise RuntimeError('Invalid channel.') + raise RuntimeError('Invalid channel name.') - eog = self.data[:,(vChan1, vChan2, hChan1, hChan2)] + if bipolar: + veog = self.data[:, vChan1] - self.data[:, vChan2] + heog = self.data[:, hChan1] - self.data[:, hChan2] + eog = np.vstack((veog, heog)).T - if model is None: - model = ml.RidgeRegression(eog, self.data) - - self.data -= model.eval(eog) - - return self, model - - def EOGRegress3(self, chan, model=None): - chan = self.getChanIndices((chan,))[0] - - if chan is None: - raise RuntimeError('Invalid channel.') + else: + eog = self.data[:, (vChan1, vChan2, hChan1, hChan2)] - eog = self.data[:,chan][:,None] + bp = sig.BandpassFilter(0.0, 20.0, order=2, sampRate=self.sampRate) + eogFilt = bp.filter(eog) if model is None: - model = ml.RidgeRegression(eog, self.data) + model = ml.RidgeRegression(eogFilt, self.data) - self.data -= model.eval(eog) + self.data -= model.eval(eogFilt) return self, model - def sharpen(self, coord='sphere', *args, **kwargs): + def sharpen(self, coord='sphere', **kwargs): locs = np.asarray([chanlocs.chanLocs3d[cn.lower()] for cn in self.getChanNames()], dtype=self.dtype) coord = coord.lower() if coord == '2d': # steriographic projection - x = locs[:,0] - y = locs[:,1] - z = locs[:,2] + x = locs[:, 0] + y = locs[:, 1] + z = locs[:, 2] xy = np.vstack((x/(1.0+z), y/(1.0+z))).T dist = head.euclidDist(xy, xy) @@ -218,9 +313,9 @@ def sharpen(self, coord='sphere', *args, **kwargs): dist = head.sphereDist(locs, locs) else: - raise RuntimeError('Invalid coord %s.', str(coord)) + raise RuntimeError('Invalid coord %s.' % str(coord)) - self.data = sig.sharpen(self.data, dist=dist, *args, **kwargs) + self.data = sig.sharpen(self.data, dist=dist, **kwargs) return self def decimate(self, factor, *args, **kwargs): @@ -253,7 +348,10 @@ def interpolate(self, factor, *args, **kwargs): return self - def resample(self, factorDown, factorUp=1, interpKwargs=dict(), **decimKwargs): + def resample(self, factorDown, factorUp=1, interpKwargs=None, **decimKwargs): + if interpKwargs is None: + interpKwargs = {} + self.data = sig.resample(self.data, factorDown, factorUp, interpKwargs=interpKwargs, **decimKwargs) @@ -285,7 +383,7 @@ def demean(self): return self def getStandardizer(self, **kwargs): - return ml.Standardizer(self.data) + return ml.Standardizer(self.data, **kwargs) def standardize(self, standardizer=None, **kwargs): if standardizer is None: @@ -409,23 +507,24 @@ def spectrogram(self, *args, **kwargs): def reference(self, chans): chans = self.getChanIndices(chans) - ref = self.data[:,chans] + ref = self.data[:, chans] if len(chans) > 1: ref = ref.mean(axis=1) - self.data -= ref[:,None] + self.data -= ref[:, None] return self def bipolarReference(self, pairs): for pair in pairs: if len(pair) > 2: - raise RuntimeError('Bipolar reference assumes pairs of electrodes but got %s.' % pair) + raise RuntimeError( + 'Bipolar reference assumes pairs of electrodes but got %s.' 
% pair) pair = self.getChanIndices(pair) - ref = self.data[:,pair].mean(axis=1) - self.data[:,pair] = ref.reshape((-1, 1)) + ref = self.data[:, pair].mean(axis=1) + self.data[:, pair] = ref.reshape((-1, 1)) chanNames = [] for pair in pairs: @@ -443,10 +542,10 @@ def arbitraryReference(self, chanRefs): chans = self.getChanIndices(chans) refs = self.getChanIndices(refs) - ref = self.data[:,refs] + ref = self.data[:, refs] if len(refs) > 1: ref = ref.mean(axis=1) - self.data[:,chans] -= ref.reshape((-1, 1)) + self.data[:, chans] -= ref.reshape((-1, 1)) return self @@ -467,7 +566,8 @@ def trim(self, start=None, end=None): end = int(end*float(self.sampRate))/self.sampRate if end > self.nSec: - raise RuntimeError('end %f is greater than length of data %f.' % (end, self.nSec)) + raise RuntimeError( + 'end %f is greater than length of data %f.' % (end, self.nSec)) endTrimSamp = int((end-self.nSec)*self.sampRate) else: @@ -532,9 +632,10 @@ def plotAutoCorrelation(self, chans=None, lags=None, ax=None, **kwargs): return {'ax': ax, 'lines': lines} def plotCWT(self, chans=None, start=None, end=None, - method='cwt', colorbar=True, *args, **kwargs): + method='cwt', colorbar=True, **kwargs): if chans is None: chans = self.getChanNames() + chans = self.getChanIndices(chans) chanNames = self.getChanNames(chans) @@ -553,11 +654,11 @@ def plotCWT(self, chans=None, start=None, end=None, else: endSamp = int(end*self.sampRate) - s = self.data[startSamp:endSamp,chans] + s = self.data[startSamp:endSamp, chans] time = np.linspace(0, end-start, s.shape[0]).astype(self.dtype, copy=False) - transform = sig.CWT(sampRate=self.sampRate, *args, **kwargs) + transform = sig.CWT(sampRate=self.sampRate, **kwargs) freqs = transform.freqs powers, phases = transform.apply(s) @@ -573,7 +674,7 @@ def plotCWT(self, chans=None, start=None, end=None, ax = fig.add_subplot(plotRows, plotCols, i+1) axs += [ax] - im = ax.imshow(powers[:,:,i].T, interpolation='lanczos', + im = ax.imshow(powers[:, :, i].T, interpolation='lanczos', origin='lower', cmap=pltcm.get_cmap('jet'), norm=pltLogNorm(), aspect='auto', extent=(0.0, d.shape[0]/float(self.sampRate), @@ -620,7 +721,7 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): if end is not None: endSamp = int(end*self.sampRate) - s = util.colmat(self.data[startSamp:endSamp,chans]) + s = util.colmat(self.data[startSamp:endSamp, chans]) nObs = s.shape[0] nDim = s.shape[1] @@ -635,10 +736,10 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): ax = fig.add_subplot(nDim, nDim, r*nDim+c+1+nDim*(nDim-(r%nDim)*2-1)) axs.append(ax) - sx = s[:,r] - sy = s[:,c] + sx = s[:, r] + sy = s[:, c] - if (r == c): + if r == c: plt.hist(sx, bins=bins, normed=False) ax.set_xlim(-mx/2.0, mx/2.0) else: @@ -670,12 +771,15 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): return {'fig': fig, 'axs': axs} - def plotPSD(self, chans=None, ax=None, psdKwargs={}, **kwargs): + def plotPSD(self, chans=None, ax=None, psdKwargs=None, **kwargs): + if psdKwargs is None: + psdKwargs = {} + if chans is None: chans = self.getChanNames() chans = self.getChanIndices(chans) - psd = sig.PSD(self.data[:,chans], sampRate=self.sampRate, **psdKwargs) + psd = sig.PSD(self.data[:, chans], sampRate=self.sampRate, **psdKwargs) return psd.plotPower(ax=ax, **kwargs) def plotTrace(self, start=None, end=None, chans=None, @@ -753,7 +857,7 @@ def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs lines = [] for j in range(s.shape[1]): - lines += ax.plot(s[lag:,j], s[:-lag,j], 
**kwargs) + lines += ax.plot(s[lag:, j], s[:-lag, j], **kwargs) #color=plt.cm.jet(j/float(s.shape[1]), alpha=0.2)) ax.set_xlabel(r'$x_t$') @@ -765,7 +869,8 @@ def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs ax.set_ylim((mn, mx)) #if i == nCols-1: - # leg = ax.legend(lines, self.getChanNames(chans), labelspacing=0.34, prop={'size': 12}, + # leg = ax.legend(lines, self.getChanNames(chans), + # labelspacing=0.34, prop={'size': 12}, # bbox_to_anchor=(1.35, 0.8)) if i == 0: leg = ax.legend(lines, self.getChanNames(chans), @@ -787,7 +892,7 @@ def saveFile(self, fileName): fileFormat = dotSplit[-1] if fileFormat == 'pkl': - data = np.hstack((self.data, self.markers[:,None])) + data = np.hstack((self.data, self.markers[:, None])) with util.openCompressedFile(fileName, 'w') as fileHandle: pickle.dump(data, fileHandle, protocol=pickle.HIGHEST_PROTOCOL) else: From 2c92c1ebc5887499b500a67fc58d75763d631357 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Fri, 1 Feb 2019 17:16:05 -0700 Subject: [PATCH 2/9] style: linting --- cebl/eeg/base.py | 13 +- cebl/eeg/eeg.py | 208 +++++++++++++++-------- cebl/eeg/readbdf.py | 6 +- cebl/eeg/seg.py | 358 ++++++++++++++++++++------------------- cebl/ml/__init__.py | 15 +- cebl/ml/arc.py | 43 +++-- cebl/ml/da.py | 194 +++++++++++---------- cebl/ml/logreg.py | 14 +- cebl/ml/nnet/esn.py | 36 ++-- cebl/ml/nnet/softmax.py | 81 ++++----- cebl/ml/strans/ica.py | 27 +-- cebl/ml/strans/msf.py | 12 +- cebl/ml/strans/pca.py | 28 +-- cebl/ml/strans/strans.py | 16 +- cebl/rt/main.py | 9 +- 15 files changed, 571 insertions(+), 489 deletions(-) diff --git a/cebl/eeg/base.py b/cebl/eeg/base.py index 24fa6e4..6a41be4 100644 --- a/cebl/eeg/base.py +++ b/cebl/eeg/base.py @@ -1,10 +1,9 @@ """Module containing EEG base class and related routines. """ - class EEGBase: """Base class for all EEG types. """ - def __init__(self, nObs, nChan, sampRate=256.0, chanNames=None, deviceName=''): + def __init__(self, nObs, nChan, sampRate=256.0, chanNames=None, deviceName=""): """Construct a new EEGBase instance. Args: @@ -17,7 +16,7 @@ def __init__(self, nObs, nChan, sampRate=256.0, chanNames=None, deviceName=''): chanNames: A list of names of the channels in the eeg data. If None (default) then the channel names are set - to '1', '2', ... 'nChan'. + to "1", "2", ... "nChan". deviceName: The name of the device used to record the eeg data. """ @@ -84,14 +83,14 @@ def setChanNames(self, chanNames=None): Args: chanNames: A list or tuple of channel names. If None (default) - then the channel names are set to '1', '2', ... 'nChan'. + then the channel names are set to "1", "2", ... "nChan". """ if chanNames is None: chanNames = [str(i) for i in range(self.nChan)] if len(chanNames) != self.nChan: - raise RuntimeError('Length of chanNames ' + str(len(chanNames)) + \ - ' does not match number of channels ' + str(self.nChan)) + raise RuntimeError("Length of chanNames " + str(len(chanNames)) + + " does not match number of channels " + str(self.nChan)) self.chanNames = list(chanNames) @@ -137,7 +136,7 @@ def setDeviceName(self, deviceName): """Set the name of the device used to record the eeg data. """ if deviceName is None: - deviceName = '' + deviceName = "" self.deviceName = str(deviceName) return self diff --git a/cebl/eeg/eeg.py b/cebl/eeg/eeg.py index d7953cc..b704da0 100644 --- a/cebl/eeg/eeg.py +++ b/cebl/eeg/eeg.py @@ -45,7 +45,7 @@ def __init__(self, data, sampRate=256.0, chanNames=None, chanNames: A list of names of the channels in the eeg data. 
If None (default) then the channel names are set - to '1', '2', ... 'nChan'. + to "1", "2", ... "nChan". markers: EEG event markers. This is a list or tuple of floats that mark events in the eeg data. There should be one @@ -127,7 +127,7 @@ def setMarkers(self, markers, copy=False): self.markers = self.markers.astype(self.dtype, copy=False) if len(self.markers) != self.nObs: - raise RuntimeError("Length of markers " + str(len(self.markers)) + \ + raise RuntimeError("Length of markers " + str(len(self.markers)) + " does not match number of observations " + str(self.nObs)) return self @@ -272,7 +272,7 @@ def EOGRegress(self, vChan1, vChan2, hChan1, hChan2, bipolar=True, model=None): # report which chan? do this elsewhere? XXX - idfah if None in (vChan1, vChan2, hChan1, hChan2): - raise RuntimeError('Invalid channel name.') + raise RuntimeError("Invalid channel name.") if bipolar: veog = self.data[:, vChan1] - self.data[:, vChan2] @@ -289,36 +289,72 @@ def EOGRegress(self, vChan1, vChan2, hChan1, hChan2, bipolar=True, model=None): model = ml.RidgeRegression(eogFilt, self.data) self.data -= model.eval(eogFilt) - return self, model - def sharpen(self, coord='sphere', **kwargs): + def sharpen(self, coord="sphere", **kwargs): + """Apply a spatial sharpening transform. Similar to the sharpen filters + used in image processing. See sig.sharpen for more information. + + Args: + coord: String value of "2d" or "3d" or "sphere" (default) + indicating the coordinate system to use for distance + calculations. + + **kwargs: Additional keyword arguments to pass to sig.sharpen. + + Returns: + This function modifies the signal data and returns self. + """ locs = np.asarray([chanlocs.chanLocs3d[cn.lower()] for cn in self.getChanNames()], dtype=self.dtype) coord = coord.lower() - if coord == '2d': + if coord == "2d": # steriographic projection x = locs[:, 0] y = locs[:, 1] z = locs[:, 2] - xy = np.vstack((x/(1.0+z), y/(1.0+z))).T + xy = np.vstack((x / (1.0 + z), y / (1.0 + z))).T dist = head.euclidDist(xy, xy) - elif coord == '3d': + elif coord == "3d": dist = head.euclidDist(locs, locs) - elif coord == 'sphere': + elif coord == "sphere": dist = head.sphereDist(locs, locs) else: - raise RuntimeError('Invalid coord %s.' % str(coord)) + raise RuntimeError("Invalid coord %s." % str(coord)) self.data = sig.sharpen(self.data, dist=dist, **kwargs) return self def decimate(self, factor, *args, **kwargs): + """Decimate by a given factor. See sig.decimate for + more information. + + Notes: + Decimation includes the application of an + antialiasing filter followed by downsampling. + If an antialiasing filter has already been + applied, consider using the downsample method. + + This method also downsamples the markers through + downsampling without an antialiasing filter. + + Args: + factor: The factor by which to decimate the + signal. For example, a value of three + will result in keeping every third + signal value. + + *args: Additional arguments to pass to + **kwargs: sig.decimate. + + Returns: + This function modifies the signal data and returns self. + """ self.data = sig.decimate(self.data, factor, *args, **kwargs) self.markers = sig.downsample(self.markers, factor) @@ -329,6 +365,27 @@ def decimate(self, factor, *args, **kwargs): return self def downsample(self, factor): + """Downsample by a given factor. See sig.downsample for + more information. + + Notes: + Downsampling does not include the application of an + antialiasing filter. 
In order to prevent aliasing + artifacts, consider using the decimate method or else + applying a lowpass filter before downsampling. + + Args: + factor: The factor by which to downsample the + signal. For example, a value of three + will result in keeping every third + signal value. + + *args: Additional arguments to pass to + **kwargs: sig.downsample. + + Returns: + This function modifies the signal data and returns self. + """ self.data = sig.downsample(self.data, factor) self.markers = sig.downsample(self.markers, factor) @@ -412,8 +469,8 @@ def ma(self, *args, **kwargs): def icaFilter(self, comp, remove=False, lags=0, returnICA=False, **kwargs): ica = ml.ICA(self.data, lags=lags, **kwargs) - if ica.reason == 'diverge': - raise RuntimeError('ICA training diverged. Try a smaller learning rate.') + if ica.reason == "diverge": + raise RuntimeError("ICA training diverged. Try a smaller learning rate.") self.data = ica.filter(self.data, comp=comp, remove=remove) @@ -424,8 +481,8 @@ def icaFilter(self, comp, remove=False, lags=0, returnICA=False, **kwargs): def icaTransform(self, comp=None, remove=False, lags=0, returnICA=False, **kwargs): ica = ml.ICA(self.data, lags=lags, **kwargs) - if ica.reason == 'diverge': - raise RuntimeError('ICA training diverged. Try a smaller learning rate.') + if ica.reason == "diverge": + raise RuntimeError("ICA training diverged. Try a smaller learning rate.") newData = ica.transform(self.data, comp=comp, remove=remove) @@ -519,7 +576,7 @@ def bipolarReference(self, pairs): for pair in pairs: if len(pair) > 2: raise RuntimeError( - 'Bipolar reference assumes pairs of electrodes but got %s.' % pair) + "Bipolar reference assumes pairs of electrodes but got %s." % pair) pair = self.getChanIndices(pair) @@ -529,7 +586,7 @@ def bipolarReference(self, pairs): chanNames = [] for pair in pairs: pair = self.getChanNames(pair) - chanNames.append('-'.join(pair)) + chanNames.append("-".join(pair)) self.deleteChans([r for l, r in pairs]) self.setChanNames(chanNames) @@ -555,7 +612,7 @@ def trim(self, start=None, end=None): start = int(start*float(self.sampRate))/self.sampRate if start < 0.0: - raise RuntimeError('start %f is less than zero.' % start) + raise RuntimeError("start %f is less than zero." % start) startTrimSamp = int(start*self.sampRate) else: @@ -567,7 +624,7 @@ def trim(self, start=None, end=None): if end > self.nSec: raise RuntimeError( - 'end %f is greater than length of data %f.' % (end, self.nSec)) + "end %f is greater than length of data %f." 
% (end, self.nSec)) endTrimSamp = int((end-self.nSec)*self.sampRate) else: @@ -622,17 +679,17 @@ def plotAutoCorrelation(self, chans=None, lags=None, ax=None, **kwargs): ax = fig.add_subplot(1, 1, 1) ax.grid() - ax.set_xlabel(r'Lag') - ax.set_ylabel(r'Correlation') + ax.set_xlabel(r"Lag") + ax.set_ylabel(r"Correlation") lines = ax.plot(np.arange(ac.shape[0]), ac, **kwargs) ax.autoscale(tight=True) - return {'ax': ax, 'lines': lines} + return {"ax": ax, "lines": lines} def plotCWT(self, chans=None, start=None, end=None, - method='cwt', colorbar=True, **kwargs): + method="cwt", colorbar=True, **kwargs): if chans is None: chans = self.getChanNames() @@ -674,9 +731,9 @@ def plotCWT(self, chans=None, start=None, end=None, ax = fig.add_subplot(plotRows, plotCols, i+1) axs += [ax] - im = ax.imshow(powers[:, :, i].T, interpolation='lanczos', - origin='lower', cmap=pltcm.get_cmap('jet'), - norm=pltLogNorm(), aspect='auto', + im = ax.imshow(powers[:, :, i].T, interpolation="lanczos", + origin="lower", cmap=pltcm.get_cmap("jet"), + norm=pltLogNorm(), aspect="auto", extent=(0.0, d.shape[0]/float(self.sampRate), np.min(freqs), np.max(freqs))) ims += [im] @@ -686,27 +743,27 @@ def plotCWT(self, chans=None, start=None, end=None, #plt.colorbar() ax.autoscale(tight=True) - ax.set_title('Channel: %s' % cn) + ax.set_title("Channel: %s" % cn) ax.set_xlabel("Seconds") ax.set_ylabel("Frequencies (Hz)") #locs = range(nObs) #if len(locs) > 20: # locs = locs[0:len(locs):len(locs)/20] - #labels = ['{:6.0g}'.format(v/float(self.sampRate)) for v in locs] + #labels = ["{:6.0g}".format(v/float(self.sampRate)) for v in locs] #print(labels) #plt.xticks(locs, labels) #locs = range(self.nFreq) #if len(locs) > 20: # locs = locs[0:len(locs):len(locs)/20] - #labels = ['{:6.2g}'.format(self.freqs[v]) for v in locs] + #labels = ["{:6.2g}".format(self.freqs[v]) for v in locs] #plt.yticks(locs, labels) #ax.set_title(chanNames[i]) if colorbar: cbar = plt.colorbar(im) - cbar.set_label('Power') + cbar.set_label("Power") - return {'axs': axs, 'ims': ims} + return {"axs": axs, "ims": ims} def plotPairs(self, chans=None, start=None, end=None, bins=30): if chans is None: @@ -715,7 +772,7 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): startSamp = None if start is not None: - startSamp = int(start*self.sampRate) + startSamp = int(start * self.sampRate) endSamp = None if end is not None: @@ -733,7 +790,8 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): for r in range(nDim): for c in range(nDim): - ax = fig.add_subplot(nDim, nDim, r*nDim+c+1+nDim*(nDim-(r%nDim)*2-1)) + ax = fig.add_subplot(nDim, nDim, + r * nDim + c + 1 + nDim * (nDim - (r % nDim) * 2 - 1)) axs.append(ax) sx = s[:, r] @@ -741,16 +799,16 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): if r == c: plt.hist(sx, bins=bins, normed=False) - ax.set_xlim(-mx/2.0, mx/2.0) + ax.set_xlim(-mx / 2.0, mx / 2.0) else: - ax.scatter(sx, sy, alpha=0.5, s=10, marker='.') - ax.plot((-mx, mx), (-mx, mx), color='grey', linestyle='dashed') + ax.scatter(sx, sy, alpha=0.5, s=10, marker=".") + ax.plot((-mx, mx), (-mx, mx), color="grey", linestyle="dashed") pearsonr, pearsonp = spstats.pearsonr(sx, sy) - pearsons = ".%2d" % np.round(pearsonr*100) + pearsons = ".%2d" % np.round(pearsonr * 100) ax.text(0.9, 0.1, pearsons, transform=ax.transAxes, - horizontalalignment='right', - verticalalignment='bottom', + horizontalalignment="right", + verticalalignment="bottom", fontsize=8) ax.set_ylim(-mx, mx) ax.set_xlim(-mx, mx) @@ -760,7 +818,7 @@ def 
plotPairs(self, chans=None, start=None, end=None, bins=30): ax.set_xlabel(self.chanNames[c]) ax.set_xticks([]) else: - #ax.set_xlabel('') + #ax.set_xlabel("") ax.get_xaxis().set_visible(False) if c == 0: @@ -769,7 +827,7 @@ def plotPairs(self, chans=None, start=None, end=None, bins=30): else: ax.get_yaxis().set_visible(False) - return {'fig': fig, 'axs': axs} + return {"fig": fig, "axs": axs} def plotPSD(self, chans=None, ax=None, psdKwargs=None, **kwargs): if psdKwargs is None: @@ -813,8 +871,8 @@ def plotTrace(self, start=None, end=None, chans=None, fig = plt.figure() ax = fig.add_subplot(1, 1, 1) - ax.set_xlabel(r'Time ($s$)') - ax.set_ylabel(r'Signal ($\mu V$)') + ax.set_xlabel(r"Time ($s$)") + ax.set_ylabel(r"Signal ($\mu V$)") if len(chans) > 1: ax.set_yticklabels([c for i, c in enumerate(self.chanNames) if i in chans]) ax.set_yticks(sep) @@ -826,11 +884,11 @@ def plotTrace(self, start=None, end=None, chans=None, lines = ax.plot(time, s, **kwargs) if drawZero: - ax.hlines(sep, time[0], time[-1], linewidth=2, linestyle='--', color='grey') + ax.hlines(sep, time[0], time[-1], linewidth=2, linestyle="--", color="grey") ax.autoscale(tight=True) - return {'ax': ax, 'lines': lines, 'scale': scale, 'sep': sep} + return {"ax": ax, "lines": lines, "scale": scale, "sep": sep} def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs): if chans is None: @@ -843,8 +901,8 @@ def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs #fig.subplots_adjust(hspace=0.15, wspace=0.25, # left=0.05, right=0.92, top=0.97, bottom=0.06) - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.35 + if "alpha" not in kwargs: + kwargs["alpha"] = 0.35 nCols = np.ceil(np.sqrt(len(lags))) nRows = np.ceil(len(lags)/float(nCols)) @@ -860,8 +918,8 @@ def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs lines += ax.plot(s[lag:, j], s[:-lag, j], **kwargs) #color=plt.cm.jet(j/float(s.shape[1]), alpha=0.2)) - ax.set_xlabel(r'$x_t$') - ax.set_ylabel(r'$x_{t-%d}$' % lag) + ax.set_xlabel(r"$x_t$") + ax.set_ylabel(r"$x_{t-%d}$" % lag) ax.set_xticks([]) ax.set_yticks([]) @@ -870,39 +928,39 @@ def plotLags(self, chans=None, lags=(1, 2, 4, 8, 16, 32, 64, 128, 256), **kwargs #if i == nCols-1: # leg = ax.legend(lines, self.getChanNames(chans), - # labelspacing=0.34, prop={'size': 12}, + # labelspacing=0.34, prop={"size": 12}, # bbox_to_anchor=(1.35, 0.8)) if i == 0: leg = ax.legend(lines, self.getChanNames(chans), - loc='upper left', prop={'size': 12}) + loc="upper left", prop={"size": 12}) for l in leg.legendHandles: l.set_alpha(1.0) l.set_linewidth(2) - return {'ax': ax, 'lines': lines, 'legend': leg} + return {"ax": ax, "lines": lines, "legend": leg} def saveFile(self, fileName): fileNameLower = fileName.lower() - dotSplit = fileNameLower.rsplit('.') + dotSplit = fileNameLower.rsplit(".") if dotSplit[-1] in util.compressedExtensions: fileFormat = dotSplit[-2] else: fileFormat = dotSplit[-1] - if fileFormat == 'pkl': + if fileFormat == "pkl": data = np.hstack((self.data, self.markers[:, None])) - with util.openCompressedFile(fileName, 'w') as fileHandle: + with util.openCompressedFile(fileName, "w") as fileHandle: pickle.dump(data, fileHandle, protocol=pickle.HIGHEST_PROTOCOL) else: - raise RuntimeError('Unknown file format ' + str(fileFormat)) + raise RuntimeError("Unknown file format " + str(fileFormat)) class EEGFromPickledMatrix(EEG): def __init__(self, fileName, sampRate, chanNames=None, markers=-1, transpose=False, *args, **kwargs): - with 
util.openCompressedFile(fileName, 'rb') as fileHandle: - data = np.asarray(pickle.load(fileHandle, encoding='bytes')) + with util.openCompressedFile(fileName, "rb") as fileHandle: + data = np.asarray(pickle.load(fileHandle, encoding="bytes")) if transpose: data = data.T @@ -916,7 +974,7 @@ def __init__(self, fileName, protocol, trial=1, *args, **kwargs): # should be able to give keys as argument XXX - idfah - fileHandle = util.openCompressedFile(fileName, 'r') + fileHandle = util.openCompressedFile(fileName, "r") jData = json.load(fileHandle) fileHandle.close() @@ -926,26 +984,26 @@ def __init__(self, fileName, protocol, trial=1, *args, **kwargs): found = False for d in jData: - if d['protocol'] == protocol: + if d["protocol"] == protocol: jData = d found = True break if not found: - raise RuntimeError('Invalid protocol: %s.' % str(protocol)) + raise RuntimeError("Invalid protocol: %s." % str(protocol)) - sampRate = jData['sample rate'] - chanNames = [str(cn) for cn in jData['channels']] - deviceName = jData['device'] + sampRate = jData["sample rate"] + chanNames = [str(cn) for cn in jData["channels"]] + deviceName = jData["device"] - self.notes = jData['notes'] - self.date = jData['date'] - self.location = jData['location'] - self.impairment = jData['impairment'] - self.subjectNumber = jData['subject'] + self.notes = jData["notes"] + self.date = jData["date"] + self.location = jData["location"] + self.impairment = jData["impairment"] + self.subjectNumber = jData["subject"] - trialName = 'trial %d' % trial - data = np.asarray(jData['eeg'][trialName]).T + trialName = "trial %d" % trial + data = np.asarray(jData["eeg"][trialName]).T markers = len(chanNames) EEG.__init__(self, data=data, sampRate=sampRate, chanNames=chanNames, @@ -955,9 +1013,9 @@ class EEGFromBDF(EEG): def __init__(self, fileName, *args, **kwargs): info = readbdf.readBDF(fileName) - self.date = info['startDate'] - self.time = info['startTime'] + self.date = info["startDate"] + self.time = info["startTime"] - EEG.__init__(self, data=info['data'], sampRate=info['sampRate'], - chanNames=info['chanNames'], deviceName='biosemi', - markers=info['markers'], *args, **kwargs) + EEG.__init__(self, data=info["data"], sampRate=info["sampRate"], + chanNames=info["chanNames"], deviceName="biosemi", + markers=info["markers"], *args, **kwargs) diff --git a/cebl/eeg/readbdf.py b/cebl/eeg/readbdf.py index 242bc52..4d6633f 100644 --- a/cebl/eeg/readbdf.py +++ b/cebl/eeg/readbdf.py @@ -1,3 +1,5 @@ +"""Work in progress: read bdf data from a file. +""" import matplotlib.pyplot as plt import numpy as np import struct @@ -161,7 +163,9 @@ def readBDF(fileName, verbose=False): fileName = 's11-letter-b.bdf' contents = readBDF(fileName, verbose=True) - print('From', fileName, 'read',contents['nDataRecords']*contents['nSecondsPerDataRecord'],'seconds of',contents['nChan'],'chans of data at',contents['sampRate'],'samples per second. EEG matrix is',contents['data'].shape) + print('From', fileName, 'read', contents['nDataRecords']*contents['nSecondsPerDataRecord'], + 'seconds of', contents['nChan'], 'chans of data at', contents['sampRate'], + 'samples per second. EEG matrix is',contents['data'].shape) n = 25000 eeg = contents['data'] plt.figure(2) diff --git a/cebl/eeg/seg.py b/cebl/eeg/seg.py index 963dcc6..e76a3d2 100644 --- a/cebl/eeg/seg.py +++ b/cebl/eeg/seg.py @@ -15,14 +15,15 @@ from . 
import head +# pylint: disable=too-many-public-methods class SegmentedEEG(EEGBase): def __init__(self, data, sampRate, chanNames=None, markers=None, - start=0.0, deviceName='', dtype=None, copy=False): + start=0.0, deviceName="", dtype=None, copy=False): """Construct a new SegmentedEEG instance for processing eeg data that has been split into segments of equal length. Args: - data: A 3D numpy array of floats of shape (nSeg,nObs[,nDim]) + data: A 3D numpy array of floats of shape (nSeg, nObs[,nDim]) containing the eeg segments. The first axis corresponds to the eeg segments. The second axis corresponds to the observations (i.e., time steps). @@ -34,7 +35,7 @@ def __init__(self, data, sampRate, chanNames=None, markers=None, chanNames: A list of names of the channels in the eeg data. If None (default) then the channel names are set - to '1', '2', ... 'nChan'. + to "1", "2", ... "nChan". markers: EEG event markers. This is a list or tuple of floats that mark each eeg segment. There should be one marker @@ -56,7 +57,7 @@ def __init__(self, data, sampRate, chanNames=None, markers=None, the data argument. copy: If False (default) then data will not be copied if - possible. If True, then the data definitely be + possible. If True, then the data definitely be copied. Warning: If multiple EEG instances use the same un-copied data array, then modifying one EEG instance may lead to undefined behavior in @@ -72,7 +73,10 @@ def __init__(self, data, sampRate, chanNames=None, markers=None, EEGBase.__init__(self, data.shape[1], data.shape[2], sampRate=sampRate, chanNames=chanNames, deviceName=deviceName) + self.markers = None self.setMarkers(markers, copy=copy) + + self.start = None self.setStart(start) def copy(self, dtype=None): @@ -81,7 +85,7 @@ def copy(self, dtype=None): deviceName=self.deviceName, dtype=dtype, copy=True) def getData(self): - """Get the current data as a numpy array of shape (nSeg,nObs,nChan). + """Get the current data as a numpy array of shape (nSeg, nObs, nChan). """ return self.data @@ -99,13 +103,14 @@ def setMarkers(self, markers, copy=False): self.markers = self.markers.astype(self.dtype, copy=False) if len(self.markers) != self.nSeg: - raise RuntimeError('Length of markers ' + str(len(self.markers)) + \ - ' does not match number of segments ' + str(self.nSeg)) + raise RuntimeError("Length of markers " + str(len(self.markers)) + + " does not match number of segments " + str(self.nSeg)) return self def setStart(self, start=None): if start is None: start = self.start + self.start = int(np.floor(start*float(self.sampRate)))/float(self.sampRate) return self @@ -136,7 +141,7 @@ def select(self, matchFunc, copy=False): sampRate=self.sampRate, markers=self.markers[indicators], start=self.getStart(), chanNames=self.chanNames, deviceName=self.deviceName, copy=copy) - def selectChr(self, character, sign=0, *args, **kwargs): + def selectChr(self, character, *args, sign=0, **kwargs): """ Note np.abs in documentation XXX - idfah """ def matchFunc(mark): @@ -167,8 +172,8 @@ def trim(self, start=None, end=None): start = int(start*float(self.sampRate))/self.sampRate if start < startOrig: - raise RuntimeError('Cannot trim to start %f before original start %f.' % - (start, startOrig)) + raise RuntimeError("Cannot trim to start %f before original start %f." 
% + (start, startOrig)) startTrimSamp = int((start-startOrig)*self.sampRate) else: @@ -180,14 +185,14 @@ def trim(self, start=None, end=None): end = int(end*float(self.sampRate))/self.sampRate if end > endOrig: - raise RuntimeError('Cannot trim to end %f before original end %f.' % - (end, endOrig)) + raise RuntimeError("Cannot trim to end %f before original end %f." % + (end, endOrig)) endTrimSamp = int((end-endOrig)*self.sampRate) else: endTrimSamp = None - self.data = self.data[:,startTrimSamp:endTrimSamp,:] + self.data = self.data[:, startTrimSamp:endTrimSamp, :] self.nObs = self.data.shape[1] self.nSec = self.nObs / float(self.sampRate) @@ -196,23 +201,21 @@ def trim(self, start=None, end=None): return self - ''' - def split(self, nSec): - nObs = self.sampRate*nSec - rem = np.remainder(self.nObs, nObs) + ## def split(self, nSec): + ## nObs = self.sampRate*nSec + ## rem = np.remainder(self.nObs, nObs) - self.data = self.data[:,:(self.nObs-rem-1)] + ## self.data = self.data[:, :(self.nObs-rem-1)] - nSplit = self.data.shape[1] // nObs + ## nSplit = self.data.shape[1] // nObs - self.data = self.data.reshape((nSplit*self.nSeg, nObs, -1)) + ## self.data = self.data.reshape((nSplit*self.nSeg, nObs, -1)) - self.nSeg = self.data.shape[0] - self.nObs = self.data.shape[1] - self.nSec = self.nObs / float(self.sampRate) + ## self.nSeg = self.data.shape[0] + ## self.nObs = self.data.shape[1] + ## self.nSec = self.nObs / float(self.sampRate) - return self - ''' + ## return self def split(self, nSec, overlap=0.0): span = int(self.sampRate*nSec) @@ -224,7 +227,7 @@ def split(self, nSec, overlap=0.0): # test this? XXX - idfah self.markers = np.repeat(self.markers, windows.shape[1]) - self.data = windows.reshape((-1,span,self.nChan)) + self.data = windows.reshape((-1, span, self.nChan)) self.nSeg = self.data.shape[0] self.nObs = self.data.shape[1] @@ -260,7 +263,7 @@ def deleteChans(self, chans): chans = self.getChanIndices(chans) self.data = np.delete(self.data, chans, axis=2) self.nChan -= len(chans) - self.chanNames = [c for i,c in enumerate(self.chanNames) if i not in chans] + self.chanNames = [c for i, c in enumerate(self.chanNames) if i not in chans] return self def keepChans(self, chans): @@ -271,7 +274,7 @@ def keepChans(self, chans): def reference(self, chans): chans = self.getChanIndices(chans) - ref = self.data[:,:,chans] + ref = self.data[:, :, chans] if len(chans) > 1: ref = ref.mean(axis=2) self.data -= util.segmat(ref) @@ -280,19 +283,20 @@ def reference(self, chans): def bipolarReference(self, pairs): for pair in pairs: if len(pair) > 2: - raise RuntimeError('Bipolar reference assumes pairs of electrodes but got %s.' % pair) + raise RuntimeError( + "Bipolar reference assumes pairs of electrodes but got %s." % pair) pair = self.getChanIndices(pair) - ref = self.data[:,:,pair].mean(axis=2) - self.data[:,:,pair] = util.segmat(ref) + ref = self.data[:, :, pair].mean(axis=2) + self.data[:, :, pair] = util.segmat(ref) chanNames = [] for pair in pairs: pair = self.getChanNames(pair) - chanNames.append('-'.join(pair)) + chanNames.append("-".join(pair)) - self.deleteChans([r for l,r in pairs]) + self.deleteChans([r for l, r in pairs]) self.setChanNames(chanNames) return self @@ -303,7 +307,7 @@ def commonAverageReference(self, *args, **kwargs): # for seg in self.data]) for i in range(self.data.shape[0]): - self.data[i,...] = sig.commonAverageReference(self.data[i,...], *args, **kwargs) + self.data[i, ...] 
= sig.commonAverageReference(self.data[i, ...], *args, **kwargs) return self @@ -312,57 +316,59 @@ def car(self, *args, **kwargs): def meanSeparate(self, recover=False): for i in range(self.data.shape[0]): - self.data[i,...] = sig.meanSeparate(self.data[i,...], recover=recover) + self.data[i, ...] = sig.meanSeparate(self.data[i, ...], recover=recover) if recover: - self.chanNames[-1] = 'recovered' + self.chanNames[-1] = "recovered" else: - self.chanNames[-1] = 'mean' + self.chanNames[-1] = "mean" return self - def sharpen(self, coord='sphere', *args, **kwargs): + def sharpen(self, coord="sphere", *args, **kwargs): locs = np.asarray([chanlocs.chanLocs3d[cn.lower()] for cn in self.getChanNames()], dtype=self.dtype) coord = coord.lower() - if coord == '2d': + if coord == "2d": # steriographic projection - x = locs[:,0] - y = locs[:,1] - z = locs[:,2] + x = locs[:, 0] + y = locs[:, 1] + z = locs[:, 2] xy = np.vstack((x/(1.0+z), y/(1.0+z))).T dist = head.euclidDist(xy, xy) - elif coord == '3d': + elif coord == "3d": dist = head.euclidDist(locs, locs) - elif coord == 'sphere': + elif coord == "sphere": dist = head.sphereDist(locs, locs) else: - raise RuntimeError('Invalid coord %s.', str(coord)) + raise RuntimeError("Invalid coord %s." % str(coord)) + + self.data = np.array([sig.sharpen(seg, dist=dist, *args, **kwargs) + for seg in self.data]) - self.data = np.array([sig.sharpen(seg, dist=dist, *args, **kwargs) for seg in self.data]) return self def baselineCorrect(self, t=None): if t is None: if self.getStart() >= 0.0: - raise RuntimeError('Cannot baselineCorrect with positive start time ' + - 'unless t is given explicitely') + raise RuntimeError("Cannot baselineCorrect with positive start time " + + "unless t is given explicitely") tSamp = int(np.abs(self.getStart()) * self.sampRate) else: tSamp = int(t * self.sampRate) - self.data -= np.mean(self.data[:,:tSamp,:],axis=1).reshape((self.nSeg,1,self.nChan)) + self.data -= np.mean(self.data[:, :tSamp, :], axis=1).reshape((self.nSeg, 1, self.nChan)) return self def downsample(self, factor): self.data = np.asarray([sig.downsample(seg, factor) for seg in self.data], - dtype=self.dtype) + dtype=self.dtype) self.sampRate /= float(factor) self.nObs = self.data.shape[1] @@ -372,7 +378,10 @@ def downsample(self, factor): return self - def resample(self, factorDown, factorUp=1, interpKwargs=dict(), **decimKwargs): + def resample(self, factorDown, factorUp=1, interpKwargs=None, **decimKwargs): + if interpKwargs is None: + interpKwargs = {} + self.data = np.asarray( [sig.resample(seg, factorDown=factorDown, factorUp=factorUp, interpKwargs=interpKwargs, **decimKwargs) @@ -391,7 +400,7 @@ def resample(self, factorDown, factorUp=1, interpKwargs=dict(), **decimKwargs): return self def chanEmbed(self): - return self.data.reshape((self.data.shape[0], -1), order='F') + return self.data.reshape((self.data.shape[0], -1), order="F") def timeEmbed(self, *args, **kwargs): return util.timeEmbed(self.data, *args, axis=1, **kwargs) @@ -412,102 +421,103 @@ def spectrogram(self, *args, **kwargs): def reference(self, chans): chans = self.getChanIndices(chans) - ref = self.data[:,:,chans] + ref = self.data[:, :, chans] if len(chans) > 1: ref = ref.mean(axis=2) self.data -= util.segmat(ref) return self - def plotSegs(self, chan=0, drawZeroTime=False, drawZeroVolt=True, - timeUnit='ms', - segLineColor=(0.05,0.05,0.2,0.1), segLineWidth=3, - meanLineColor='red', meanLineWidth=2, - ax=None, *args, **kwargs): + def plotSegs(self, chan=0, drawZeroTime=False, drawZeroVolt=True, 
timeUnit="ms", + segLineColor=(0.05, 0.05, 0.2, 0.1), segLineWidth=3, + meanLineColor="red", meanLineWidth=2, ax=None, **kwargs): chan, = self.getChanIndices((chan,)) - segs = self.data[:,:,chan].T + segs = self.data[:, :, chan].T timeUnit = timeUnit.lower() - if timeUnit in ('s', 'ms'): - time = np.linspace(self.getStart(),self.getEnd(), + if timeUnit in ("s", "ms"): + time = np.linspace(self.getStart(), self.getEnd(), segs.shape[0]).astype(self.dtype, copy=False) - elif timeUnit == 'obs': + elif timeUnit == "obs": time = np.arange(self.nObs) else: - raise RuntimeError('Invalid timeUnit %s.' + str(timeUnit)) + raise RuntimeError("Invalid timeUnit %s." + str(timeUnit)) - if timeUnit == 'ms': + if timeUnit == "ms": time *= 1000.0 if ax is None: - #fig = plt.figure(figsize=(9,5.5)) + #fig = plt.figure(figsize=(9, 5.5)) fig = plt.figure() - ax = fig.add_subplot(1,1,1) - - if timeUnit == 's': - ax.set_xlabel('Time (s)') - elif timeUnit == 'ms': - ax.set_xlabel('Time (ms)') - elif timeUnit == 'obs': - ax.set_xlabel('Observation') - ax.set_ylabel(r'Signal ($\mu V$)') - - segLines = ax.plot(time, segs, color=segLineColor, linewidth=segLineWidth, *args, **kwargs) - segLines[-1].set_label('Single Trial') - ax.plot(time, np.mean(segs, axis=1), color='white', linewidth=meanLineWidth*2, *args, **kwargs) - meanLine = ax.plot(time, np.mean(segs, axis=1), color=meanLineColor, linewidth=meanLineWidth, label='Mean', *args, **kwargs) + ax = fig.add_subplot(1, 1, 1) + + if timeUnit == "s": + ax.set_xlabel("Time (s)") + elif timeUnit == "ms": + ax.set_xlabel("Time (ms)") + elif timeUnit == "obs": + ax.set_xlabel("Observation") + ax.set_ylabel(r"Signal ($\mu V$)") + + segLines = ax.plot(time, segs, color=segLineColor, linewidth=segLineWidth, **kwargs) + segLines[-1].set_label("Single Trial") + ax.plot(time, np.mean(segs, axis=1), color="white", linewidth=meanLineWidth*2, **kwargs) + meanLine = ax.plot(time, np.mean(segs, axis=1), color=meanLineColor, + linewidth=meanLineWidth, label="Mean", **kwargs) vertLine = None if drawZeroTime: - vertLine = ax.vlines(0.0, np.min(segs), np.max(segs), color='red', linewidth=2, linestyle='--') + vertLine = ax.vlines(0.0, np.min(segs), np.max(segs), + color="red", linewidth=2, linestyle="--") if drawZeroVolt: - ax.hlines(0.0, time[0], time[-1], linewidth=2, linestyle='--', color='grey') + ax.hlines(0.0, time[0], time[-1], linewidth=2, linestyle="--", color="grey") ax.autoscale(tight=True) - return {'ax': ax, 'segLines': segLines, 'meanLine': meanLine, vertLine: 'vertLine'} + return {"ax": ax, "segLines": segLines, "meanLine": meanLine, vertLine: "vertLine"} - def plotAvg(self, chans=None, drawZeroTime=False, drawZeroVolt=True, timeUnit='ms', scale=None, ax=None, **kwargs): + def plotAvg(self, chans=None, drawZeroTime=False, drawZeroVolt=True, + timeUnit="ms", scale=None, ax=None, **kwargs): if chans is None: chans = self.getChanNames() chans = self.getChanIndices(chans) chans = self.getChanIndices(chans) - avg = self.data[:,:,chans].mean(axis=0) + avg = self.data[:, :, chans].mean(axis=0) timeUnit = timeUnit.lower() - if timeUnit in ('s', 'ms'): - time = np.linspace(self.getStart(),self.getEnd(), + if timeUnit in ("s", "ms"): + time = np.linspace(self.getStart(), self.getEnd(), avg.shape[0]).astype(self.dtype, copy=False) - elif timeUnit == 'obs': + elif timeUnit == "obs": time = np.arange(self.nObs) else: - raise RuntimeError('Invalid timeUnit %s.' + str(timeUnit)) + raise RuntimeError("Invalid timeUnit %s." 
+ str(timeUnit)) - if timeUnit == 'ms': + if timeUnit == "ms": time *= 1000.0 sep, scale = util.colsep(avg, scale=scale, returnScale=True) if ax is None: - #fig = plt.figure(figsize=(9,5.5)) + #fig = plt.figure(figsize=(9, 5.5)) fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) - if timeUnit == 's': - ax.set_xlabel('Time (s)') - elif timeUnit == 'ms': - ax.set_xlabel('Time (ms)') - elif timeUnit == 'obs': - ax.set_xlabel('Observation') + if timeUnit == "s": + ax.set_xlabel("Time (s)") + elif timeUnit == "ms": + ax.set_xlabel("Time (ms)") + elif timeUnit == "obs": + ax.set_xlabel("Observation") if len(chans) > 1: - ax.set_yticklabels([c for i,c in enumerate(self.chanNames) if i in chans]) + ax.set_yticklabels([c for i, c in enumerate(self.chanNames) if i in chans]) ax.set_yticks(sep) else: - ax.set_ylabel(r'Signal ($\mu V$)') + ax.set_ylabel(r"Signal ($\mu V$)") lines = ax.plot(time, avg+sep, **kwargs) @@ -515,13 +525,13 @@ def plotAvg(self, chans=None, drawZeroTime=False, drawZeroVolt=True, timeUnit='m if drawZeroTime: ylim = ax.get_ylim() - #ax.vlines(0.0, np.min(avg+sep), np.max(avg+sep), color='red', linewidth=2, linestyle='--') - ax.vlines(0.0, ylim[0], ylim[1], color='red', linewidth=2, linestyle='--') + #ax.vlines(0.0, np.min(avg+sep), np.max(avg+sep), color="red", linewidth=2, linestyle="--") + ax.vlines(0.0, ylim[0], ylim[1], color="red", linewidth=2, linestyle="--") if drawZeroVolt: - ax.hlines(0.0, time[0], time[-1], linewidth=2, linestyle='--', color='grey') + ax.hlines(0.0, time[0], time[-1], linewidth=2, linestyle="--", color="grey") - return {'ax': ax, 'lines': lines, 'scale': scale} + return {"ax": ax, "lines": lines, "scale": scale} def plotAvgDiffHead(self, other, times=(0.0, 0.1, 0.2, 0.3), chans=None): if chans is None: @@ -533,8 +543,8 @@ def plotAvgDiffHead(self, other, times=(0.0, 0.1, 0.2, 0.3), chans=None): nTimes = len(times) - avgSelf = self.data[:,:,chans].mean(axis=0) - avgOther = other.data[:,:,chans].mean(axis=0) + avgSelf = self.data[:, :, chans].mean(axis=0) + avgOther = other.data[:, :, chans].mean(axis=0) avgDiff = avgSelf - avgOther if nTimes <= 4: @@ -547,56 +557,58 @@ def plotAvgDiffHead(self, other, times=(0.0, 0.1, 0.2, 0.3), chans=None): fig = plt.figure() axs = [] - for i,t in enumerate(times): + for i, t in enumerate(times): ax = fig.add_subplot(nRow, nCol, i+1) - ax.set_title('%.0fms' % (t*1000.0)) + ax.set_title("%.0fms" % (t*1000.0)) axs.append(ax) timeStep = int(t*self.sampRate) - head.plotHeadInterp(avgDiff[timeStep,:], chanNames=chanNames, ax=ax) + head.plotHeadInterp(avgDiff[timeStep, :], chanNames=chanNames, ax=ax) - return {'fig': fig, 'axs': axs} + return {"fig": fig, "axs": axs} - def plotAvgAndHead(self, chan=0, times=(100,200,300,400,500,600,700), timeUnit='ms', - mn=None, mx=None, avgKwargs={}, **kwargs): - chan, = self.getChanIndices((chan,)) + def plotAvgAndHead(self, chan=0, times=(100, 200, 300, 400, 500, 600, 700), timeUnit="ms", + mn=None, mx=None, avgKwargs=None, **kwargs): + if avgKwargs is None: + avgKwargs = {} + chan, = self.getChanIndices((chan,)) timeUnit = timeUnit.lower() nHead = len(times) - fig = plt.figure(figsize=(14,8)) + fig = plt.figure(figsize=(14, 8)) fig.subplots_adjust(hspace=0.32, wspace=0.02, left=0.065, right=0.95, top=0.97, bottom=0.18) - gs = pltgs.GridSpec(2,nHead) - axAvg = fig.add_subplot(gs[0,:]) - axHead = [fig.add_subplot(gs[1,i]) for i in range(nHead)] + gs = pltgs.GridSpec(2, nHead) + axAvg = fig.add_subplot(gs[0, :]) + axHead = [fig.add_subplot(gs[1, i]) for i in 
range(nHead)] axCBar = fig.add_axes((0.05, 0.08, 0.9, 0.05)) avgPlot = self.plotAvg(chans=(chan,), ax=axAvg, timeUnit=timeUnit, **avgKwargs) - avgMn, avgMx = avgPlot['ax'].get_ylim() - axAvg.vlines(times, avgMn, avgMx, linestyle='--', linewidth=2, color='red') - axAvg.set_title('Channel %s Average' % self.getChanNames((chan,))[0]) + avgMn, avgMx = avgPlot["ax"].get_ylim() + axAvg.vlines(times, avgMn, avgMx, linestyle="--", linewidth=2, color="red") + axAvg.set_title("Channel %s Average" % self.getChanNames((chan,))[0]) avg = np.mean(self.data, axis=0) headPlots = [] - for t,axH in zip(times, axHead): + for t, axH in zip(times, axHead): startObs = int(self.start*self.sampRate) - if timeUnit == 's': + if timeUnit == "s": i = int(self.sampRate*t) - fmt = '%.2f' - elif timeUnit == 'ms': + fmt = "%.2f" + elif timeUnit == "ms": i = int(self.sampRate*t/1000.0) - fmt = '%.0f' - elif timeUnit == 'obs': + fmt = "%.0f" + elif timeUnit == "obs": i = t - fmt = '%.0f' + fmt = "%.0f" else: - raise RuntimeError('Invalid timeUnit %s.' % str(timeUnit)) + raise RuntimeError("Invalid timeUnit %s." % str(timeUnit)) i -= startObs if mn is None: @@ -605,19 +617,23 @@ def plotAvgAndHead(self, chan=0, times=(100,200,300,400,500,600,700), timeUnit=' if mx is None: mx = np.max(avg) - hp = head.plotHeadInterp(avg[i,:], + hp = head.plotHeadInterp(avg[i, :], chanNames=self.getChanNames(), mn=mn, mx=mx, colorbar=False, ax=axH, **kwargs) axH.set_title((fmt + timeUnit) % t) headPlots.append(hp) - cbar = plt.colorbar(hp['im'], ax=axAvg, orientation='horizontal', cax=axCBar) - cbar.set_label(r'Signal ($\mu V$)') + cbar = plt.colorbar(hp["im"], ax=axAvg, orientation="horizontal", cax=axCBar) + cbar.set_label(r"Signal ($\mu V$)") + + return {"axAvg": axAvg, "axHead": axHead, "axCBar": axCBar, + "avgPlot": avgPlot, "headPlots": headPlots, "cbar": cbar} - return {'axAvg': axAvg, 'axHead': axHead, 'axCBar': axCBar, - 'avgPlot': avgPlot, 'headPlots': headPlots, 'cbar': cbar} + def plotAvgPSDByChan(self, scale="log", plotChanNames=True, + ax=None, psdKwargs=None, **kwargs): + if psdKwargs is None: + psdKwargs = {} - def plotAvgPSDByChan(self, scale='log', plotChanNames=True, lowFreq=0, highFreq=np.inf, ax=None, psdKwargs={}, **kwargs): psds = self.psd(**psdKwargs) powers = np.array([psd.getPowers() for psd in psds]) @@ -627,36 +643,36 @@ def plotAvgPSDByChan(self, scale='log', plotChanNames=True, lowFreq=0, highFreq= highMask = freqs > 0.5 freqMask = lowMask & highMask - powers = powers[:,freqMask] + powers = powers[:, freqMask] powers = powers.mean(axis=0) freqs = freqs[freqMask] if ax is None: fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) ax.grid() - ax.set_title('Power Spectral Density') - ax.set_xlabel(r'Freqency ($Hz$)') + ax.set_title("Power Spectral Density") + ax.set_xlabel(r"Freqency ($Hz$)") ax.set_xlim((np.min(freqs), np.max(freqs))) scale = scale.lower() - if scale in ('linear', 'log'): - ax.set_ylabel(r'Power Density ($\mu V^2 / Hz$)') - elif scale in ('db', 'decibels'): - ax.set_ylabel(r'Power Density (dB)') - if scale == 'log': - ax.set_yscale('log') - - if scale in ('linear', 'log'): + if scale in ("linear", "log"): + ax.set_ylabel(r"Power Density ($\mu V^2 / Hz$)") + elif scale in ("db", "decibels"): + ax.set_ylabel(r"Power Density (dB)") + if scale == "log": + ax.set_yscale("log") + + if scale in ("linear", "log"): pass - elif scale in ('db', 'decibels'): + elif scale in ("db", "decibels"): powers = 10.0*np.log10(powers/np.max(powers)) else: - raise RuntimeError('Invalid scale 
%s.' % str(scale)) + raise RuntimeError("Invalid scale %s." % str(scale)) - powersFlat = powers.reshape((-1,), order='F') + powersFlat = powers.reshape((-1,), order="F") lines = ax.plot(powersFlat, **kwargs) nFreq = len(freqs) @@ -665,16 +681,16 @@ def plotAvgPSDByChan(self, scale='log', plotChanNames=True, lowFreq=0, highFreq= chanNames = self.getChanNames() if plotChanNames: - for i,cn in enumerate(chanNames): + for i, cn in enumerate(chanNames): if i > 0: - ax.vlines(i*float(nFreq), mn, mx, linestyle='--') + ax.vlines(i*float(nFreq), mn, mx, linestyle="--") ax.text((i+0.25)*float(nFreq), mx-0.38*(mx-mn), cn, fontsize=14) tickStride = int(np.ceil(nFreq/3.0)) tickFreqs = freqs[::tickStride] tickPlaces = np.arange(nFreq)[::tickStride] tickLocs = np.concatenate( - [tickPlaces+nFreq*i for i,c in enumerate(chanNames)]) + [tickPlaces+nFreq*i for i, c in enumerate(chanNames)]) tickLabels = np.round(np.tile(tickFreqs, len(chanNames))).astype(np.int) ax.set_xticks(tickLocs) @@ -682,7 +698,7 @@ def plotAvgPSDByChan(self, scale='log', plotChanNames=True, lowFreq=0, highFreq= ax.autoscale(tight=True) - return {'freqs': freqs, 'powers': powers, 'lines': lines, 'ax': ax} + return {"freqs": freqs, "powers": powers, "lines": lines, "ax": ax} def plotImg(self, chans): if chans is None: @@ -692,21 +708,21 @@ def plotImg(self, chans): chans = self.getChanIndices(chans) #if ax is None: - ## fig = plt.figure(figsize=(14,8.5)) - # fig = plt.figure(figsize=(9,5.5)) - # ax = fig.add_subplot(1,1,1) - #ax.set_yticklabels([c for i,c in enumerate(self.chanNames) if i in chans]) + ## fig = plt.figure(figsize=(14, 8.5)) + # fig = plt.figure(figsize=(9, 5.5)) + # ax = fig.add_subplot(1, 1, 1) + #ax.set_yticklabels([c for i, c in enumerate(self.chanNames) if i in chans]) #ax.set_yticks(sep) - #ax.set_xlabel('Time (s)') + #ax.set_xlabel("Time (s)") #ax.set_ylim(-scale, sep[-1] + scale) class SegmentedEEGFromEEG(SegmentedEEG): def __init__(self, unSegmentedEEG, start=0.0, end=0.8, #startsFunc=lambda m: np.where(np.diff(np.abs(m)) > 0.0)[0], startsFunc=lambda m: np.where(~np.isclose(np.diff(m), 0.0))[0], - *args, **kwargs): + **kwargs): - unSegmentedData = unSegmentedEEG.getData() + unSegmentedData = unSegmentedEEG.getData() sampRate = unSegmentedEEG.getSampRate() chanNames = unSegmentedEEG.getChanNames() markers = unSegmentedEEG.getMarkers() @@ -725,12 +741,12 @@ def __init__(self, unSegmentedEEG, start=0.0, end=0.8, # if first segment is too short, ditch it # this feels hacky ? 
XXX - idfah while segStarts[0] + startSamp < 0: - ##print('ditching first segment') + ##print("ditching first segment") segStarts = segStarts[1:] # if last segment is too short, ditch it while segStarts[-1] + endSamp >= unSegmentedData.shape[0]: - ##print('ditching last segment') + ##print("ditching last segment") segStarts = segStarts[:-1] indices = np.asarray([range(s+startSamp, s+endSamp) for s in segStarts], @@ -739,7 +755,7 @@ def __init__(self, unSegmentedEEG, start=0.0, end=0.8, data = unSegmentedData[indices] SegmentedEEG.__init__(self, data=data, sampRate=sampRate, chanNames=chanNames, - markers=markers[segStarts+1], start=start, *args, **kwargs) + markers=markers[segStarts+1], start=start, **kwargs) class SegmentEEGFromSingleEEG(SegmentedEEG): def __init__(self, singleEEG, *args, **kwargs): @@ -749,14 +765,14 @@ def __init__(self, singleEEG, *args, **kwargs): sampRate = singleEEG.getSampRate() chanNames = singleEEG.getChanNames() - + SegmentedEEG.__init__(self, data=data, sampRate=sampRate, chanNames=chanNames, markers=markers, *args, **kwargs) class SegmentedEEGFromMatFiles(SegmentedEEG): - def __init__(self, fileNames, dataKey='data', sampRate=('key','freq'), - chanNames=('key','channels'), markers=('arg',None), start=('arg',0.0), - transpose=False, deviceName=('arg',None), *args, **kwargs): + def __init__(self, fileNames, dataKey="data", sampRate=("key", "freq"), + chanNames=("key", "channels"), markers=("arg", None), start=("arg", 0.0), + transpose=False, deviceName=("arg", None), **kwargs): firstMat = spio.loadmat(fileNames[0]) firstSeg = util.colmat(firstMat[dataKey]) @@ -768,12 +784,12 @@ def keyOrArg(spec): koa = spec[0] val = spec[1] - if koa == 'key': + if koa == "key": return firstMat[val] - elif koa == 'arg': + elif koa == "arg": return val else: - raise RuntimeError('Invalid spec %s.' % spec) + raise RuntimeError("Invalid spec %s." % spec) sampRate = int(keyOrArg(sampRate)) chanNames = [str(chanName[0]) for chanName in keyOrArg(chanNames)[0][0]] @@ -791,7 +807,7 @@ def keyOrArg(spec): seg = seg.T if seg.shape != firstShape: - raise RuntimeError('Shape of first segment %s %s does not not match shape of segment %s %s.' % + raise RuntimeError("Shape of first segment %s %s does not not match shape of segment %s %s." % (str(fileNames[0]), str(firstShape), str(fileName), str(seg.shape))) data.append(seg) @@ -799,4 +815,4 @@ def keyOrArg(spec): data = np.asarray(data) SegmentedEEG.__init__(self, data=data, sampRate=sampRate, chanNames=chanNames, - markers=markers, start=start, *args, **kwargs) + markers=markers, start=start, **kwargs) diff --git a/cebl/ml/__init__.py b/cebl/ml/__init__.py index c72ddb6..aa54721 100644 --- a/cebl/ml/__init__.py +++ b/cebl/ml/__init__.py @@ -1,5 +1,12 @@ """Machine learning. """ +from cebl.util.clsm import * +from cebl.util.errm import * + +from . import label +from . import optim +from . import paraminit +from . import part from .arc import * from .autoreg import * @@ -12,11 +19,3 @@ from .som import * from .strans import * from .stand import * - -from . import label -from . import optim -from . import paraminit -from . import part - -from cebl.util.clsm import * -from cebl.util.errm import * diff --git a/cebl/ml/arc.py b/cebl/ml/arc.py index 05caa4b..6bca30d 100644 --- a/cebl/ml/arc.py +++ b/cebl/ml/arc.py @@ -5,12 +5,9 @@ from .classifier import Classifier from .autoreg import AutoRegression, RecurrentAutoRegression -from .logreg import LogisticRegression from . 
import stand from .nnet import esn - -from .logreg import LogisticRegression from .knn import KNN @@ -101,7 +98,7 @@ def train(self, classData, **autoRegKwargs): [self.autoRegClass(ss, **autoRegKwargs) for ss in classData] #self.baselineErrors = np.empty(len(self.models)) - #for i,mdl in enumerate(self.models): + #for i, mdl in enumerate(self.models): # preds, resids = mdl.eval(classData[i], returnResid=True) # self.baselineErrors[i] = util.rmse(resids) @@ -175,7 +172,7 @@ def demoARC(): print() fig = plt.figure(figsize=(20,6)) - axSigs = fig.add_subplot(1,3, 1) + axSigs = fig.add_subplot(1, 3, 1) axSigs.plot(x, trainData[0][0].T, color='blue', linewidth=2, label=r'$\mathbf{sin}(x)$') axSigs.plot(x, trainData[0].T, color='blue', alpha=0.1, linewidth=2) axSigs.plot(x, 3.0+trainData[1][0].T, color='red', linewidth=2, label=r'$\mathbf{sin}(2x)$') @@ -195,13 +192,13 @@ def demoARC(): #trainErrors = [standardizer.apply(cls) for cls in trainErrors] #testErrors = [standardizer.apply(cls) for cls in testErrors] - axTrainErrs = fig.add_subplot(1,3, 2) - #axTrainErrs = fig.add_subplot(1,2, 1) + axTrainErrs = fig.add_subplot(1, 3, 2) + #axTrainErrs = fig.add_subplot(1, 2, 1) axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color='blue') axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color='red') axTrainErrs.set_title('Training Relative Modeling Errors') - axTrainErrs.set_xlabel('$\mathbf{sin}(x)$ model error') - axTrainErrs.set_ylabel('$\mathbf{sin}(2x)$ model error') + axTrainErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') + axTrainErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') allTrainErrs = np.vstack(trainErrors) mn = allTrainErrs.min() @@ -211,13 +208,13 @@ def demoARC(): axTrainErrs.grid() axTrainErrs.autoscale(tight=True) - axTestErrs = fig.add_subplot(1,3, 3) - #axTestErrs = fig.add_subplot(1,2, 2) + axTestErrs = fig.add_subplot(1, 3, 3) + #axTestErrs = fig.add_subplot(1, 2, 2) axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color='blue') axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color='red') axTestErrs.set_title('Testing Relative Modeling Errors') - axTestErrs.set_xlabel('$\mathbf{sin}(x)$ model error') - axTestErrs.set_ylabel('$\mathbf{sin}(2x)$ model error') + axTestErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') + axTestErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') allTestErrs = np.vstack(testErrors) mn = allTestErrs.min() @@ -290,8 +287,8 @@ def demoRARC(): print('AUC: ', model.auc(testData)) print() - fig = plt.figure(figsize=(20,6)) - axSigs = fig.add_subplot(1,3, 1) + fig = plt.figure(figsize=(20, 6)) + axSigs = fig.add_subplot(1, 3, 1) axSigs.plot(x, trainData[0][0].T, color='blue', linewidth=2, label=r'$\mathbf{sin}(x)$') axSigs.plot(x, trainData[0].T, color='blue', alpha=0.1, linewidth=2) axSigs.plot(x, 3.0+trainData[1][0].T, color='red', linewidth=2, label=r'$\mathbf{sin}(2x)$') @@ -311,13 +308,13 @@ def demoRARC(): #trainErrors = [standardizer.apply(cls) for cls in trainErrors] #testErrors = [standardizer.apply(cls) for cls in testErrors] - axTrainErrs = fig.add_subplot(1,3, 2) - #axTrainErrs = fig.add_subplot(1,2, 1) + axTrainErrs = fig.add_subplot(1, 3, 2) + #axTrainErrs = fig.add_subplot(1, 2, 1) axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color='blue') axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color='red') axTrainErrs.set_title('Training Relative Modeling Errors') - axTrainErrs.set_xlabel('$\mathbf{sin}(x)$ model error') - axTrainErrs.set_ylabel('$\mathbf{sin}(2x)$ model 
error') + axTrainErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') + axTrainErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') allTrainErrs = np.vstack(trainErrors) mn = allTrainErrs.min() @@ -327,13 +324,13 @@ def demoRARC(): axTrainErrs.grid() axTrainErrs.autoscale(tight=True) - axTestErrs = fig.add_subplot(1,3, 3) - #axTestErrs = fig.add_subplot(1,2, 2) + axTestErrs = fig.add_subplot(1, 3, 3) + #axTestErrs = fig.add_subplot(1, 2, 2) axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color='blue') axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color='red') axTestErrs.set_title('Testing Relative Modeling Errors') - axTestErrs.set_xlabel('$\mathbf{sin}(x)$ model error') - axTestErrs.set_ylabel('$\mathbf{sin}(2x)$ model error') + axTestErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') + axTestErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') allTestErrs = np.vstack(testErrors) mn = allTestErrs.min() diff --git a/cebl/ml/da.py b/cebl/ml/da.py index f4cc08b..2c6caa1 100644 --- a/cebl/ml/da.py +++ b/cebl/ml/da.py @@ -23,7 +23,7 @@ def __init__(self, classData, average=0.0, shrinkage=0.0): Args: classData: Training data. This is a numpy array or list of numpy - arrays with shape (nCls,nObs[,nIn]). If the dimensions + arrays with shape (nCls, nObs[,nIn]). If the dimensions index is missing the data is assumed to be one-dimensional. @@ -57,7 +57,7 @@ def train(self, classData): Args: classData: Training data. This is a numpy array or list of numpy - arrays with shape (nCls,nObs[,nIn]). If the dimensions + arrays with shape (nCls, nObs[,nIn]). If the dimensions index is missing the data is assumed to be one-dimensional. """ @@ -71,7 +71,7 @@ def train(self, classData): logPriors = np.log(np.array( [cls.shape[0]/float(totalObs) for cls in classData])).astype(self.dtype, copy=False) - # class means (nCls,ndim) + # class means (nCls, ndim) self.means = np.array( [np.mean(cls, axis=0) for cls in classData]).astype(self.dtype, copy=False) self.means = util.colmat(self.means) @@ -82,7 +82,7 @@ def train(self, classData): # average covariance matrix avgCov = np.zeros((self.nIn, self.nIn), dtype=self.dtype) - for i,cls in enumerate(classData): + for i, cls in enumerate(classData): #dataZeroMean = cls - self.means[i] #cv = np.cov(dataZeroMean, rowvar=False).astype(self.dtype, copy=False) cv = np.cov(cls, rowvar=False).astype(self.dtype, copy=False) @@ -104,7 +104,7 @@ def train(self, classData): self.invCovs = [] self.intercepts = np.zeros(self.nCls, dtype=self.dtype) - for i,cv in enumerate(covs): + for i, cv in enumerate(covs): ##cvi = sp.linalg.pinvh(cv) #try: # cvi = np.linalg.inv(cv) @@ -135,7 +135,7 @@ def discrim(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the discriminant values. + Numpy array with shape (nObs, nCls) containing the discriminant values. Notes: These values are the log of the evaluated discriminant functions @@ -147,8 +147,8 @@ def discrim(self, x): # number of observations nObs = x.shape[0] - # (nObs,nCls) - dv = np.zeros((nObs,self.nCls), dtype=self.dtype) + # (nObs, nCls) + dv = np.zeros((nObs, self.nCls), dtype=self.dtype) # could probably vectorize this? XXX - idfah for i in range(self.nCls): @@ -157,7 +157,7 @@ def discrim(self, x): dv *= -0.5 dv += self.intercepts - # (nObs,nCls) + # (nObs, nCls) return dv def logDens(self, x): @@ -174,7 +174,7 @@ def dens(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). 
Returns: - Numpy array with shape (nObs,nCls) containing the density values. + Numpy array with shape (nObs, nCls) containing the density values. Notes: This is slower and less precise than discrim. Only use probs if you @@ -189,7 +189,7 @@ def probs(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the probability values. + Numpy array with shape (nObs, nCls) containing the probability values. Notes: This is less precise than discrim. Only use probs if you @@ -210,14 +210,14 @@ def demoQDA2d(): """QDA Example. """ # covariance matrices - covRed = [[1,-0.9], - [-0.9,1]] + covRed = [[1, -0.9], + [-0.9, 1]] - covGreen = [[0.8,-0.5], - [-0.5,0.8]] + covGreen = [[0.8, -0.5], + [-0.5, 0.8]] - covBlue = [[0.3,0.0], - [0.0,0.3]] + covBlue = [[0.3, 0.0], + [0.0, 0.3]] # red data red = np.random.multivariate_normal( @@ -225,18 +225,18 @@ def demoQDA2d(): # green data green = np.random.multivariate_normal( - (0,0), covGreen, 300) + (0, 0), covGreen, 300) # blue data blue = np.random.multivariate_normal( - (1.5,1.5), covBlue, 400) + (1.5, 1.5), covBlue, 400) - data = [red,green,blue] + data = [red, green, blue] #data = [cls.astype(np.float32) for cls in data] # min and max training values - mn = np.min(np.vstack((red,green,blue)), axis=0) - mx = np.max(np.vstack((red,green,blue)), axis=0) + mn = np.min(np.vstack((red, green, blue)), axis=0) + mx = np.max(np.vstack((red, green, blue)), axis=0) # train model model = QuadraticDiscriminantAnalysis(data) @@ -262,7 +262,7 @@ def demoQDA2d(): # first figure shows training data and class intersections fig = plt.figure() - ax = fig.add_subplot(2,2,1) + ax = fig.add_subplot(2, 2, 1) # training data ax.scatter(red[:,0], red[:,1], color="red") @@ -271,9 +271,9 @@ def demoQDA2d(): # generate grid over training data sw = 0.02 - sx = np.arange(mn[0],mx[0], sw) - sy = np.arange(mn[1],mx[1], sw) - x,y = np.meshgrid(sx,sy) + sx = np.arange(mn[0], mx[0], sw) + sy = np.arange(mn[1], mx[1], sw) + x, y = np.meshgrid(sx, sy) # get probabilities and labels for values in grid z = np.vstack((x.reshape((-1,)), y.reshape((-1,)))).T @@ -301,72 +301,71 @@ def demoQDA2d(): ax.contour(x, y, diffGB, colors='black', levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2,2,2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection='3d') # straight class colors for suface plots - color = np.reshape([dRed,dGreen,dBlue], (3, x.shape[0],x.shape[1])) - color = color.swapaxes(1,2).T + color = np.reshape([dRed, dGreen, dBlue], (3, x.shape[0], x.shape[1])) + color = color.swapaxes(1, 2).T # flip colors to fade to white zro = np.zeros_like(x) colorFlip = np.ones((3, x.shape[0], x.shape[1])) - colorFlip -= (np.array((zro,dRed,dRed)) + - np.array((dGreen,zro,dGreen)) + - np.array((dBlue,dBlue,zro))) + colorFlip -= (np.array((zro, dRed, dRed)) + + np.array((dGreen, zro, dGreen)) + + np.array((dBlue, dBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1,2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x,y, dMax, cmap=matplotlib.cm.jet, linewidth=0) - surf = ax.plot_surface(x,y, dMax, facecolors=colorFlip, + surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? 
# third figure shows 3d plots of probabilities - ax = fig.add_subplot(2,2,3, projection='3d') + ax = fig.add_subplot(2, 2, 3, projection='3d') # straight class colors for suface plots - color = np.reshape([pRed,pGreen,pBlue], (3, x.shape[0],x.shape[1])) - color = color.swapaxes(1,2).T + color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) + color = color.swapaxes(1, 2).T # flip colors to fade to white zro = np.zeros_like(x) colorFlip = np.ones((3, x.shape[0], x.shape[1])) - colorFlip -= (np.array((zro,pRed,pRed)) + - np.array((pGreen,zro,pGreen)) + - np.array((pBlue,pBlue,zro))) + colorFlip -= (np.array((zro, pRed, pRed)) + + np.array((pGreen, zro, pGreen)) + + np.array((pBlue, pBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1,2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x,y, pMax, cmap=matplotlib.cm.jet, linewidth=0) - surf = ax.plot_surface(x,y, pMax, facecolors=colorFlip, + #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) + surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? """ # third figure shows contours and color image of probability densities - ax = fig.add_subplot(2,2,3) + ax = fig.add_subplot(2, 2, 3) - #ax.pcolor(x,y,pMax) + #ax.pcolor(x, y, pMax) ax.imshow(colorFlip, origin='lower', - extent=(mn[0],mx[0],mn[1],mx[1]), aspect='auto') + extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') # contours nLevel = 6 cs = ax.contour(x, y, pMax, colors='black', - levels=np.linspace(np.min(pMax),np.max(pMax),nLevel)) + levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) """ # fourth figure - ax = fig.add_subplot(2,2,4, projection='3d') + ax = fig.add_subplot(2, 2, 4, projection='3d') labels = model.label(z) lMax = np.reshape(labels, x.shape) - surf = ax.plot_surface(x,y, lMax, facecolors=colorFlip, + surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) #surf.set_edgecolor(np.vstack(color)) surf.set_edgecolor('black') @@ -383,7 +382,7 @@ def __init__(self, classData, shrinkage=0): Args: classData: Training data. This is a numpy array or list of numpy - arrays with shape (nCls,nObs[,nIn]). If the dimensions + arrays with shape (nCls, nObs[,nIn]). If the dimensions index is missing the data is assumed to be one-dimensional. @@ -391,7 +390,7 @@ def __init__(self, classData, shrinkage=0): covariance matrix toward its average eigenvalue: covariance = (1-shrinkage)*covariance + shrinkage*averageEigenvalue*identity - Behavior is undefined if shrinkage is outside [0,1]. + Behavior is undefined if shrinkage is outside [0, 1]. This parameter has no effect if average is 0. Returns: @@ -411,7 +410,7 @@ def train(self, classData): Args: classData: Training data. This is a numpy array or list of numpy - arrays with shape (nCls,nObs[,nIn]). If the dimensions + arrays with shape (nCls, nObs[,nIn]). If the dimensions index is missing the data is assumed to be one-dimensional. 
""" @@ -425,7 +424,7 @@ def train(self, classData): logPriors = np.log(np.array( [cls.shape[0]/float(totalObs) for cls in classData])).astype(self.dtype, copy=False) - # class means (nCls,ndim) + # class means (nCls, ndim) means = np.array([np.mean(cls, axis=0) for cls in classData]).astype(self.dtype, copy=False) means = util.colmat(means) @@ -433,7 +432,7 @@ def train(self, classData): self.avgCov = np.zeros((self.nIn, self.nIn), dtype=self.dtype) # sum up class covariances - for i,cls in enumerate(classData): + for i, cls in enumerate(classData): self.avgCov += np.cov(cls, rowvar=False) #key = util.hashArray(cls) #cov = covCache[key] @@ -467,11 +466,11 @@ def train(self, classData): raise RuntimeError('Covariance matrix has zero determinant, consider using shrinkage.') # model coefficients - # (ndim,nCls) = (ndim,ndim) x (ndim,nCls) - self.weights = self.invCov.dot(means.T) + # (ndim, nCls) = (ndim, ndim) x (ndim, nCls) + self.weights = self.invCov @ means.T # model intercepts (nCls,) - #self.intercepts = np.array([-0.5 * means[cls,:].dot(self.weights[:,cls]) + logPriors[cls] + #self.intercepts = np.array([-0.5 * (means[cls,:] @ (self.weights[:,cls])) + logPriors[cls] # for cls in range(self.nCls)]) self.intercepts = -0.5 * np.sum(self.weights * means.T, axis=0) + logPriors @@ -495,8 +494,8 @@ def discrim(self, x): x = util.colmat(x) # discriminant values - # (nObs,nCls) = (nObs,ndim) x (ndim,nCls) + (nObs,nCls) - dv = x.dot(self.weights) + self.intercepts.reshape((1,-1)) + # (nObs, nCls) = (nObs, ndim) x (ndim, nCls) + (nObs, nCls) + dv = (x @ self.weights) + self.intercepts.reshape((1,-1)) return dv @@ -505,7 +504,7 @@ def logDens(self, x): dv = self.discrim(x) # find class probability densities by adding back in canceled terms - xSx = np.sum(x.dot(self.invCov) * x, axis=1).reshape((-1,1)) + xSx = np.sum(x @ self.invCov) * x, axis=1).reshape((-1, 1)) return -0.5 * (self.nCls*log2pi + self.logDet + xSx) + dv @@ -516,7 +515,7 @@ def dens(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the density values. + Numpy array with shape (nObs, nCls) containing the density values. Notes: This is slower and less precise than discrim. Only use probs if you @@ -531,7 +530,7 @@ def probs(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the probability values. + Numpy array with shape (nObs, nCls) containing the probability values. Notes: This is less precise than discrim. Only use probs if you @@ -559,8 +558,8 @@ def demoLDA2d(): """LDA 2d example. 
""" # covariance matrix for each training class - cov = [[1,-0.8], - [-0.8,1]] + cov = [[1, -0.8], + [-0.8, 1]] # red data red = np.random.multivariate_normal( @@ -568,17 +567,17 @@ def demoLDA2d(): # green data green = np.random.multivariate_normal( - (0,0), cov, 300) + (0, 0), cov, 300) # blue data blue = np.random.multivariate_normal( - (1,1), cov, 400) + (1, 1), cov, 400) - data = [red,green,blue] + data = [red, green, blue] # min and max training values - mn = np.min(np.vstack((red,green,blue)), axis=0) - mx = np.max(np.vstack((red,green,blue)), axis=0) + mn = np.min(np.vstack((red, green, blue)), axis=0) + mx = np.max(np.vstack((red, green, blue)), axis=0) # train model model = LinearDiscriminantAnalysis(data, shrinkage=0) @@ -601,7 +600,7 @@ def demoLDA2d(): # first figure shows training data and class intersections fig = plt.figure() - ax = fig.add_subplot(2,2,1) + ax = fig.add_subplot(2, 2, 1) # training data ax.scatter(red[:,0], red[:,1], color="red") @@ -610,9 +609,9 @@ def demoLDA2d(): # generate grid over training data sw = 0.02 - sx = np.arange(mn[0],mx[0], sw) - sy = np.arange(mn[1],mx[1], sw) - x,y = np.meshgrid(sx,sy) + sx = np.arange(mn[0], mx[0], sw) + sy = np.arange(mn[1], mx[1], sw) + x, y = np.meshgrid(sx, sy) # get probability probabilities and labels for values in grid z = np.vstack((x.reshape((-1,)), y.reshape((-1,)))).T @@ -640,72 +639,71 @@ def demoLDA2d(): dMax = np.reshape(np.max(densities, axis=1), x.shape) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2,2,2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection='3d') # straight class colors for suface plots - color = np.reshape([dRed,dGreen,dBlue], (3, x.shape[0],x.shape[1])) - color = color.swapaxes(1,2).T + color = np.reshape([dRed, dGreen, dBlue], (3, x.shape[0], x.shape[1])) + color = color.swapaxes(1, 2).T # flip colors to fade to white zro = np.zeros_like(x) colorFlip = np.ones((3, x.shape[0], x.shape[1])) - colorFlip -= (np.array((zro,dRed,dRed)) + - np.array((dGreen,zro,dGreen)) + - np.array((dBlue,dBlue,zro))) + colorFlip -= (np.array((zro, dRed, dRed)) + + np.array((dGreen, zro, dGreen)) + + np.array((dBlue, dBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1,2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x,y, dMax, cmap=matplotlib.cm.jet, linewidth=0) - surf = ax.plot_surface(x,y, dMax, facecolors=colorFlip, + #surf = ax.plot_surface(x, y, dMax, cmap=matplotlib.cm.jet, linewidth=0) + surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? 
# third figure shows 3d plots of probabilities - ax = fig.add_subplot(2,2,3, projection='3d') + ax = fig.add_subplot(2, 2, 3, projection='3d') # straight class colors for suface plots - color = np.reshape([pRed,pGreen,pBlue], (3, x.shape[0],x.shape[1])) - color = color.swapaxes(1,2).T + color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) + color = color.swapaxes(1, 2).T # flip colors to fade to white zro = np.zeros_like(x) colorFlip = np.ones((3, x.shape[0], x.shape[1])) - colorFlip -= (np.array((zro,pRed,pRed)) + - np.array((pGreen,zro,pGreen)) + - np.array((pBlue,pBlue,zro))) + colorFlip -= (np.array((zro, pRed, pRed)) + + np.array((pGreen, zro, pGreen)) + + np.array((pBlue, pBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1,2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x,y, pMax, cmap=matplotlib.cm.jet, linewidth=0) - surf = ax.plot_surface(x,y, pMax, facecolors=colorFlip, + surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? """ # third figure shows contours and color image of probability densities - ax = fig.add_subplot(2,2,3) + ax = fig.add_subplot(2, 2, 3) - #ax.pcolor(x,y,pMax) + #ax.pcolor(x, y, pMax) ax.imshow(colorFlip, origin='lower', - extent=(mn[0],mx[0],mn[1],mx[1]), aspect='auto') + extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') # contours nLevel=6 cs = ax.contour(x, y, pMax, colors='black', - levels=np.linspace(np.min(pMax),np.max(pMax),nLevel)) + levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) """ # fourth figure - ax = fig.add_subplot(2,2,4, projection='3d') + ax = fig.add_subplot(2, 2, 4, projection='3d') labels = model.label(z) lMax = np.reshape(labels, x.shape) - surf = ax.plot_surface(x,y, lMax, facecolors=colorFlip, + surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) #surf.set_edgecolor(np.vstack(color)) surf.set_edgecolor('black') diff --git a/cebl/ml/logreg.py b/cebl/ml/logreg.py index e14ce56..014a6e3 100644 --- a/cebl/ml/logreg.py +++ b/cebl/ml/logreg.py @@ -79,6 +79,11 @@ def parameters(self): """ return self.weights.ravel() + def discrim(self, x): + x = util.colmat(x) + v = x @ (self.weights[:-1]) + self.weights[-1] + return v + def probs(self, x): """Compute class probabilities. @@ -88,10 +93,7 @@ def probs(self, x): Returns: Numpy array with shape (nObs, nIn) containing the probability values. """ - x = util.colmat(x) - - v = x.dot(self.weights[:-1]) + self.weights[-1] - return util.softmax(v) + return util.softmax(self.discrim(x)) def error(self, x, g): """Compute the negative log likelyhood for given inputs and targets. 
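# The hunk above factors the affine projection out of probs() into a new
# discrim() helper, so probs() reduces to a row-wise softmax over the
# discriminant values.  A minimal, self-contained numpy sketch of that
# relationship follows; the names discrim/probs and the trailing bias row in
# the weight matrix mirror the patched methods, but the shapes and helper
# functions here are illustrative assumptions, not CEBL's actual API.
import numpy as np

def discrim(x, weights):
    # affine discriminant values: (nObs, nIn) @ (nIn, nCls) plus a bias row
    x = np.atleast_2d(x)
    return x @ weights[:-1] + weights[-1]

def probs(x, weights):
    # class probabilities: softmax of the discriminant values, one row per observation
    v = discrim(x, weights)
    v = v - v.max(axis=1, keepdims=True)   # shift by the row max for numerical stability
    e = np.exp(v)
    return e / e.sum(axis=1, keepdims=True)

# toy check: 5 observations, 3 inputs, 4 classes; the weight matrix carries a final bias row
rng = np.random.default_rng(0)
w = rng.normal(size=(3 + 1, 4))
p = probs(rng.normal(size=(5, 3)), w)
assert np.allclose(p.sum(axis=1), 1.0)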
@@ -120,7 +122,7 @@ def gradient(self, x, g, returnError=True): delta = (probs - g) / probs.size - grad = util.bias(x).T.dot(delta) + grad = util.bias(x).T @ delta gf = grad.ravel() @@ -328,7 +330,7 @@ def gradient(self, x, g, returnError=True): penMask = np.ones_like(self.weights) penMask[-1,:] = 0.0 - grad = (util.bias(x).T.dot(delta) + + grad = (util.bias(x).T @ delta + self.elastic * 2.0 * self.penalty * penMask * self.weights / self.weights.size + # L2-norm penalty (1.0-self.elastic) * self.penalty * penMask * np.sign(self.weights) / self.weights.size) # L1-norm penalty diff --git a/cebl/ml/nnet/esn.py b/cebl/ml/nnet/esn.py index 5716ac5..5b8a3bf 100644 --- a/cebl/ml/nnet/esn.py +++ b/cebl/ml/nnet/esn.py @@ -151,7 +151,7 @@ def __init__(self, x, nRes=1024, rwScale=0.95, rwConn=0.01, iw = self.initIW(iwScale, iwConn) rw = self.initRW(rwScale, rwConn) - self.hw = np.vstack((iw,rw)) + self.hw = np.vstack((iw, rw)) if self.sparse: # is csc or csr faster? XXX - idfah @@ -180,7 +180,7 @@ def initIW(self, iwScale, iwConn): self.iwConn = iwConn iw = np.random.uniform(-self.iwMult, self.iwMult, - size=(self.nIn+1,self.nRes)) + size=(self.nIn+1, self.nRes)) connMask = np.random.random(iw.shape) > self.iwConn connMask[0,0] = False iw[connMask] = 0.0 @@ -262,7 +262,7 @@ def eval(self, x, context=None, returncontext=False): if context is None: context = np.zeros((nSeg, self.nRes), dtype=self.dtype) - xt = np.empty((nSeg,nIn+self.nRes)) + xt = np.empty((nSeg, nIn+self.nRes)) hwT = self.hw[:-1].T @@ -379,11 +379,11 @@ def setRWScale(self, x, scale): def plotActDensity(self, x, ax=None, **kwargs): act = self.eval(x, **kwargs) - t = np.arange(0.0,4.0,0.01) + t = np.arange(0.0, 4.0, 0.01) if ax is None: fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('Density') ax.set_ylabel('Reservoir Activation') @@ -391,7 +391,7 @@ def plotActDensity(self, x, ax=None, **kwargs): n, bins, patches = ax.hist(act.ravel(), normed=True, orientation='horizontal', label='Activations') - lines = ax.plot(np.linspace(0.0,np.max(n),t.size), np.tanh(t-2.0), + lines = ax.plot(np.linspace(0.0, np.max(n), t.size), np.tanh(t-2.0), linewidth=2, label=r'$\phi$') # label=r'$\phi='+self.phi.__name__) leg = ax.legend(loc='lower right') @@ -405,7 +405,7 @@ def plotWeightImg(self, ax=None): if ax is None: fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) img = ax.imshow(hw[self.nIn:,:], interpolation='none') @@ -525,11 +525,11 @@ def demoESP(): fig = plt.figure() - impulseAx = fig.add_subplot(2,1,1) + impulseAx = fig.add_subplot(2, 1, 1) impulseAx.plot(sig, color='grey', linewidth=3) impulseAx.plot(sigi, color='red') - actAx = fig.add_subplot(2,1,2) + actAx = fig.add_subplot(2, 1, 2) actAx.plot(act, color='black', linewidth=2) actAx.plot(acti, color='red') @@ -547,7 +547,7 @@ def xor(a, b): horizon = 5 transient = horizon+1 - x = np.random.randint(0,2, size=n).astype(np.float32) + x = np.random.randint(0, 2, size=n).astype(np.float32) g = np.array([int(xor(x[i-horizon], x[i-horizon-1])) if i > horizon else 0 for i in range(len(x))], dtype=np.float32) @@ -555,7 +555,7 @@ def xor(a, b): transient=transient, sideTrack=False, sparse=True, verbose=True) # redo for test data - x = np.random.randint(0,2, size=n) + x = np.random.randint(0, 2, size=n) g = np.array([int(xor(x[i-horizon], x[i-horizon-1])) if i > horizon else 0 for i in range(len(x))], dtype=np.float) @@ -565,21 +565,21 @@ def xor(a, b): net.reservoir.plotWeightImg() fig = plt.figure() - axTarg = 
fig.add_subplot(2,1,1) + axTarg = fig.add_subplot(2, 1, 1) axTarg.bar(range(len(g)), g) axTarg.set_xlim((0, len(g))) - axTarg.set_ylim((0.0,1.0)) + axTarg.set_ylim((0.0, 1.0)) - axOut = fig.add_subplot(2,1,2) + axOut = fig.add_subplot(2, 1, 2) axOut.bar(range(len(g)), out) axOut.set_xlim((0, len(g))) - axOut.set_ylim((0.0,1.0)) + axOut.set_ylim((0.0, 1.0)) def demoESNSine(): - time = np.linspace(0.0,10.0*np.pi,5000) + time = np.linspace(0.0, 10.0*np.pi, 5000) s1 = np.sin(time) s2 = np.cos(time) - s = np.vstack((s1,s2)).T + s = np.vstack((s1, s2)).T x = s[None,:-1] g = s[None,1:] @@ -591,7 +591,7 @@ def demoESNSine(): resid = g - pred fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) ax.plot(time, s, color='blue') ax.plot(time[1:], pred[0], color='red') diff --git a/cebl/ml/nnet/softmax.py b/cebl/ml/nnet/softmax.py index 2cf6a4e..25b1aaf 100644 --- a/cebl/ml/nnet/softmax.py +++ b/cebl/ml/nnet/softmax.py @@ -23,7 +23,7 @@ def __init__(self, classData, nHidden=10, transFunc=transfer.lecun, Args: classData: Training data. This is a numpy array or list of numpy - arrays with shape (nCls,nObs[,nIn]). If the + arrays with shape (nCls, nObs[,nIn]). If the dimensions index is missing the data is assumed to be one-dimensional. @@ -64,7 +64,8 @@ def __init__(self, classData, nHidden=10, transFunc=transfer.lecun, Refs: @incollection{lecun2012efficient, title={Efficient backprop}, - author={LeCun, Yann A and Bottou, L{\'e}on and Orr, Genevieve B and M{\"u}ller, Klaus-Robert}, + author={LeCun, Yann A and Bottou, L{\'e}on and Orr, + Genevieve B and M{\"u}ller, Klaus-Robert}, booktitle={Neural networks: Tricks of the trade}, pages={9--48}, year={2012}, @@ -90,9 +91,9 @@ def __init__(self, classData, nHidden=10, transFunc=transfer.lecun, assert len(self.transFunc) == self.nHLayers views = util.packedViews(self.layerDims, dtype=self.dtype) - self.pw = views[0] + self.pw = views[0] self.hws = views[1:-1] - self.vw = views[-1] + self.vw = views[-1] if not util.isiterable(weightInitFunc): weightInitFunc = (weightInitFunc,) * (self.nHLayers+1) @@ -152,7 +153,7 @@ def evalHiddens(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - A numpy array with shape (nObs,nHidden) containing the + A numpy array with shape (nObs, nHidden) containing the hidden layer activations for each input in x. """ x = np.asarray(x) @@ -172,7 +173,7 @@ def probs(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the + Numpy array with shape (nObs, nCls) containing the probability values. 
""" x = np.asarray(x) @@ -258,9 +259,9 @@ def gradient(self, x, g, returnError=True): # packed views of the hidden and visible gradient matrices views = util.packedViews(self.layerDims, dtype=self.dtype) - pg = views[0] + pg = views[0] hgs = views[1:-1] - vg = views[-1] + vg = views[-1] # forward pass z1 = util.bias(x) @@ -309,19 +310,19 @@ def demoFNS2d(): t1 = np.linspace(0.0, 2.0*np.pi, n1) x1 = 5.0*np.cos(t1) + np.random.normal(scale=noiseScale, size=n1) y1 = 5.0*np.sin(t1) + np.random.normal(scale=noiseScale, size=n1) - red = np.vstack((x1,y1)).T + red = np.vstack((x1, y1)).T x2 = np.linspace(-1.0, 3.0, n2) y2 = (x2-0.8)**2 - 2.5 y2 += np.random.normal(scale=noiseScale, size=n2) - green = np.vstack((x2,y2)).T + green = np.vstack((x2, y2)).T x3 = np.linspace(-3.0, 1.0, n3) y3 = -(x3+0.8)**2 + 2.5 y3 += np.random.normal(scale=noiseScale, size=n3) - blue = np.vstack((x3,y3)).T + blue = np.vstack((x3, y3)).T - classData = [red,green,blue] + classData = [red, green, blue] classData = [cls.astype(np.float32) for cls in classData] @@ -350,9 +351,9 @@ def demoFNS2d(): ## maxIter=10, precision=1.0e-10, verbose=True) # find class labels - redLabel = model.label(red) # one at a time + redLabel = model.label(red) # one at a time greenLabel = model.label(green) - blueLabel = model.label(blue) + blueLabel = model.label(blue) print(model.probs(classData[0]).dtype) print(model.probs(classData[1]).dtype) @@ -371,29 +372,29 @@ def demoFNS2d(): # first figure shows training data and class intersections fig = plt.figure() - ax = fig.add_subplot(2,2,1) + ax = fig.add_subplot(2, 2, 1) ax.set_title('Class Data') # training data - ax.scatter(red[:,0], red[:,1], color="red") + ax.scatter(red[:,0], red[:,1], color="red") ax.scatter(green[:,0], green[:,1], color="green") - ax.scatter(blue[:,0], blue[:,1], color="blue") + ax.scatter(blue[:,0], blue[:,1], color="blue") # generate grid over training data sw = 0.025 - sx = np.arange(mn[0],mx[0], sw) - sy = np.arange(mn[1],mx[1], sw) - x,y = np.meshgrid(sx,sy) + sx = np.arange(mn[0], mx[0], sw) + sy = np.arange(mn[1], mx[1], sw) + x, y = np.meshgrid(sx, sy) # get probability densities and labels for values in grid z = np.vstack((x.reshape((-1,)), y.reshape((-1,)))).T probs = model.probs(z) # red, green, blue and max probability densities - pRed = np.reshape(probs[:,0,None], x.shape) + pRed = np.reshape(probs[:,0,None], x.shape) pGreen = np.reshape(probs[:,1,None], x.shape) - pBlue = np.reshape(probs[:,2,None], x.shape) - pMax = np.reshape(np.max(probs, axis=1), x.shape) + pBlue = np.reshape(probs[:,2,None], x.shape) + pMax = np.reshape(np.max(probs, axis=1), x.shape) # class intersections diffRG = pRed - pGreen @@ -404,51 +405,51 @@ def demoFNS2d(): ##ax.contour(x, y, diffGB, colors='black', levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2,2,2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection='3d') ax.set_title('P(C = k)') # straight class colors for suface plots - color = np.reshape([pRed,pGreen,pBlue], (3, x.shape[0],x.shape[1])) - color = color.swapaxes(1,2).T + color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) + color = color.swapaxes(1, 2).T # flip colors to fade to white zro = np.zeros_like(x) colorFlip = np.ones((3, x.shape[0], x.shape[1])) - colorFlip -= (np.array((zro,pRed,pRed)) + - np.array((pGreen,zro,pGreen)) + - np.array((pBlue,pBlue,zro))) + colorFlip -= (np.array((zro, pRed, pRed)) + + np.array((pGreen, zro, pGreen)) + + np.array((pBlue, pBlue, zro))) colorFlip -= 
np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1,2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x,y, pMax, cmap=matplotlib.cm.jet, linewidth=0) - surf = ax.plot_surface(x,y, pMax, facecolors=colorFlip, + #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) + surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? # third figure shows contours and color image of probability densities - ax = fig.add_subplot(2,2,3) + ax = fig.add_subplot(2, 2, 3) ax.set_title('max_K P(C = k)') - #ax.pcolor(x,y,pMax) + #ax.pcolor(x, y, pMax) ax.imshow(colorFlip, origin='lower', - extent=(mn[0],mx[0],mn[1],mx[1]), aspect='auto') + extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') - # contours + # contours nLevel = 4 cs = ax.contour(x, y, pMax, colors='black', - levels=np.linspace(np.min(pMax),np.max(pMax),nLevel)) + levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) # fourth figure - ax = fig.add_subplot(2,2,4, projection='3d') + ax = fig.add_subplot(2, 2, 4, projection='3d') ax.set_title('argmax_K P(C = k)') labels = model.label(z) - lMax = np.reshape(labels, x.shape) + lMax = np.reshape(labels, x.shape) - surf = ax.plot_surface(x,y, lMax, facecolors=colorFlip, + surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) surf.set_edgecolor('black') diff --git a/cebl/ml/strans/ica.py b/cebl/ml/strans/ica.py index b1d638e..b2914c4 100644 --- a/cebl/ml/strans/ica.py +++ b/cebl/ml/strans/ica.py @@ -17,7 +17,8 @@ class IndependentComponentsAnalysis(STrans): References: @article{bell1995information, - title={An information-maximization approach to blind separation and blind deconvolution}, + title={An information-maximization approach to blind + separation and blind deconvolution}, author={Bell, Anthony J and Sejnowski, Terrence J}, journal={Neural computation}, volume={7}, @@ -28,7 +29,8 @@ class IndependentComponentsAnalysis(STrans): } @inproceedings{girolami1997generalised, - title={Generalised independent component analysis through unsupervised learning with emergent bussgang properties}, + title={Generalised independent component analysis through + unsupervised learning with emergent bussgang properties}, author={Girolami, Mark and Fyfe, C}, booktitle={Neural Networks, 1997., International Conference on}, volume={3}, @@ -38,7 +40,8 @@ class IndependentComponentsAnalysis(STrans): } @article{lee1999independent, - title={Independent component analysis using an extended infomax algorithm for mixed subgaussian and supergaussian sources}, + title={Independent component analysis using an extended infomax + algorithm for mixed subgaussian and supergaussian sources}, author={Lee, Te-Won and Girolami, Mark and Sejnowski, Terrence J}, journal={Neural computation}, volume={11}, @@ -81,7 +84,7 @@ def train(self, s, kurtosis, learningRate, tolerance, maxIter, callback, verbose # np.mean(y*util.tanh(y), axis=0)) k = np.sign(spstat.kurtosis(y, axis=0)) - k[np.isclose(k,0.0)] = -1.0 + k[np.isclose(k, 0.0)] = -1.0 grad[...] 
= (I - k*util.tanh(y).T.dot(y) - y.T.dot(y)).T.dot(self.w) * n @@ -103,7 +106,7 @@ def train(self, s, kurtosis, learningRate, tolerance, maxIter, callback, verbose elif np.max(np.abs(self.w)) > 1.0e100: self.reason = 'diverge' break - + if iteration >= maxIter: self.reason = 'maxiter' break @@ -125,9 +128,9 @@ def demoICA(): s1 = spsig.sawtooth(t) s2 = np.cos(5.0*t) s3 = np.random.uniform(-1.0, 1.0, size=t.size) - s = np.vstack((s1,s2,s3)).T + s = np.vstack((s1, s2, s3)).T - m = np.random.random((3,3)) + m = np.random.random((3, 3)) m /= m.sum(axis=0) sMixed = s.dot(m) @@ -136,23 +139,23 @@ def demoICA(): fig = plt.figure() - axOrig = fig.add_subplot(4,1, 1) + axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) axOrig.set_title('Unmixed Signal') axOrig.autoscale(tight=True) - axMixed = fig.add_subplot(4,1, 2) + axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) axMixed.set_title('Mixed Signal (random transform)') axMixed.autoscale(tight=True) - axUnmixed = fig.add_subplot(4,1, 3) + axUnmixed = fig.add_subplot(4, 1, 3) icaFilt.plotTransform(sMixed, ax=axUnmixed) axUnmixed.set_title('ICA Components') axUnmixed.autoscale(tight=True) - axCleaned = fig.add_subplot(4,1, 4) - icaFilt.plotFilter(sMixed, comp=(0,1,), ax=axCleaned) + axCleaned = fig.add_subplot(4, 1, 4) + icaFilt.plotFilter(sMixed, comp=(0, 1,), ax=axCleaned) axCleaned.set_title('Cleaned Signal (First two components kept)') axCleaned.autoscale(tight=True) diff --git a/cebl/ml/strans/msf.py b/cebl/ml/strans/msf.py index 061e5a0..193f35e 100644 --- a/cebl/ml/strans/msf.py +++ b/cebl/ml/strans/msf.py @@ -77,10 +77,10 @@ def demoMSF(): s1 = spsig.sawtooth(t) #+ 3.0 s2 = np.cos(5.0*t) s3 = np.random.uniform(-1.0, 1.0, size=t.size) - s = np.vstack((s1,s2,s3)).T + s = np.vstack((s1, s2, s3)).T #m = np.array([ [0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5] ]) - m = np.random.random((3,3)) + m = np.random.random((3, 3)) m /= m.sum(axis=0) sMixed = s.dot(m) @@ -88,22 +88,22 @@ def demoMSF(): msfFilt = MSF(sMixed, lags=0) fig = plt.figure() - axOrig = fig.add_subplot(4,1, 1) + axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) axOrig.set_title('Unmixed Signal') axOrig.autoscale(tight=True) - axMixed = fig.add_subplot(4,1, 2) + axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) axMixed.set_title('Mixed Signal (random transform)') axMixed.autoscale(tight=True) - axUnmixed = fig.add_subplot(4,1, 3) + axUnmixed = fig.add_subplot(4, 1, 3) msfFilt.plotTransform(sMixed, ax=axUnmixed) axUnmixed.set_title('MSF Components') axUnmixed.autoscale(tight=True) - axCleaned = fig.add_subplot(4,1, 4) + axCleaned = fig.add_subplot(4, 1, 4) msfFilt.plotFilter(sMixed, comp=(2,), remove=True, ax=axCleaned) axCleaned.set_title('Cleaned Signal (Last Component Removed)') axCleaned.autoscale(tight=True) diff --git a/cebl/ml/strans/pca.py b/cebl/ml/strans/pca.py index ce4d7ae..9b61a26 100644 --- a/cebl/ml/strans/pca.py +++ b/cebl/ml/strans/pca.py @@ -35,18 +35,18 @@ def train(self, s): def getMags(self): return self.mags - def plotMags(self, standardize=True, ax=None, *args, **kwargs): + def plotMags(self, standardize=True, ax=None, **kwargs): result = {} if ax is None: fig = plt.figure() result['fig'] = fig - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) result['ax'] = ax mags = self.mags / np.sum(self.mags) if standardize else self.mags sep = np.arange(len(mags)) - bars = plt.bar(sep, mags, *args, **kwargs) + bars = plt.bar(sep, mags, **kwargs) result['bars'] = bars return 
result @@ -62,7 +62,7 @@ def demoPCA(): s2 = np.cos(0.5*t) #s3 = np.random.normal(scale=1.2, size=t.size) s3 = np.random.uniform(-2.0, 2.0, size=t.size) - s = np.vstack((s1,s2,s3)).T + s = np.vstack((s1, s2, s3)).T theta1 = np.pi/6.0 rot1 = np.array([[np.cos(theta1), -np.sin(theta1), 0.0], @@ -88,23 +88,23 @@ def demoPCA(): fig = plt.figure() - axOrig = fig.add_subplot(4,1, 1) + axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) axOrig.set_title('Unmixed Signal') axOrig.autoscale(tight=True) - axMixed = fig.add_subplot(4,1, 2) + axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) axMixed.set_title('Mixed Signal (3d rotation)') axMixed.autoscale(tight=True) - axUnmixed = fig.add_subplot(4,1, 3) + axUnmixed = fig.add_subplot(4, 1, 3) pcaFilt.plotTransform(sMixed, ax=axUnmixed) axUnmixed.set_title('PCA Components') axUnmixed.autoscale(tight=True) - axCleaned = fig.add_subplot(4,1, 4) - pcaFilt.plotFilter(sMixed, comp=(1,2,), ax=axCleaned) + axCleaned = fig.add_subplot(4, 1, 4) + pcaFilt.plotFilter(sMixed, comp=(1, 2,), ax=axCleaned) axCleaned.set_title('Cleaned Signal (First Component Removed)') axCleaned.autoscale(tight=True) @@ -119,17 +119,19 @@ def demoPCA2d(): s1 = np.vstack((np.random.normal(scale=3, size=n), np.random.normal(scale=0.3, size=n))).T s2 = s1.dot(rot) - s = np.vstack((s1,s2)) + s = np.vstack((s1, s2)) pca = PCA(s) y = pca.transform(s) fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) ax.scatter(s[:,0], s[:,1]) - ax.arrow(0.0, 0.0, pca.wInv[0,0]/pca.mags[0], pca.wInv[0,1]/pca.mags[0], head_width=0.05, head_length=0.1, color='red') - ax.arrow(0.0, 0.0, pca.wInv[1,0]/pca.mags[1], pca.wInv[1,1]/pca.mags[1], head_width=0.05, head_length=0.1, color='red') + ax.arrow(0.0, 0.0, pca.wInv[0,0]/pca.mags[0], pca.wInv[0,1]/pca.mags[0], + head_width=0.05, head_length=0.1, color='red') + ax.arrow(0.0, 0.0, pca.wInv[1,0]/pca.mags[1], pca.wInv[1,1]/pca.mags[1], + head_width=0.05, head_length=0.1, color='red') ax.grid() if __name__ == '__main__': diff --git a/cebl/ml/strans/strans.py b/cebl/ml/strans/strans.py index 8d2e207..54a7320 100644 --- a/cebl/ml/strans/strans.py +++ b/cebl/ml/strans/strans.py @@ -39,7 +39,7 @@ def transform(self, s, comp=None, remove=False): y = s.dot(self.w) - if comp is None or len(comp) == 0: + if comp is None or not comp: return y else: compInd = np.array([remove,]*s.shape[1]) @@ -52,7 +52,7 @@ def filter(self, s, comp, remove=False): y = s.dot(self.w) - if comp is None or len(comp) == 0: + if comp is None or not comp: compInd = np.ones(s.shape[1], dtype=self.dtype) else: compInd = np.empty(s.shape[1], dtype=self.dtype) @@ -64,22 +64,22 @@ def filter(self, s, comp, remove=False): filt = y.dot(compMat).dot(self.wInv) + self.means return filt[:,:self.nDim] - def plotTransform(self, s, comp=None, remove=False, ax=None, *args, **kwargs): + def plotTransform(self, s, comp=None, remove=False, ax=None, **kwargs): if ax is None: fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) y = self.transform(s, comp=comp, remove=remove) - lines = ax.plot(y+util.colsep(y), *args, **kwargs) + lines = ax.plot(y+util.colsep(y), **kwargs) return {'ax': ax, 'lines': lines} - def plotFilter(self, s, comp, remove=False, ax=None, *args, **kwargs): + def plotFilter(self, s, comp, remove=False, ax=None, **kwargs): if ax is None: fig = plt.figure() - ax = fig.add_subplot(1,1,1) + ax = fig.add_subplot(1, 1, 1) filt = self.filter(s, comp=comp, remove=remove) - lines = 
ax.plot(filt+util.colsep(filt), *args, **kwargs) + lines = ax.plot(filt+util.colsep(filt), **kwargs) return {'ax': ax, 'lines': lines} diff --git a/cebl/rt/main.py b/cebl/rt/main.py index 31dd66f..f4b9644 100644 --- a/cebl/rt/main.py +++ b/cebl/rt/main.py @@ -17,7 +17,7 @@ def OnInit(self): """Create a new CEBLMain frame. """ self.SetAppName('CEBL') - main = CEBLMain() + self.main = CEBLMain() return True def OnExit(self): @@ -28,7 +28,8 @@ def OnExit(self): return True class CEBLMain(wx.Frame): - """Top-level CEBL frame. Holds the notebook and source manager and maintains the general state of CEBL. + """Top-level CEBL frame. Holds the notebook and source manager and + maintains the general state of CEBL. """ def __init__(self): """Initialize the main GUI frame. @@ -131,7 +132,9 @@ def updateStatusBar(self, event=None): class Splash(wx.adv.SplashScreen): def __init__(self, parent): - logo = wx.Image(os.path.dirname(__file__) + '/images/CEBL3_splash.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap() + logo = wx.Image(os.path.dirname(__file__) + '/images/CEBL3_splash.png', + wx.BITMAP_TYPE_PNG).ConvertToBitmap() + wx.adv.SplashScreen.__init__(self, parent=parent, milliseconds=2000, bitmap=logo, splashStyle=wx.adv.SPLASH_CENTER_ON_SCREEN | wx.adv.SPLASH_TIMEOUT) From e97b378dfc74af1f9a339afecb54821eec042e01 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Fri, 1 Feb 2019 17:54:12 -0700 Subject: [PATCH 3/9] style: linting, remove trailing whitespaces --- cebl/eeg/readbdf.py | 10 +- cebl/ml/da.py | 4 +- cebl/ml/knn.py | 2 +- cebl/ml/logreg.py | 6 +- cebl/ml/nnet/convac.py | 6 +- cebl/ml/nnet/convreg.py | 4 +- cebl/ml/nnet/ddembed.py | 4 +- cebl/ml/nnet/forward.py | 2 +- cebl/ml/nnet/multielman.py | 2 +- cebl/ml/optim/alopex.py | 12 +- cebl/ml/optim/alopexas.py | 18 +-- cebl/ml/optim/rprop.py | 8 +- cebl/ml/optim/scg.py | 4 +- cebl/ml/optim/sciopt.py | 2 +- cebl/ml/optim/steepest.py | 4 +- cebl/ml/stand.py | 2 +- cebl/rt/filters/__init__.py | 2 +- cebl/rt/manager.py | 2 +- cebl/rt/pages/bciplayer.py | 6 +- cebl/rt/pages/filt.py | 1 - cebl/rt/pages/mentaltasks.py | 8 +- cebl/rt/pages/motorpong.py | 12 +- cebl/rt/pages/p300bot.py | 21 ++-- cebl/rt/pages/p300grid.py | 53 ++++---- cebl/rt/pages/pieern.py | 2 +- cebl/rt/pages/power.py | 3 +- cebl/rt/pages/standard.py | 2 +- cebl/rt/pages/textstim.py | 192 ++++++++++++++--------------- cebl/rt/pages/trace.py | 6 +- cebl/rt/sources/openbci/openbci.py | 13 +- cebl/rt/widgets/notebook.py | 2 +- cebl/rt/widgets/pong.py | 58 ++++----- cebl/rt/widgets/textstim.py | 2 +- cebl/util/arr.py | 2 +- 34 files changed, 236 insertions(+), 241 deletions(-) diff --git a/cebl/eeg/readbdf.py b/cebl/eeg/readbdf.py index 4d6633f..cf3bcb2 100644 --- a/cebl/eeg/readbdf.py +++ b/cebl/eeg/readbdf.py @@ -41,7 +41,7 @@ def unpackInt24s(binaryData, start, length, count, nChan, sampRate): ints = ints.reshape((-1, nChan)) return ints, start + totalLength - + def readBDF(fileName, verbose=False): with open(fileName, 'rb') as fileHandle: binaryData = fileHandle.read() @@ -53,16 +53,16 @@ def readBDF(fileName, verbose=False): raise RuntimeError('readBDF: idCode is', idcode, 'which is not BIOSEMI. 
Cannot read this file.') subjectId, nextByte = unpackStrings(binaryData, nextByte, 80) - if verbose: print('subjectId is', subjectId) + if verbose: print('subjectId is', subjectId) recordingId, nextByte = unpackStrings(binaryData, nextByte, 80) - if verbose: print('recordingId is', recordingId) + if verbose: print('recordingId is', recordingId) startDate, nextByte = unpackStrings(binaryData, nextByte, 8) - if verbose: print('startDate is', startDate) + if verbose: print('startDate is', startDate) startTime, nextByte = unpackStrings(binaryData, nextByte, 8) - if verbose: print('startTime is', startTime) + if verbose: print('startTime is', startTime) nBytesHeader, nextByte = unpackInts(binaryData, nextByte, 8) if verbose: print('nBytesHeader is', nBytesHeader) diff --git a/cebl/ml/da.py b/cebl/ml/da.py index 2c6caa1..b8cef16 100644 --- a/cebl/ml/da.py +++ b/cebl/ml/da.py @@ -352,7 +352,7 @@ def demoQDA2d(): ax.imshow(colorFlip, origin='lower', extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') - # contours + # contours nLevel = 6 cs = ax.contour(x, y, pMax, colors='black', levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) @@ -690,7 +690,7 @@ def demoLDA2d(): ax.imshow(colorFlip, origin='lower', extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') - # contours + # contours nLevel=6 cs = ax.contour(x, y, pMax, colors='black', levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) diff --git a/cebl/ml/knn.py b/cebl/ml/knn.py index 4ceb9bb..b976b5b 100644 --- a/cebl/ml/knn.py +++ b/cebl/ml/knn.py @@ -174,7 +174,7 @@ def demoKNN(): ax.imshow(colorFlip, origin='lower', extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') - # contours + # contours nLevel = 4 cs = ax.contour(x, y, pMax, colors='black', levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) diff --git a/cebl/ml/logreg.py b/cebl/ml/logreg.py index 014a6e3..31a259a 100644 --- a/cebl/ml/logreg.py +++ b/cebl/ml/logreg.py @@ -274,7 +274,7 @@ def demoLogisticRegression2d(): ax.imshow(colorFlip, origin='lower', extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') - # contours + # contours nLevel = 4 cs = ax.contour(x, y, pMax, colors='black', levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) @@ -301,7 +301,7 @@ def __init__(self, classData, penalty=0.0, elastic=1.0, **kwargs): elastic: - kwargs: + kwargs: """ self.penalty = penalty self.elastic = elastic @@ -315,7 +315,7 @@ def error(self, x, g): likes = np.log(util.capZero(self.probs(x))) pf = self.weights[:-1,:].ravel() - return (-np.mean(g*likes) + + return (-np.mean(g*likes) + self.elastic * self.penalty * pf.dot(pf)/pf.size + # L2-norm penalty (1.0-self.elastic) * self.penalty * np.mean(np.abs(pf))) # L1-norm penalty diff --git a/cebl/ml/nnet/convac.py b/cebl/ml/nnet/convac.py index 0b17538..d186e02 100644 --- a/cebl/ml/nnet/convac.py +++ b/cebl/ml/nnet/convac.py @@ -186,7 +186,7 @@ def evalConvs(self, x): #c = phi(c.dot(cw[:-1]) + cw[-1]) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) c = util.accum(c, poolSize, axis=1) / poolSize - + elif self.poolMethod == 'lanczos': c = util.timeEmbed(c, lags=width-1, axis=1) #c = phi(c.dot(cw[:-1]) + cw[-1]) @@ -359,10 +359,10 @@ def gradient(self, x, g, returnError=True): cPrimes.append(cPrime) c = phi(h) - + if poolSize == 1: pass - + elif self.poolMethod == 'average': c = util.accum(c, poolSize, axis=1) / poolSize diff --git a/cebl/ml/nnet/convreg.py b/cebl/ml/nnet/convreg.py index 98b8e82..0659d7e 100644 --- a/cebl/ml/nnet/convreg.py +++ b/cebl/ml/nnet/convreg.py @@ -110,7 +110,7 @@ def evalConvs(self, x): c = util.timeEmbed(c, 
lags=width-1, axis=1) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) - + cs.append(c) return cs @@ -212,7 +212,7 @@ def gradient(self, x, g, returnError=True): cPrimes.append(cPrime) c = phi(h) - + c1 = util.bias(c) # evaluate hidden and visible layers diff --git a/cebl/ml/nnet/ddembed.py b/cebl/ml/nnet/ddembed.py index 089b197..8e97f13 100644 --- a/cebl/ml/nnet/ddembed.py +++ b/cebl/ml/nnet/ddembed.py @@ -12,7 +12,7 @@ ### ### pad = np.zeros((lags,nDim)) ### delta = np.vstack((pad,delta,pad)) -### +### ### d = list() ### sz = delta.itemsize ### for i in range(origDim): @@ -35,7 +35,7 @@ ## ## pad = np.zeros((nSeg,lags,nDim)) ## delta = np.concatenate((pad,delta,pad), axis=1) -## +## ## d = list() ## sz = delta.itemsize ## diff --git a/cebl/ml/nnet/forward.py b/cebl/ml/nnet/forward.py index e0d4213..e15ae1c 100644 --- a/cebl/ml/nnet/forward.py +++ b/cebl/ml/nnet/forward.py @@ -42,7 +42,7 @@ def __init__(self, x, g, nHidden=10, transFunc=transfer.lecun, self.hws = views[1:-1] self.vw = views[-1] - if not util.isiterable(weightInitFunc): + if not util.isiterable(weightInitFunc): weightInitFunc = (weightInitFunc,) * (self.nHLayers+1) assert len(weightInitFunc) == (len(self.hws) + 1) diff --git a/cebl/ml/nnet/multielman.py b/cebl/ml/nnet/multielman.py index f8e4275..e1ebee3 100644 --- a/cebl/ml/nnet/multielman.py +++ b/cebl/ml/nnet/multielman.py @@ -302,7 +302,7 @@ def xor(a, b): #axH1.set_xlim((0, len(g))) #axH1.set_ylim((0.0,1.0)) axH1.set_title('H2') - + # second layer output axH2 = fig.add_subplot(4,2,7) #axH2.bar(range(len(hout[1])), hout[1]) diff --git a/cebl/ml/optim/alopex.py b/cebl/ml/optim/alopex.py index 7a300ad..cf99c50 100644 --- a/cebl/ml/optim/alopex.py +++ b/cebl/ml/optim/alopex.py @@ -17,7 +17,7 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, stepSize: Step size. - tempInit: + tempInit: tempIter: @@ -45,11 +45,11 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: @@ -217,7 +217,7 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, stepSize: Step size. - forgetFactor: + forgetFactor: accuracy: Terminate if current value of the error funciton falls below this value. @@ -243,11 +243,11 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: diff --git a/cebl/ml/optim/alopexas.py b/cebl/ml/optim/alopexas.py index f162492..f46a4c4 100644 --- a/cebl/ml/optim/alopexas.py +++ b/cebl/ml/optim/alopexas.py @@ -30,7 +30,7 @@ def alopex(optable, stepMax: An upper bound on step sizes. - tempInit: + tempInit: tempIter: @@ -64,11 +64,11 @@ def alopex(optable, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: @@ -274,7 +274,7 @@ def alopexas(optable, stepMax: An upper bound on step sizes. - tempInit: + tempInit: tempIter: @@ -308,11 +308,11 @@ def alopexas(optable, final results. If False (default), then a history is not kept. 
- callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: @@ -518,7 +518,7 @@ def alopexb(optable, stepMax: An upper bound on step sizes. - forgetFactor: + forgetFactor: accuracy: Terminate if current value of the error funciton falls below this value. @@ -550,11 +550,11 @@ def alopexb(optable, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: diff --git a/cebl/ml/optim/rprop.py b/cebl/ml/optim/rprop.py index 57a90a1..ff29a51 100644 --- a/cebl/ml/optim/rprop.py +++ b/cebl/ml/optim/rprop.py @@ -59,11 +59,11 @@ def rprop(optable, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: @@ -251,11 +251,11 @@ def irprop(optable, final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to optable.gradients. Returns: diff --git a/cebl/ml/optim/scg.py b/cebl/ml/optim/scg.py index 9b06ea0..1208c94 100644 --- a/cebl/ml/optim/scg.py +++ b/cebl/ml/optim/scg.py @@ -45,11 +45,11 @@ def scg(optable, is included in the final results. If False (default), then a history is not kept. - callback: + callback: verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Additional arguments passed to optable.error and optable.gradient. diff --git a/cebl/ml/optim/sciopt.py b/cebl/ml/optim/sciopt.py index 3b11526..6e7ecaa 100644 --- a/cebl/ml/optim/sciopt.py +++ b/cebl/ml/optim/sciopt.py @@ -6,7 +6,7 @@ def sciopt(optable, - method='CG', options=None, + method='CG', options=None, maxIter=1000, precision=1.0e-10, pTrace=False, eTrace=False, callback=None, verbose=False, diff --git a/cebl/ml/optim/steepest.py b/cebl/ml/optim/steepest.py index e35ad2b..d071424 100644 --- a/cebl/ml/optim/steepest.py +++ b/cebl/ml/optim/steepest.py @@ -14,7 +14,7 @@ def steepest(optable, Args: optable: - + learningRates: Initial learning learning rate. finalLearningRates: Final learning rate. If None (default), @@ -51,7 +51,7 @@ def steepest(optable, verbose: Print extra information to standard out during the training procedure. - + args, kwargs: Arguments passed to opt.gradients. 
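
The optimizer docstrings above (alopex, rprop, irprop, scg, sciopt, steepest) all describe one calling convention: an "optable" object whose error and gradient methods are evaluated each iteration, optional pTrace/eTrace histories, an optional callback, a verbose flag, and extra args/kwargs forwarded to the gradient routine. A bare-bones steepest-descent sketch of that convention follows; the optable attribute names (parameters/error/gradient), the callback signature and the return format are assumptions for illustration, not CEBL's actual interface.

    import numpy as np

    def steepestSketch(optable, learningRate=0.1, maxIter=1000,
                       accuracy=0.0, precision=1.0e-10,
                       eTrace=False, callback=None, verbose=False,
                       *args, **kwargs):
        eHist = []
        ePrev = error = np.inf

        for iteration in range(maxIter):
            error = optable.error(*args, **kwargs)
            grad = optable.gradient(*args, **kwargs)

            # assumed: parameters() exposes a flat, writable ndarray view
            optable.parameters()[...] -= learningRate * np.asarray(grad)

            if eTrace:
                eHist.append(error)
            if callback is not None:
                callback(optable, iteration, error)  # simplified signature
            if verbose:
                print(iteration, error)

            # stop when the error is small enough or has stopped improving
            if error < accuracy or abs(ePrev - error) < precision:
                break
            ePrev = error

        return {"error": error, "eTrace": eHist if eTrace else None}

The real routines add per-parameter step-size adaptation (rprop) or conjugate directions (scg), but the history, callback and verbose plumbing follows this same pattern.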
Returns: diff --git a/cebl/ml/stand.py b/cebl/ml/stand.py index 6183154..076c1c5 100644 --- a/cebl/ml/stand.py +++ b/cebl/ml/stand.py @@ -8,7 +8,7 @@ def __init__(self, x, method='zmus'): """ Args: - method: + method: zmus: Zero mean, unit standard deviation range: Range of [-1,1] diff --git a/cebl/rt/filters/__init__.py b/cebl/rt/filters/__init__.py index 0803e09..5795288 100644 --- a/cebl/rt/filters/__init__.py +++ b/cebl/rt/filters/__init__.py @@ -23,7 +23,7 @@ from .msf import MaxSignalFraction, MaxSignalFractionName filterChoices[MaxSignalFractionName] = MaxSignalFraction -from .pca import PrincipalComponents, PrincipalComponentsName +from .pca import PrincipalComponents, PrincipalComponentsName filterChoices[PrincipalComponentsName] = PrincipalComponents from .reference import Reference, ReferenceName diff --git a/cebl/rt/manager.py b/cebl/rt/manager.py index 23e766f..9148c67 100644 --- a/cebl/rt/manager.py +++ b/cebl/rt/manager.py @@ -97,7 +97,7 @@ def remRunningPage(self, page): number of running pages reaches zero. """ self.runningPages.remove(page) - + if self.getNRunningPages() == 0: self.src.stop() diff --git a/cebl/rt/pages/bciplayer.py b/cebl/rt/pages/bciplayer.py index 5996b8b..610e069 100644 --- a/cebl/rt/pages/bciplayer.py +++ b/cebl/rt/pages/bciplayer.py @@ -47,7 +47,7 @@ def __init__(self, *args, **kwargs): def initMediaPath(self): mediaPathControlBox = widgets.ControlBox(self, label='Media Path', orient=wx.HORIZONTAL) - + self.mediaPathTextCtrl = wx.TextCtrl(parent=self, style=wx.TE_PROCESS_ENTER) self.mediaPathTextCtrl.SetValue(self.pg.defaultMusicDir) mediaPathControlBox.Add(self.mediaPathTextCtrl, proportion=1, @@ -391,7 +391,7 @@ def initConfig(self): if not os.path.isdir(os.path.expanduser(self.defaultMusicDir)): self.defaultMusicDir = '~' - self.classifierChoices = ('Linear Discriminant', + self.classifierChoices = ('Linear Discriminant', 'K-Nearest Euclidean', 'K-Nearest Cosine', 'Linear Logistic', @@ -411,7 +411,7 @@ def initConfig(self): self.isi = 0.550 self.trainCap = None - + def initCurStimList(self): self.curStimList = copy.copy(self.choices) np.random.shuffle(self.curStimList) diff --git a/cebl/rt/pages/filt.py b/cebl/rt/pages/filt.py index e4ff67f..ced4765 100644 --- a/cebl/rt/pages/filt.py +++ b/cebl/rt/pages/filt.py @@ -1,6 +1,5 @@ import wx -from cebl.rt import sources from cebl.rt import widgets from cebl.rt import filters diff --git a/cebl/rt/pages/mentaltasks.py b/cebl/rt/pages/mentaltasks.py index c8eb7cd..ef8713f 100644 --- a/cebl/rt/pages/mentaltasks.py +++ b/cebl/rt/pages/mentaltasks.py @@ -41,7 +41,7 @@ def initFeatures(self): flag=wx.ALL | wx.EXPAND, border=10) featureSizer.Add(spanControlBox, proportion=1, flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10) - + # radio buttons for turning log transform on and off logTransControlBox = widgets.ControlBox(self, label='Log Trans', orient=wx.HORIZONTAL) @@ -374,7 +374,7 @@ def setMethod(self, event): self.pg.requireRetrain() def initLayout(self): - self.initStandardLayout() + self.initStandardLayout() self.FitInside() self.autoregPanel.Hide() @@ -790,7 +790,7 @@ def valTraceCB(optable, iteration, paramTrace, errorTrace, success=True): dialog.Destroy() resultText = (('Best Num Iterations: %f\n' % bestIter) + - ('Best Mean Training CA: %f\n' % bestMeanTrnCA) + + ('Best Mean Training CA: %f\n' % bestMeanTrnCA) + ('Best Mean Validation CA: %f\n' % bestMeanValCA) + ('Final Training CA: %f\n' % trainCA) + ('Confusion Matrix:\n' + str(trainConfusion) + '\n') + @@ -850,7 +850,7 @@ def 
trainAutoregRR(self, trainData): dialog.Destroy() resultText = (('Best Order: %f\n' % bestOrder) + - ('Best Mean Training CA: %f\n' % bestMeanTrnCA) + + ('Best Mean Training CA: %f\n' % bestMeanTrnCA) + ('Best Mean Validation CA: %f\n' % bestMeanValCA) + ('Final Training CA: %f\n' % trainCA) + ('Confusion Matrix:\n' + str(trainConfusion) + '\n') + diff --git a/cebl/rt/pages/motorpong.py b/cebl/rt/pages/motorpong.py index fccf6e4..b415061 100644 --- a/cebl/rt/pages/motorpong.py +++ b/cebl/rt/pages/motorpong.py @@ -42,7 +42,7 @@ def initFeatures(self): flag=wx.ALL | wx.EXPAND, border=10) featureSizer.Add(spanControlBox, proportion=1, flag=wx.LEFT | wx.BOTTOM | wx.RIGHT | wx.EXPAND, border=10) - + # radio buttons for turning log transform on and off logTransControlBox = widgets.ControlBox(self, label='Log Trans', orient=wx.HORIZONTAL) @@ -161,7 +161,7 @@ def initChoices(self): choiceControlBox.Add(choiceGridSizer, proportion=1, flag=wx.ALL | wx.EXPAND, border=0) - + self.sizer.Add(choiceControlBox, proportion=0, flag=wx.ALL | wx.EXPAND, border=10) def setChoices(self, event): @@ -365,7 +365,7 @@ def setMethod(self, event): self.pg.requireRetrain() def initLayout(self): - self.initStandardLayout() + self.initStandardLayout() self.FitInside() self.autoregPanel.Hide() @@ -397,8 +397,8 @@ def initFeatureCanvas(self): self.featureCanvas = FigureCanvas(parent=self, id=wx.ID_ANY, figure=self.featureFig) def initPongGame(self): - self.pongGame = widgets.Pong(self) - + self.pongGame = widgets.Pong(self) + # for some reason pongGame.Hide() is not called in showPieMenu. I don't know why self.pongGame.Hide() @@ -492,7 +492,7 @@ def initStandardToolbarControls(self): self.trainButton = wx.Button(self.toolbar, label='Train') self.toolbar.AddControl(self.trainButton, label='Train') self.Bind(wx.EVT_BUTTON, self.toggleTrain, self.trainButton) - + # button to re-train classifier self.retrainButton = wx.Button(self.toolbar, label='Retrain') self.retrainButton.Disable() diff --git a/cebl/rt/pages/p300bot.py b/cebl/rt/pages/p300bot.py index 73352c3..4cb93af 100644 --- a/cebl/rt/pages/p300bot.py +++ b/cebl/rt/pages/p300bot.py @@ -1,12 +1,13 @@ import copy +import random +import socket +import time + import matplotlib.pyplot as plt import matplotlib.gridspec as pltgs from matplotlib.backends.backend_wxagg \ import FigureCanvasWxAgg as FigureCanvas import numpy as np -import random -import socket -import time import wx from wx.lib.agw import aui import wx.lib.agw.floatspin as agwfs @@ -247,12 +248,12 @@ def initPieMenu(self): def initERPCanvas(self): #self.erpFig = plt.Figure() - #self.erpAx = self.erpFig.add_subplot(1,1,1) + #self.erpAx = self.erpFig.add_subplot(1, 1, 1) #self.erpCanvas = FigureCanvas(parent=self, id=wx.ID_ANY, figure=self.erpFig) self.erpFig = plt.Figure() self.erpFig.subplots_adjust(hspace=0.32, wspace=0.02, left=0.065, right=0.95, top=0.97, bottom=0.18) - gs = pltgs.GridSpec(2,4) + gs = pltgs.GridSpec(2, 4) self.erpAx = self.erpFig.add_subplot(gs[0,:]) self.h1Ax = self.erpFig.add_subplot(gs[1,0]) self.h2Ax = self.erpFig.add_subplot(gs[1,1]) @@ -425,8 +426,8 @@ def connectToRobot(self, host, port=7799): pass try: - self.robotSock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) - self.robotSock.connect((host,port)) + self.robotSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.robotSock.connect((host, port)) except Exception as e: self.robotSock = None @@ -482,7 +483,7 @@ def afterTrain(self, earlyStop): ##self.saveCap() def trainEpoch(self): - if len(self.curStimList) == 0: + if 
not self.curStimList: self.curTrainTrial += 1 self.initCurStimList() @@ -594,7 +595,7 @@ def trainLDA(self, classData, dialog): caption='Training Completed!', style=wx.OK | wx.ICON_INFORMATION) def trainKNN(self, classData, dialog, metric): - ks = np.arange(1,10) + ks = np.arange(1, 10) trainAUC = np.zeros(ks.shape) validAUC = np.zeros(ks.shape) @@ -759,7 +760,7 @@ def testClearStim(self, event=None): self.pieMenu.clearAllHighlights() self.src.setMarker(0.0) - if len(self.curStimList) == 0: + if not self.curStimList: self.initCurStimList() wx.CallLater(1000.0*self.windowEnd*1.05, self.testClassify) else: diff --git a/cebl/rt/pages/p300grid.py b/cebl/rt/pages/p300grid.py index a59bfd5..f2068ae 100644 --- a/cebl/rt/pages/p300grid.py +++ b/cebl/rt/pages/p300grid.py @@ -1,11 +1,10 @@ -import copy +import time + import matplotlib.pyplot as plt import matplotlib.gridspec as pltgs from matplotlib.backends.backend_wxagg \ import FigureCanvasWxAgg as FigureCanvas import numpy as np -import os -import time import wx from wx.lib.agw import aui import wx.lib.agw.floatspin as agwfs @@ -48,28 +47,28 @@ def initCopy(self): copyControlBox = widgets.ControlBox(self, label='Copy Text', orient=wx.VERTICAL) self.copyTextCtrl = wx.TextCtrl(self) - self.Bind(wx.EVT_TEXT, self.setCopyText, self.copyTextCtrl) + self.Bind(wx.EVT_TEXT, self.setCopyText, self.copyTextCtrl) self.offlineControls += [self.copyTextCtrl] copyControlBox.Add(self.copyTextCtrl, proportion=1, flag=wx.ALL | wx.EXPAND, border=10) copySizer.Add(copyControlBox, proportion=1, flag =wx.ALL | wx.EXPAND, border=10) - + self.sizer.Add(copySizer, proportion=0, flag=wx.EXPAND) def setCopyText(self, event): copyText = self.copyTextCtrl.GetLineText(0) self.pg.testText = copyText self.pg.gridSpeller.setCopyText(copyText) - if len(copyText) == 0: + if not copyText: self.pg.freeSpelling = True else: self.pg.freeSpelling = False def initNTrial(self): trialSizer = wx.BoxSizer(orient=wx.HORIZONTAL) - + trialControlBox = widgets.ControlBox(self, label='Num Trials', orient=wx.VERTICAL) self.trialSpinCtrl = wx.SpinCtrl(self, #style=wx.SP_WRAP, value=str(self.pg.nTrials), min=1, max=100) @@ -187,7 +186,7 @@ def setWindowEnd(self, event): def initColors(self): # first row colorSizer1 = wx.BoxSizer(orient=wx.HORIZONTAL) - + gridColorControlBox = widgets.ControlBox(self, label='Grid color', orient=wx.VERTICAL) self.gridColorCtrl = wx.ColourPickerCtrl(self) @@ -277,7 +276,7 @@ def setGridColor(self, event): def setBackgroundColor(self, event): self.pg.gridSpeller.setBackground(self.backgroundColorCtrl.GetColour()) - + def setHighlightColor(self, event): self.pg.gridSpeller.setHighlightColor(self.highlightColorCtrl.GetColour()) @@ -289,7 +288,7 @@ def setCopyColor(self, event): def setFeedColor(self, event): self.pg.gridSpeller.setFeedColor(self.feedColorCtrl.GetColour()) - + def initGridLayout(self): gridLayoutControlBox = widgets.ControlBox(self, label='Layout', orient=wx.VERTICAL) @@ -333,7 +332,7 @@ def setGridLayoutUpp(self, event): def setGridLayoutNum(self, event): self.pg.gridSpeller.setGridNum() - + def setGridLayoutEtc(self, event): self.pg.gridSpeller.setGridEtc() @@ -376,7 +375,7 @@ def initERPCanvas(self): self.erpFig = plt.Figure() self.erpFig.subplots_adjust(hspace=0.32, wspace=0.02, left=0.065, right=0.95, top=0.97, bottom=0.18) - gs = pltgs.GridSpec(2,4) + gs = pltgs.GridSpec(2, 4) self.erpAx = self.erpFig.add_subplot(gs[0,:]) self.h1Ax = self.erpFig.add_subplot(gs[1,0]) self.h2Ax = self.erpFig.add_subplot(gs[1,1]) @@ -448,10 +447,10 @@ def 
plotERP(self, cap): self.erpAx.set_ylabel(r'Signal ($\mu V$)') sampRate = targ.getSampRate() - erp1 = erp[int((0.2+0.2)*sampRate),:] - erp2 = erp[int((0.2+0.3)*sampRate),:] - erp3 = erp[int((0.2+0.4)*sampRate),:] - erp4 = erp[int((0.2+0.5)*sampRate),:] + erp1 = erp[int((0.2 + 0.2) * sampRate),:] + erp2 = erp[int((0.2 + 0.3) * sampRate),:] + erp3 = erp[int((0.2 + 0.4) * sampRate),:] + erp4 = erp[int((0.2 + 0.5) * sampRate),:] erpAll = np.concatenate((erp1, erp2, erp3, erp4)) @@ -528,7 +527,7 @@ def initConfig(self): self.windowEnd = 0.75 # classifier parameters - self.classifierChoices = ('Linear Discriminant', + self.classifierChoices = ('Linear Discriminant', 'K-Nearest Euclidean', 'K-Nearest Cosine', 'Linear Logistic', @@ -642,7 +641,7 @@ def saveResultText(self, resultText): # # original # #cap = cap.copy().demean().bandpass(0.5, 12.0, order=3) # # biosemi hack XXX - idfah - # cap = cap.copy().demean().reference((36,37)).deleteChans(range(32,40)) + # cap = cap.copy().demean().reference((36, 37)).deleteChans(range(32, 40)) # cap.keepChans(('Fz', 'Cz', 'P3', 'Pz', 'P4', 'P7', 'Oz', 'P8')) # # kind of a hack XXX - idfah @@ -686,8 +685,8 @@ def afterTrain(self, earlyStop): def showTrainSymbol(self): # random, no bottom row - #self.curRow = np.random.randint(0,5) - #self.curCol = np.random.randint(0,6) + #self.curRow = np.random.randint(0, 5) + #self.curCol = np.random.randint(0, 6) trainSyms = [sym if sym != ' ' else grid.space for sym in self.trainText] sym = trainSyms[(self.curRep-1) % len(trainSyms)] @@ -700,7 +699,7 @@ def showTrainSymbol(self): def trainEpoch(self): # if the stim list is empty - if len(self.curStimList) == 0: + if not self.curStimList: # increment current repetition self.curRep += 1 @@ -836,7 +835,7 @@ def trainLDA(self, classData, dialog): self.saveResultText(resultText) def trainKNN(self, classData, dialog, metric): - ks = np.arange(1,10) + ks = np.arange(1, 10) trainAUC = np.zeros(ks.shape) validAUC = np.zeros(ks.shape) @@ -1039,7 +1038,7 @@ def testClearStim(self, event=None): self.gridSpeller.removeHighlight() self.src.setMarker(0.0) - if len(self.curStimList) == 0: + if not self.curStimList: self.initCurStimList() wx.CallLater(1000.0*self.windowEnd*1.1, self.testClassify) @@ -1061,7 +1060,7 @@ def controlSpeller(self, rowChoice, colChoice): else: self.gridSpeller.appendFeedText('_') - if len(self.testSyms) == 0: + if not self.testSyms: ##wx.CallLater(1000.0*self.windowEnd*1.1-1000.0*self.si, self.endTest) wx.CallLater(1000.0*self.windowEnd*1.1, self.endTest) @@ -1104,6 +1103,6 @@ def testClassify(self): resultRow, resultCol = np.unravel_index(probabilities.argmax(), probabilities.shape) - #resultRow = np.random.randint(0,6) - #resultCol = np.random.randint(0,6) - self.controlSpeller(resultRow,resultCol) + #resultRow = np.random.randint(0, 6) + #resultCol = np.random.randint(0, 6) + self.controlSpeller(resultRow, resultCol) diff --git a/cebl/rt/pages/pieern.py b/cebl/rt/pages/pieern.py index 1cb3cee..411cc44 100644 --- a/cebl/rt/pages/pieern.py +++ b/cebl/rt/pages/pieern.py @@ -21,7 +21,7 @@ def __init__(self, parent, pg, *args, **kwargs): self.initTrialSecs() self.initARParam() self.initClassifier() - self.initStandardLayout() + self.initStandardLayout() def initChoices(self): choiceControlBox = widgets.ControlBox(self, label='Choices', orient=wx.VERTICAL) diff --git a/cebl/rt/pages/power.py b/cebl/rt/pages/power.py index 8e62da1..b08396a 100644 --- a/cebl/rt/pages/power.py +++ b/cebl/rt/pages/power.py @@ -1,4 +1,3 @@ -import numpy as np import wx from wx.lib.agw 
import aui @@ -147,7 +146,7 @@ def initControls(self): rbtns[0].SetValue(True) # select first button to start # bind callbacks to each radio button with appropriate factors - for rbtn,factor in zip(rbtns,(1, 2, 4, 8)): + for rbtn, factor in zip(rbtns, (1,2,4,8)): # Uses lexical scoping to save ratio for each button. def setDecimationWrapper(event, factor=factor): self.setDecimation(factor=factor) diff --git a/cebl/rt/pages/standard.py b/cebl/rt/pages/standard.py index f60d35f..27bbe7e 100644 --- a/cebl/rt/pages/standard.py +++ b/cebl/rt/pages/standard.py @@ -297,7 +297,7 @@ def initStandardToolbarControls(self): self.trainButton = wx.Button(self.toolbar, label='Train') self.toolbar.AddControl(self.trainButton, label='Train') self.Bind(wx.EVT_BUTTON, self.toggleTrain, self.trainButton) - + # button to re-train classifier self.retrainButton = wx.Button(self.toolbar, label='Retrain') self.retrainButton.Disable() diff --git a/cebl/rt/pages/textstim.py b/cebl/rt/pages/textstim.py index 871a6de..b71e045 100644 --- a/cebl/rt/pages/textstim.py +++ b/cebl/rt/pages/textstim.py @@ -1,11 +1,11 @@ import copy -import numpy as np import string import time + +import numpy as np import wx from wx.lib.agw import aui -from cebl import util from cebl.rt import widgets from .page import Page @@ -15,7 +15,7 @@ class TextStim(Page): def __init__(self, *args, **kwargs): self.initConfig() - Page.__init__(self, name='TextStim', *args, **kwargs) + Page.__init__(self, name="TextStim", *args, **kwargs) self.initAui() self.initToolbar() @@ -29,17 +29,17 @@ def initAui(self): def initToolbar(self): self.toolbar = aui.AuiToolBar(self) - self.startButton = wx.Button(self.toolbar, label='Start') - self.toolbar.AddControl(self.startButton, label='Run') + self.startButton = wx.Button(self.toolbar, label="Start") + self.toolbar.AddControl(self.startButton, label="Run") self.Bind(wx.EVT_BUTTON, self.toggleRunning, self.startButton) self.subjectTextCtrl = wx.TextCtrl(self.toolbar) - self.subjectTextCtrl.SetValue('s') - self.toolbar.AddControl(self.subjectTextCtrl, label='Subject') + self.subjectTextCtrl.SetValue("s") + self.toolbar.AddControl(self.subjectTextCtrl, label="Subject") self.protocolComboBox = wx.ComboBox(self.toolbar, choices=self.protocols, value=self.protocol, style=wx.CB_READONLY) - self.toolbar.AddControl(self.protocolComboBox, label='Protocol') + self.toolbar.AddControl(self.protocolComboBox, label="Protocol") self.Bind(wx.EVT_COMBOBOX, self.setProtocolFromComboBox, self.protocolComboBox) #self.toolbar.Realize() @@ -51,25 +51,25 @@ def initConfig(self): self.stimFont = wx.Font(pointSize=196, family=wx.FONTFAMILY_SWISS, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_NORMAL) - self.protocols = ('3minutes', 'letter practice b', - 'letter b', 'letter d', 'letter p', - 'letter m', 'letter t', 'letter x', - 'motortasks practice', - 'motortasks trial1', 'motortasks trial2', - 'mentaltasks practice', - 'mentaltasks trial1', - 'mentaltasks trial2', - 'mentaltasks trial3', - 'mentaltasks trial4', - 'mentaltasks trial5') + self.protocols = ("3minutes", "letter practice b", + "letter b", "letter d", "letter p", + "letter m", "letter t", "letter x", + "motortasks practice", + "motortasks trial1", "motortasks trial2", + "mentaltasks practice", + "mentaltasks trial1", + "mentaltasks trial2", + "mentaltasks trial3", + "mentaltasks trial4", + "mentaltasks trial5") self.nProtocols = len(self.protocols) - self.setProtocol('3minutes') + self.setProtocol("3minutes") self.startPause = 2.0 def initTextStim(self): - self.stimArea = 
widgets.TextStim(self, stimText='', + self.stimArea = widgets.TextStim(self, stimText="", stimColor=self.stimColor, stimFont=self.stimFont) self.stimTimer = wx.Timer(self) @@ -78,12 +78,12 @@ def initTextStim(self): def initLayout(self): """Initialize layout of main page and initialize layout. """ - toolbarAuiInfo = (aui.AuiPaneInfo().Name('toolbar').Caption(self.name + ' Tools') + toolbarAuiInfo = (aui.AuiPaneInfo().Name("toolbar").Caption(self.name + " Tools") .ToolbarPane().Top().CloseButton(False).LeftDockable(False).RightDockable(False)) self.auiManager.AddPane(self.toolbar, toolbarAuiInfo) self.toolbar.Realize() - stimPaneAuiInfo = aui.AuiPaneInfo().Name('stim').Caption(self.name + 'Stimulus').CenterPane() + stimPaneAuiInfo = aui.AuiPaneInfo().Name("stim").Caption(self.name + "Stimulus").CenterPane() self.auiManager.AddPane(self.stimArea, stimPaneAuiInfo) self.auiManager.Update() @@ -91,118 +91,118 @@ def initLayout(self): def setProtocol(self, protocol): self.protocol = protocol - if self.protocol == '3minutes': - self.letter = '' + if self.protocol == "3minutes": + self.letter = "" - self.stims = ['*',] + self.stims = ["*",] self.si = 3*60.0 self.isi = 0.0 - self.instructions = 'Relax and look at the\n\n%s for 3 minutes.' % self.stims[0] + self.instructions = "Relax and look at the\n\n%s for 3 minutes." % self.stims[0] - elif self.protocol.startswith('letter'): + elif self.protocol.startswith("letter"): self.letter = self.protocol[-1] - if self.protocol.startswith('letter practice'): - chars = 'bdpfnpdpbddsbakbbdb' + if self.protocol.startswith("letter practice"): + chars = "bdpfnpdpbddsbakbbdb" - elif self.letter in ('b', 'd', 'p'): - chars = 'bdpfnpdpbddsbakbbdbmbbpadtdtbdpvdnpbddpp' +\ - 'bsppdimddppdbpbbbdpbdpdpkibdpfdpeebpbbpv' +\ - 'vddbpdbcbpdpbbykcdpp' + elif self.letter in ("b", "d", "p"): + chars = "bdpfnpdpbddsbakbbdbmbbpadtdtbdpvdnpbddpp" + \ + "bsppdimddppdbpbbbdpbdpdpkibdpfdpeebpbbpv" + \ + "vddbpdbcbpdpbbykcdpp" - elif self.letter == 'm': - chars = 'zijovpmmlhvyummpcmthtdmbpkmimnuomtnmbsq' +\ - 'mglcmmanmqgluakqmnoumhfmimrjfjlmhrntmyjw' + elif self.letter == "m": + chars = "zijovpmmlhvyummpcmthtdmbpkmimnuomtnmbsq" + \ + "mglcmmanmqgluakqmnoumhfmimrjfjlmhrntmyjw" - elif self.letter == 't': - chars = 'tbmdfaootfrsqyjptotutrslttxpfejtqontmtdh' +\ - 'pwhtrweesqvaprbatmtztlrztktsutthtwpvtvne' + elif self.letter == "t": + chars = "tbmdfaootfrsqyjptotutrslttxpfejtqontmtdh" + \ + "pwhtrweesqvaprbatmtztlrztktsutthtwpvtvne" - elif self.letter == 'x': - chars = 'fjgxaxgunxzuyrxkqphxiddoyxqcccacxbtxxtxv' +\ - 'ecplmunxrxcxxzbexyfztojwmxybxnxhtpwxxwrz' + elif self.letter == "x": + chars = "fjgxaxgunxzuyrxkqphxiddoyxqcccacxbtxxtxv" + \ + "ecplmunxrxcxxzbexyfztojwmxybxnxhtpwxxwrz" else: stims = string.ascii_lowercase*3 self.stims = list(chars) - + self.si = 0.100 self.isi = 0.750 - self.instructions =\ - ('Count the number of times the letter\n\n%s' % self.letter) +\ - ' appears in the center of the screen.' + self.instructions = \ + ("Count the number of times the letter\n\n%s" % self.letter) + \ + " appears in the center of the screen." 
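
In the letter protocols above, each character is flashed for si seconds and followed by an isi-second blank (see changeStim further below), so the character string fixes both the count the subject is asked to keep and the approximate run time. A small, hypothetical bit of bookkeeping using only values that appear in the hunk (si, isi and the practice sequence); everything else is illustrative:

    si, isi = 0.100, 0.750           # stimulus-on and inter-stimulus intervals, seconds
    chars = "bdpfnpdpbddsbakbbdb"    # 'letter practice b' sequence from the patch
    target = "b"

    nTargets = chars.count(target)         # what the subject is asked to count
    approxSecs = len(chars) * (si + isi)   # ignores the 2 s startPause lead-in

    print("%d targets over roughly %.1f seconds" % (nTargets, approxSecs))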
- elif self.protocol.startswith('motortasks'): - self.letter = '' + elif self.protocol.startswith("motortasks"): + self.letter = "" - #mtasks = ['Left', 'Right'] - #if self.protocol.startswith('motortasks practice'): + #mtasks = ["Left", "Right"] + #if self.protocol.startswith("motortasks practice"): # nTrials = 1 #else: # nTrials = 5 #self.stims = sum([list(np.random.permutation(mtasks)) for i in range(nTrials)], []) - if self.protocol.startswith('motortasks practice'): - self.stims = ['Left', 'Right'] + if self.protocol.startswith("motortasks practice"): + self.stims = ["Left", "Right"] else: - self.stims = ['Right', 'Right', - 'Left', 'Right', - 'Left', 'Left', - 'Right', 'Left'] + self.stims = ["Right", "Right", + "Left", "Right", + "Left", "Left", + "Right", "Left"] self.si = 10.0 self.isi = 5.0 - self.instructions ='''In your mind only, please perform one of the following tasks + self.instructions = '''In your mind only, please perform one of the following tasks when one of the following cues appears. When the screen is blank, relax and think of nothing. -'Left' think about repeatedly raising and lowering your left arm over your head. +"Left" think about repeatedly raising and lowering your left arm over your head. -'Right' think about repeatedly raising and lowering your right arm over your head.''' +"Right" think about repeatedly raising and lowering your right arm over your head.''' - elif self.protocol.startswith('mentaltasks'): - self.letter = '' + elif self.protocol.startswith("mentaltasks"): + self.letter = "" - #mtasks = ['Count', 'Fist', 'Rotate', 'Song'] - #if self.protocol.startswith('mentaltasks practice'): + #mtasks = ["Count", "Fist", "Rotate", "Song"] + #if self.protocol.startswith("mentaltasks practice"): # nTrials = 1 #else: # nTrials = 3 #self.stims = sum([list(np.random.permutation(mtasks)) for i in range(nTrials)], []) - #if self.protocol.startswith('mentaltasks practice'): - # self.stims = ['Count', 'Rotate', 'Song', 'Fist'] + #if self.protocol.startswith("mentaltasks practice"): + # self.stims = ["Count", "Rotate", "Song", "Fist"] #else: - # self.stims = ['Count', 'Song', 'Rotate', 'Count', - # 'Fist', 'Song', 'Rotate', 'Fist', - # 'Count', 'Fist', 'Rotate', 'Song', - # 'Rotate', 'Count', 'Song', 'Count', - # 'Fist', 'Rotate', 'Song', 'Fist'] - self.stims = ['Count', 'Rotate', 'Song', 'Fist'] + # self.stims = ["Count", "Song", "Rotate", "Count", + # "Fist", "Song", "Rotate", "Fist", + # "Count", "Fist", "Rotate", "Song", + # "Rotate", "Count", "Song", "Count", + # "Fist", "Rotate", "Song", "Fist"] + self.stims = ["Count", "Rotate", "Song", "Fist"] np.random.shuffle(self.stims) self.si = 10.0 self.isi = 5.0 - self.instructions ='''In your mind only, please perform one of the following tasks + self.instructions = '''In your mind only, please perform one of the following tasks when one of the following cues appears. When the screen is blank, relax and think of nothing. 
-'Count' think about counting backwards from 100 by 3 +"Count" think about counting backwards from 100 by 3 -'Fist' think about repeatedly clenching and opening your right hand +"Fist" think about repeatedly clenching and opening your right hand -'Rotate' think about a rotating cube suspended in air +"Rotate" think about a rotating cube suspended in air -'Song' sing a favorite song silently to yourself''' +"Song" sing a favorite song silently to yourself''' else: - raise RuntimeError('Invalid protocol: ' % protocol) + raise RuntimeError("Invalid protocol: " % protocol) def toggleRunning(self, event=None): if self.isRunning(): @@ -210,7 +210,7 @@ def toggleRunning(self, event=None): self.startButton.Disable() else: self.start() - self.startButton.SetLabel('Stop') + self.startButton.SetLabel("Stop") def setProtocolFromComboBox(self, event): self.setProtocol(self.protocolComboBox.GetValue()) @@ -218,7 +218,7 @@ def setProtocolFromComboBox(self, event): def beforeStart(self): instructionDialog = wx.MessageDialog(self, self.instructions, - 'Instructions', style=wx.OK | wx.CENTER) + "Instructions", style=wx.OK | wx.CENTER) instructionDialog.ShowModal() instructionDialog.Destroy() @@ -226,7 +226,7 @@ def afterStart(self): self.nextBlank = True self.stopFlag = False self.availStims = copy.copy(self.stims) - self.stimArea.setStimText('') + self.stimArea.setStimText("") self.startTime = time.time() @@ -234,18 +234,18 @@ def afterStart(self): def changeStim(self, event=None): if self.stopFlag: - curStim = '' + curStim = "" self.src.setMarker(0.0) - #print('stopping') + #print("stopping") self.stop() - self.startButton.SetLabel('Start') + self.startButton.SetLabel("Start") self.startButton.Enable() - elif len(self.availStims) == 0: + elif not self.availStims: self.stimTimer.Start(1000.0*self.startPause, oneShot=True) - curStim = '' + curStim = "" self.stopFlag = True self.src.setMarker(0.0) @@ -253,7 +253,7 @@ def changeStim(self, event=None): if self.isRunning(): self.stimTimer.Start(1000.0*self.isi, oneShot=True) - curStim = '' + curStim = "" self.nextBlank = False self.src.setMarker(0.0) @@ -261,7 +261,7 @@ def changeStim(self, event=None): self.stimTimer.Start(1000.0*self.si, oneShot=True) curStim = self.availStims.pop(0) - if self.protocol.startswith('letter'): + if self.protocol.startswith("letter"): if curStim == self.letter: sign = 1 else: @@ -276,14 +276,14 @@ def changeStim(self, event=None): self.stimArea.setStimText(curStim) def beforeStop(self): - if not 'practice' in self.protocol: + if not "practice" in self.protocol: cap = self.src.getEEGSecs(time.time() - self.startTime, filter=False) - fileName = self.subjectTextCtrl.GetValue() + '-' +\ - self.protocol.replace(' ', '-') + '.pkl' + fileName = self.subjectTextCtrl.GetValue() + "-" + \ + self.protocol.replace(" ", "-") + ".pkl" - saveDialog = wx.FileDialog(self, message='Save EEG data.', - wildcard='Pickle (*.pkl)|*.pkl|All Files|*', + saveDialog = wx.FileDialog(self, message="Save EEG data.", + wildcard="Pickle (*.pkl)|*.pkl|All Files|*", defaultFile=fileName, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) @@ -292,7 +292,7 @@ def beforeStop(self): return cap.saveFile(saveDialog.GetPath()) except Exception: - wx.LogError('Save failed!') + wx.LogError("Save failed!") raise finally: saveDialog.Destroy() @@ -313,9 +313,9 @@ def afterStop(self): self.setProtocol(self.protocols[nextProtoIndex]) def sayThankYou(self): - thankYou = 'This session is complete. Thank you for participating!!' + thankYou = "This session is complete. 
Thank you for participating!!" thanksDialog = wx.MessageDialog(self, thankYou, - 'Thank You!', style=wx.OK | wx.CENTER) + "Thank You!", style=wx.OK | wx.CENTER) thanksDialog.ShowModal() thanksDialog.Destroy() diff --git a/cebl/rt/pages/trace.py b/cebl/rt/pages/trace.py index 4db5d36..c56ea2c 100644 --- a/cebl/rt/pages/trace.py +++ b/cebl/rt/pages/trace.py @@ -1,10 +1,8 @@ import numpy as np -import time import wx from wx.lib.agw import aui from cebl.rt import widgets -from cebl import sig from .standard import StandardConfigPanel, StandardMonitorPage @@ -85,7 +83,7 @@ def initTopbar(self): rbtns[0].SetValue(True) # select first button to start # bind callbacks to each radio button with appropriate factors - for rbtn,factor in zip(rbtns,(1, 2, 4, 8)): + for rbtn, factor in zip(rbtns, (1,2,4,8)): # Uses lexical scoping to save ratio for each button. def setDecimationWrapper(event, factor=factor): self.setDecimation(factor=factor) @@ -306,7 +304,7 @@ def updatePlot(self, event=None): markerScale = self.scale markers = 0.9 * markers * markerScale / np.max(np.abs(markers)) - data = np.hstack((data,markers[:,None])) + data = np.hstack((data, markers[:,None])) chanNames = chanNames + ['Mk'] # tell trace plot widget to draw diff --git a/cebl/rt/sources/openbci/openbci.py b/cebl/rt/sources/openbci/openbci.py index d10bb90..7dc202a 100644 --- a/cebl/rt/sources/openbci/openbci.py +++ b/cebl/rt/sources/openbci/openbci.py @@ -20,7 +20,7 @@ ADS1299_VREF = 4.5 #reference voltage for ADC in ADS1299. set by its hardware ADS1299_GAIN = 24.0 #assumed gain setting for ADS1299. set by its Arduino code SCALE_uVOLTS_PER_COUNT = ADS1299_VREF/float((pow(2,23)-1))/ADS1299_GAIN*1000000. -SCALE_ACCEL_G_PER_COUNT = 0.002 /(pow(2,4)) #assume set to +/4G, so 2 mG +SCALE_ACCEL_G_PER_COUNT = 0.002 /(pow(2,4)) #assume set to +/4G, so 2 mG class OpenBCIConfigPanel(SourceConfigPanel): def __init__(self, parent, src, *args, **kwargs): @@ -73,7 +73,7 @@ def __init__(self, mgr, sampRate=250, # observations collected in each poll self.pollSize = pollSize self.firstPoll = True - + ## Connection ##################################### @@ -133,7 +133,7 @@ def connect(self): self.stopAcquisition() # self.configuration = self.getConfig() - + self.connected = True except Exception as e: self.connected = False @@ -174,7 +174,6 @@ def startAcquisition(self): # self.stopAcquisition() Now done in beforeStart print('sending start command b') self.device.write(b'b') - def stopAcquisition(self): # send stop command @@ -281,14 +280,14 @@ def pollData(self): reply = self.device.read(scanSize * self.pollSize) # print('pollData after read',len(reply)) - startByte,sampleId = struct.unpack('BB',reply[:2]) - + startByte, sampleId = struct.unpack('BB', reply[:2]) + # print('sampleId',sampleId,'startbyte',startByte,'inwaiting',self.device.inWaiting()) for polli in range(self.pollSize): # big-endian - eeg[polli,:] = [struct.unpack('>i', (b'\x00' if reply[i] < 0x80 else b'\xff') + + eeg[polli,:] = [struct.unpack('>i', (b'\x00' if reply[i] < 0x80 else b'\xff') + reply[i:i+3])[0] for i in eegIndices+(polli*scanSize)] #eeg[polli,:] = np.array([struct.unpack('>i', (b'\x00' if reply[i] < 0x80 else b'\xff') + reply[i:i+3])[0] for i in eegIndices+(polli*self.pollSize)]) diff --git a/cebl/rt/widgets/notebook.py b/cebl/rt/widgets/notebook.py index 1b4ac21..3c10087 100644 --- a/cebl/rt/widgets/notebook.py +++ b/cebl/rt/widgets/notebook.py @@ -90,7 +90,7 @@ def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) #self.Bind(wx.EVT_KEY_UP, 
self.onKeyUp) self.Bind(events.EVT_FULLSCREEN, self.toggleFullScreen) - + #def onKeyUp(self, event): # key = event.GetKeyCode() # if key == wx.WXK_F11: diff --git a/cebl/rt/widgets/pong.py b/cebl/rt/widgets/pong.py index 53fff85..3af87fd 100644 --- a/cebl/rt/widgets/pong.py +++ b/cebl/rt/widgets/pong.py @@ -11,19 +11,19 @@ class PongObject: def __init__(self): self.coords = np.array([0.0,0.0]) self.velocity = np.array([0.0,0.0]) - + self.pen = wx.Pen((250,250,250), 2) self.brush = wx.Brush((255,255,255)) def outsideOfScreen(self, windowDiameter): return False - + def update(self, windowDiameter): - self.coords += self.velocity - + self.coords += self.velocity + def setCoords(self, coords): self.coords[:] = coords - + def setXCoord(self, x): self.coords[0] = x @@ -42,7 +42,7 @@ def __init__(self): #self.speedPercentage = 0.01 self.speedPercentage = 0.004 - self.direction = 2 + self.direction = 2 self.touchingBottom = False @@ -53,25 +53,25 @@ def draw(self, gc, windowDiameter): gc.SetPen(self.pen) gc.SetBrush(self.brush) - gc.DrawEllipse(0.0, 0.0, self.diameterPercentage * windowDiameter, + gc.DrawEllipse(0.0, 0.0, self.diameterPercentage * windowDiameter, self.diameterPercentage * windowDiameter) gc.PopState() def bounceOfWalls(self): - if self.coords[0] + self.diameterPercentage > 0.995: + if self.coords[0] + self.diameterPercentage > 0.995: self.reflectVertical() - + if self.coords[0] < 0.005: - self.reflectVertical() + self.reflectVertical() if self.coords[1] < 0.005: self.reflectHorizontal() - + if self.coords[1] + self.diameterPercentage > 0.995: self.touchingBottom = True - - return False + + return False def reflectVertical(self): self.velocity[0] *= -1 @@ -92,7 +92,7 @@ def setSpeed(self, speedPercentage): def squareDistance(self, pointA, pointB): xA, yA = pointA[0], pointA[1] xB, yB = pointB[0], pointB[1] - return (xA - xB)**2 + (yA - yB)**2 + return (xA - xB)**2 + (yA - yB)**2 def intersectCircle(self, pointA, pointB): circleCenter = self.coords + self.diameterPercentage*0.5 @@ -101,7 +101,7 @@ def intersectCircle(self, pointA, pointB): circleCenter[0] -= pointA[0] circleCenter[1] -= pointA[1] - + pointB[0] -= pointA[0] pointB[1] -= pointA[1] @@ -119,12 +119,12 @@ def intersectCircle(self, pointA, pointB): def dealWithPaddle(self, paddle): """ pointA = [paddle.coords[0], paddle.coords[1]] - pointB = [paddle.coords[0] + paddle.widthPercentage, paddle.coords[1]] + pointB = [paddle.coords[0] + paddle.widthPercentage, paddle.coords[1]] pointC = [paddle.coords[0] + paddle.widthPercentage, paddle.coords[1] + paddle.heightPercentage] pointD = [paddle.coords[0], paddle.coords[1] + paddle.heightPercentage] - circleCenter = [self.coords[0] + self.diameterPercentage*0.5, - self.coords[1] + self.diameterPercentage*0.5] + circleCenter = [self.coords[0] + self.diameterPercentage*0.5, + self.coords[1] + self.diameterPercentage*0.5] circleRadius = self.diameterPercentage/2 @@ -147,7 +147,7 @@ def dealWithPaddle(self, paddle): if circleCenter[0] >= pointD[0] and circleCenter[0] <= pointC[0]: if self.intersectCircle(pointD, pointC): self.coords[1] = pointD[1] - self.reflectHorizontal() + self.reflectHorizontal() return True if circleCenter[1] >= pointA[1] and circleCenter[0] <= pointD[1]: @@ -158,7 +158,7 @@ def dealWithPaddle(self, paddle): if circleCenter[1] >= pointB[1] and circleCenter[0] <= pointC[1]: if self.intersectCircle(pointC, pointB): - self.coords[0] = pointC[0] + self.coords[0] = pointC[0] self.reflectVertical() return True @@ -184,7 +184,7 @@ def setRadius(self, radiusPercentage): 
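
The intersectCircle and dealWithPaddle code above decides ball/paddle collisions by treating each paddle edge as a line segment and asking whether it crosses the ball's circle, using the sign of a quadratic discriminant. A standalone sketch of that standard test follows; it is not the CEBL implementation, and the function name and degenerate-segment guard are illustrative:

    import numpy as np

    def segmentIntersectsCircle(pointA, pointB, center, radius):
        """True if the segment from pointA to pointB crosses the circle."""
        pointA, pointB, center = (np.asarray(p, dtype=float)
                                  for p in (pointA, pointB, center))
        d = pointB - pointA          # segment direction
        f = pointA - center          # circle center to segment start

        qa = d.dot(d)
        if qa == 0.0:                # degenerate segment: a single point
            return f.dot(f) <= radius**2

        qb = 2.0 * f.dot(d)
        qc = f.dot(f) - radius**2

        disc = qb*qb - 4.0*qa*qc
        if disc < 0.0:
            return False             # the infinite line misses the circle

        root = np.sqrt(disc)
        t1 = (-qb - root) / (2.0*qa)
        t2 = (-qb + root) / (2.0*qa)
        return 0.0 <= t1 <= 1.0 or 0.0 <= t2 <= 1.0

Testing the paddle's four edges in turn with a routine like this reproduces the per-edge checks in dealWithPaddle.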
class Paddle(PongObject): def __init__(self, widthPercentage = 0.4, heightPercentage = 0.025): PongObject.__init__(self) - self.widthPercentage = widthPercentage + self.widthPercentage = widthPercentage self.heightPercentage = heightPercentage #self.speedPercentage = 0.015 @@ -214,7 +214,7 @@ def stopMoving(self): def update(self, windowDiameter): self.setYCoord(0.95-self.heightPercentage) - PongObject.update(self, windowDiameter) + PongObject.update(self, windowDiameter) if self.coords[0] < 0.005: self.coords[0] = 0.005 @@ -224,7 +224,7 @@ def update(self, windowDiameter): def setWidth(self, widthPercentage): self.widthPercentaga = widthPercentage - + def setHeight(self, heightPercentage): self.heightPercentage - heightPercentage @@ -234,7 +234,7 @@ def setSpeed(self, speedPercentage): class Border(PongObject): def __init__(self, widthPercentage = 0.4, heightPercentage = 0.05, coords=np.array([0.0,0.0])): PongObject.__init__(self) - self.widthPercentage = widthPercentage + self.widthPercentage = widthPercentage self.heightPercentage = heightPercentage self.setCoords(coords) @@ -265,7 +265,7 @@ def scoreGood(self): def scoreBad(self): self.score[1] += 1 - + def reset(self): self.score[...] = 0 @@ -281,7 +281,7 @@ def draw(self, gc, windowDiameter): gc.SetFont(gc.CreateFont(self.scoreFont, col='white')) gc.DrawText(str(self.score[0]) + ":" + str(self.score[1]),0,0) - gc.PopState() + gc.PopState() def update(self, windowDiameter): pass @@ -317,7 +317,7 @@ def initRefreshTimer(self): self.refreshTimer.Start(30.0) def bindKeyboard(self): - self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress) + self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress) self.Bind(wx.EVT_KEY_UP, self.onKeyRelease) def onKeyPress(self, event): @@ -331,7 +331,7 @@ def onKeyPress(self, event): if keycode == ord('R') or keycode == ord('r'): self.startGame() - + def onKeyRelease(self, event): self.stopPaddle() @@ -383,7 +383,7 @@ def draw(self, gc): # Make the game centered gc.PushState() gc.Translate((self.GetSize()[0] - self.winRadius*2)*0.5, 0) - + [obj.draw(gc, self.winRadius*2) for obj in self.gameObjects] gc.PopState() diff --git a/cebl/rt/widgets/textstim.py b/cebl/rt/widgets/textstim.py index a63d66c..9de823b 100644 --- a/cebl/rt/widgets/textstim.py +++ b/cebl/rt/widgets/textstim.py @@ -256,7 +256,7 @@ def __init__(self, parent, stim='*', *args, **kwargs): parent: wx parent object. stim: String containing the idle stimulus. - + args, kwargs: Additional arguments passed to TextStim base class. 
""" diff --git a/cebl/util/arr.py b/cebl/util/arr.py index 2808bf1..dee9daf 100644 --- a/cebl/util/arr.py +++ b/cebl/util/arr.py @@ -326,7 +326,7 @@ def softmaxM1(x): terms = capZero(np.exp(x-mx)) denom = (emx + np.sum(terms, axis=1)).reshape((-1, 1)) return np.hstack((terms/denom, emx/denom)) - + def logSoftmaxM1(x): mx = np.max((np.max(x), 0.0)) xmx = x - mx From 2d9676f845531c283054de0c17c4c4167d261165 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Fri, 15 Feb 2019 23:18:40 -0700 Subject: [PATCH 4/9] style: linting --- cebl/eeg/head.py | 6 +++--- cebl/ml/da.py | 2 +- cebl/ml/optim/scg.py | 4 ++-- cebl/rt/filters/filt.py | 2 +- cebl/rt/manager.py | 10 ++++----- cebl/rt/pages/bciplayer.py | 4 ++-- cebl/rt/pages/mentaltasks.py | 8 +++---- cebl/rt/pages/motorpong.py | 8 +++---- cebl/rt/pages/page.py | 2 +- cebl/rt/sources/erptest.py | 2 +- cebl/rt/widgets/control.py | 35 +++++++++++++++--------------- cebl/rt/widgets/mplayer.py | 41 ++++++++++++++++++------------------ cebl/rt/widgets/textstim.py | 12 +++-------- 13 files changed, 64 insertions(+), 72 deletions(-) diff --git a/cebl/eeg/head.py b/cebl/eeg/head.py index b691651..cd81329 100644 --- a/cebl/eeg/head.py +++ b/cebl/eeg/head.py @@ -128,7 +128,7 @@ def plotHead(chanNames=('F3','F4','C3','C4','P3','P4','O1','O2'), chanNamesLower = [chanName.lower() for chanName in chanNames] # lower case # if no valid chanNames then just draw the outline - if len(chanNames) == 0: + if not chanNames: return result # get 3d cartesian coordinates for each channel @@ -194,7 +194,7 @@ def plotHeadLines(magnitudes, chanNames=('F3','F4','C3','C4','P3','P4','O1','O2' result['mx'] = mx # if no valid chanNames then just draw the outline - if len(chanNames) == 0: + if not chanNames: return result # get 3d cartesian coordinates for each channel @@ -290,7 +290,7 @@ def plotHeadInterp(magnitudes, chanNames=('Fp1','Fp2','F7','F3','Fz','F4','F8',' magnitudes = np.asarray(magnitudes) # if no valid mags then just draw the outline - if len(magnitudes) == 0: + if not magnitudes: return result # get min and max magnitudes if not provided diff --git a/cebl/ml/da.py b/cebl/ml/da.py index b8cef16..f046f19 100644 --- a/cebl/ml/da.py +++ b/cebl/ml/da.py @@ -504,7 +504,7 @@ def logDens(self, x): dv = self.discrim(x) # find class probability densities by adding back in canceled terms - xSx = np.sum(x @ self.invCov) * x, axis=1).reshape((-1, 1)) + xSx = np.sum((x @ self.invCov) * x, axis=1).reshape((-1, 1)) return -0.5 * (self.nCls*log2pi + self.logDet + xSx) + dv diff --git a/cebl/ml/optim/scg.py b/cebl/ml/optim/scg.py index 1208c94..be5c01c 100644 --- a/cebl/ml/optim/scg.py +++ b/cebl/ml/optim/scg.py @@ -110,8 +110,8 @@ def scg(optable, sigma0 = 1.0e-4 # initial scale - #beta = 0.01 - beta = 1.0 + beta = 0.01 + #beta = 1.0 # force calculation of directional derivatives success = True diff --git a/cebl/rt/filters/filt.py b/cebl/rt/filters/filt.py index db90c92..ed42011 100644 --- a/cebl/rt/filters/filt.py +++ b/cebl/rt/filters/filt.py @@ -82,7 +82,7 @@ def __init__(self, src): self.filters = [] def push(self, filterClass): - if len(self.filters) == 0: + if not self.filters: inSampRate = self.src.getSampRate() inChans = self.src.getChanNames() else: diff --git a/cebl/rt/manager.py b/cebl/rt/manager.py index 9148c67..0a4581d 100644 --- a/cebl/rt/manager.py +++ b/cebl/rt/manager.py @@ -15,16 +15,14 @@ def logExceptionHook(etype, e, trace): wx.LogError(''.join(traceback.format_exception(etype, e, trace)) + 'Uncaught.\n') class Manager: - def __init__(self, pageParent, 
statusPanel=None, - sourceList=sources.sourceList, - defaultSource=sources.defaultSource, - pageList=pages.pageList): + def __init__(self, pageParent, statusPanel=None, sourceList=None, + defaultSource=sources.defaultSource, pageList=None): self.pageParent = pageParent self.statusPanel = statusPanel - self.sourceList = sourceList + self.sourceList = sources.sourceList if sourceList is None else sourceList self.defaultSource = defaultSource - self.pageList = pageList + self.pageList = pages.pageList if pageList is None else pageList self.pages = [] self.src = None diff --git a/cebl/rt/pages/bciplayer.py b/cebl/rt/pages/bciplayer.py index 610e069..55b0425 100644 --- a/cebl/rt/pages/bciplayer.py +++ b/cebl/rt/pages/bciplayer.py @@ -509,7 +509,7 @@ def afterTrain(self, earlyStop): self.saveCap() def trainEpoch(self): - if len(self.curStimList) == 0: + if not self.curStimList: self.curTrainTrial += 1 self.initCurStimList() @@ -787,7 +787,7 @@ def testClearStim(self, event=None): self.pieMenu.clearAllHighlights() self.src.setMarker(0.0) - if len(self.curStimList) == 0: + if not self.curStimList: self.initCurStimList() wx.CallLater(1000.0*self.windowEnd*1.05, self.testClassify) else: diff --git a/cebl/rt/pages/mentaltasks.py b/cebl/rt/pages/mentaltasks.py index ef8713f..aa40a59 100644 --- a/cebl/rt/pages/mentaltasks.py +++ b/cebl/rt/pages/mentaltasks.py @@ -612,7 +612,7 @@ def afterTrain(self, earlyStop): self.saveCap() def trainEpoch(self): - if len(self.curChoices) == 0: + if not self.curChoices: self.curChoices = copy.copy(self.choices) np.random.shuffle(self.curChoices) self.curTrial += 1 @@ -629,7 +629,7 @@ def trainClearTrial(self, event=None): self.src.setMarker(0.0) - if self.curTrial == self.nTrainTrial and len(self.curChoices) == 0: + if not self.curChoices and self.curTrial == self.nTrainTrial: wx.CallLater(1000.0*self.pauseSecs, self.endTrain) else: wx.CallLater(1000.0*self.pauseSecs, self.runTrainEpoch) @@ -984,7 +984,7 @@ def testEpoch(self): wx.CallLater(1000.0*self.decisionSecs*1.1, self.testClassify) def highlightTestTarget(self): - if len(self.curChoices) == 0: + if not self.curChoices: self.curChoices = copy.copy(self.choices) np.random.shuffle(self.curChoices) self.curTrial += 1 @@ -1031,7 +1031,7 @@ def testClearTrial(self): self.pieMenu.clearAllHighlights() self.curDecision = -1 - if self.curTrial == self.nTestTrial and len(self.curChoices) == 0: + if not self.curChoices and self.curTrial == self.nTestTrial: wx.CallLater(1000.0*self.pauseSecs, self.endTest) else: wx.CallLater(1000.0*self.pauseSecs, self.runTestEpoch) diff --git a/cebl/rt/pages/motorpong.py b/cebl/rt/pages/motorpong.py index b415061..65003f9 100644 --- a/cebl/rt/pages/motorpong.py +++ b/cebl/rt/pages/motorpong.py @@ -664,7 +664,7 @@ def afterTrain(self, earlyStop): self.saveCap() def trainEpoch(self): - if len(self.curChoices) == 0: + if not self.curChoices: self.curChoices = copy.copy(self.choices) np.random.shuffle(self.curChoices) self.curTrial += 1 @@ -681,7 +681,7 @@ def trainClearTrial(self, event=None): self.src.setMarker(0.0) - if self.curTrial == self.nTrainTrial and len(self.curChoices) == 0: + if not self.curChoices and self.curTrial == self.nTrainTrial: wx.CallLater(1000.0*self.pauseSecs, self.endTrain) else: wx.CallLater(1000.0*self.pauseSecs, self.runTrainEpoch) @@ -926,7 +926,7 @@ def testEpoch(self): wx.CallLater(1000.0*self.decisionSecs*1.1, self.testClassify) def highlightTestTarget(self): - if len(self.curChoices) == 0: + if not self.curChoices: self.curChoices = copy.copy(self.choices) 
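
Several hunks above replace "len(x) == 0" with "not x". For lists such as curStimList and curChoices the two tests are equivalent, and the truthiness form is the one PEP 8 recommends for sequences. A minimal illustration, independent of CEBL:

    stims = []
    if not stims:            # empty sequences are falsy
        print("no stimuli queued")

    stims.append("Left")
    if stims:                # non-empty sequences are truthy
        print(stims.pop(0))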
np.random.shuffle(self.curChoices) self.curTrial += 1 @@ -986,7 +986,7 @@ def testClearTrial(self): self.pieMenu.clearAllHighlights() self.curDecision = -1 - if self.curTrial == self.nTestTrial and len(self.curChoices) == 0: + if not self.curChoices and self.curTrial == self.nTestTrial: wx.CallLater(1000.0*self.pauseSecs, self.endTest) else: wx.CallLater(1000.0*self.pauseSecs, self.runTestEpoch) diff --git a/cebl/rt/pages/page.py b/cebl/rt/pages/page.py index c39f64a..317e1a5 100644 --- a/cebl/rt/pages/page.py +++ b/cebl/rt/pages/page.py @@ -114,7 +114,7 @@ def stop(self, event=None): try: self.beforeStop() - self.running= False + self.running = False self.mgr.remRunningPage(self) self.afterStop() diff --git a/cebl/rt/sources/erptest.py b/cebl/rt/sources/erptest.py index 318d2e7..d4c772d 100644 --- a/cebl/rt/sources/erptest.py +++ b/cebl/rt/sources/erptest.py @@ -146,7 +146,7 @@ def setTrigger(self, event=None): value = self.triggerValueTextCtrl.GetValue() - if len(value) == 0: + if not value: fValue = 0.0 else: try: diff --git a/cebl/rt/widgets/control.py b/cebl/rt/widgets/control.py index 85f75c7..867d9a9 100644 --- a/cebl/rt/widgets/control.py +++ b/cebl/rt/widgets/control.py @@ -3,23 +3,22 @@ class ControlBox(wx.StaticBoxSizer): - def __init__(self, parent, label='', orient=wx.VERTICAL): + def __init__(self, parent, label="", orient=wx.VERTICAL): wx.StaticBoxSizer.__init__(self, wx.StaticBox(parent, label=label), orient=orient) -'''class LabeledFloatSpinCtrl(wx.Panel): - def __init__(self, parent, label='', digits=3, *args, **kwargs): - wx.Panel.__init__(self, parent=parent) - - self.sizer = wx.BoxSizer(orient=wx.VERTICAL) - - self.label = wx.StaticText(parent=self, label=label) - self.sizer.Add(self.label, proportion=0, flag=wx.BOTTOM, border=5) - - self.spinner = agwfs.FloatSpin(parent=self, *args, **kwargs) - self.spinner.SetFormat("%f") - self.spinner.SetDigits(digits) - self.sizer.Add(self.spinner, proportion=0)#, flag=wx.ALL)#, border=10) - - self.SetSizer(self.sizer) - self.Layout() -''' +##class LabeledFloatSpinCtrl(wx.Panel): +## def __init__(self, parent, label='', digits=3, *args, **kwargs): +## wx.Panel.__init__(self, parent=parent) +## +## self.sizer = wx.BoxSizer(orient=wx.VERTICAL) +## +## self.label = wx.StaticText(parent=self, label=label) +## self.sizer.Add(self.label, proportion=0, flag=wx.BOTTOM, border=5) +## +## self.spinner = agwfs.FloatSpin(parent=self, *args, **kwargs) +## self.spinner.SetFormat("%f") +## self.spinner.SetDigits(digits) +## self.sizer.Add(self.spinner, proportion=0)#, flag=wx.ALL)#, border=10) +## +## self.SetSizer(self.sizer) +## self.Layout() diff --git a/cebl/rt/widgets/mplayer.py b/cebl/rt/widgets/mplayer.py index 78fbac5..27d254e 100644 --- a/cebl/rt/widgets/mplayer.py +++ b/cebl/rt/widgets/mplayer.py @@ -1,4 +1,5 @@ import os + import wx from wx.lib import newevent import wx.media as wxm @@ -11,13 +12,13 @@ def _filesAndPaths(cwd): files = os.listdir(cwd) - files = [f for f in files if not f.startswith('.')] + files = [f for f in files if not f.startswith(".")] filesPath = [cwd + os.path.sep + f for f in files] return files, filesPath class MPlayerPanel(wx.Panel): - def __init__(self, parent=None, cwd='~', *args, **kwargs): + def __init__(self, parent=None, cwd="~", *args, **kwargs): wx.Panel.__init__(self, parent=parent, *args, **kwargs) self.initNavbar() @@ -29,7 +30,7 @@ def __init__(self, parent=None, cwd='~', *args, **kwargs): self.setCWD(cwd) def initMediaCtrl(self): - #self.mediaControlBox = ControlBox(self, label='Media', 
orient=wx.HORIZONTAL) + #self.mediaControlBox = ControlBox(self, label="Media", orient=wx.HORIZONTAL) self.mediaCtrl = wxm.MediaCtrl(self, style=wx.SIMPLE_BORDER) self.mediaCtrl.Hide() @@ -39,51 +40,51 @@ def initMediaCtrl(self): self.Bind(wxm.EVT_MEDIA_STOP, self.onStop) def initNavbar(self): - self.navControlBox = ControlBox(self, label='Navigation', + self.navControlBox = ControlBox(self, label="Navigation", orient=wx.HORIZONTAL) - self.playButton = wx.Button(self, label='Play') + self.playButton = wx.Button(self, label="Play") self.Bind(wx.EVT_BUTTON, self.play, self.playButton) self.navControlBox.Add(self.playButton, proportion=0, flag=wx.ALL | wx.EXPAND, border=5) - self.rewAlbumButton = wx.Button(self, label='Album <-') + self.rewAlbumButton = wx.Button(self, label="Album <-") self.Bind(wx.EVT_BUTTON, self.rewAlbum, self.rewAlbumButton) self.navControlBox.Add(self.rewAlbumButton, proportion=0, flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) - self.forAlbumButton = wx.Button(self, label='Album ->') + self.forAlbumButton = wx.Button(self, label="Album ->") self.Bind(wx.EVT_BUTTON, self.forAlbum, self.forAlbumButton) self.navControlBox.Add(self.forAlbumButton, proportion=0, flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) - self.rewSongButton = wx.Button(self, label='Song <-') + self.rewSongButton = wx.Button(self, label="Song <-") self.Bind(wx.EVT_BUTTON, self.rewSong, self.rewSongButton) self.navControlBox.Add(self.rewSongButton, proportion=0, flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) - self.forSongButton = wx.Button(self, label='Song ->') + self.forSongButton = wx.Button(self, label="Song ->") self.Bind(wx.EVT_BUTTON, self.forSong, self.forSongButton) self.navControlBox.Add(self.forSongButton, proportion=0, flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) - #self.previewButton = wx.Button(self, label='Preview') + #self.previewButton = wx.Button(self, label="Preview") #self.Bind(wx.EVT_BUTTON, self.preview, self.previewButton) #self.navControlBox.Add(self.previewButton, proportion=0, # flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) - #self.stopButton = wx.Button(self, label='Stop') + #self.stopButton = wx.Button(self, label="Stop") #self.Bind(wx.EVT_BUTTON, self.stop, self.stopButton) #self.navControlBox.Add(self.stopButton, proportion=0, # flag=wx.BOTTOM | wx.RIGHT | wx.TOP | wx.EXPAND, border=5) def initMusicLists(self): - self.albumControlBox = ControlBox(self, label='Albums', orient=wx.VERTICAL) + self.albumControlBox = ControlBox(self, label="Albums", orient=wx.VERTICAL) self.albumListBox = wx.ListBox(self, choices=[], style=wx.LB_SINGLE) self.albumControlBox.Add(self.albumListBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=5) - self.songControlBox = ControlBox(self, label='Songs', orient=wx.VERTICAL) + self.songControlBox = ControlBox(self, label="Songs", orient=wx.VERTICAL) self.songListBox = wx.ListBox(self, choices=[], style=wx.LB_SINGLE) self.songControlBox.Add(self.songListBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=5) @@ -114,14 +115,14 @@ def setCWD(self, cwd): def updateAlbums(self): files, filesPath = _filesAndPaths(self.cwd) - self.albumList = [f for f,fp in zip(files,filesPath) + self.albumList = [f for f, fp in zip(files, filesPath) if os.path.isdir(fp)] self.albumList.sort() self.albumListBox.Clear() - if len(self.albumList) > 0: + if self.albumList: self.albumListBox.AppendItems(self.albumList) self.albumListBox.SetSelection(0) self.albumListBox.EnsureVisible(0) @@ -132,27 +133,27 @@ def 
updateSongs(self): files, filesPath = _filesAndPaths(albumPath) - self.songList = [f for f,fp in zip(files,filesPath) - if os.path.isfile(fp) and fp.endswith('.flac')] + self.songList = [f for f, fp in zip(files, filesPath) + if os.path.isfile(fp) and fp.endswith(".flac")] self.songList.sort() self.songListBox.Clear() - if len(self.songList) > 0: + if self.songList: self.songListBox.AppendItems(self.songList) self.songListBox.SetSelection(0) self.albumListBox.EnsureVisible(0) def loadAndPlay(self): - #self.mediaCtrl.Load('/home/idfah/tests/python/wx/reggae.wav') + #self.mediaCtrl.Load("/home/idfah/tests/python/wx/reggae.wav") song = self.songList[self.songListBox.GetSelection()] album = self.albumList[self.albumListBox.GetSelection()] songPath = self.cwd + os.path.sep + album + os.path.sep + song status = self.mediaCtrl.Load(songPath) if not status: - raise RuntimeError('Failed to load song %s.' % str(songPath)) + raise RuntimeError("Failed to load song %s." % str(songPath)) self.mediaCtrl.ShowPlayerControls() self.mediaCtrl.Play() diff --git a/cebl/rt/widgets/textstim.py b/cebl/rt/widgets/textstim.py index 9de823b..20f7ddc 100644 --- a/cebl/rt/widgets/textstim.py +++ b/cebl/rt/widgets/textstim.py @@ -1,13 +1,7 @@ """Text-stimulus widgets that present text stimuli to the screen. """ - -import numpy as np -import random -import string import wx -from cebl import util - from .wxgraphics import DrawablePanel @@ -96,7 +90,7 @@ def setStimText(self, stimText=None, refresh=True): If None (default) the stimulus text will not be drawn. """ - self.stimText = stimText + self.stimText = stimText if refresh: self.refresh() @@ -145,7 +139,7 @@ def setFeedText(self, feedText=None, refresh=True): If None (default) the feedback message will not be drawn. """ - self.feedText = feedText + self.feedText = feedText if refresh: self.refresh() @@ -240,7 +234,7 @@ def drawFeed(self, dc): dc.SetFont(self.feedFont) trimmedFeedText = \ - self.feedText[max(len(self.feedText)-self.feedLength,0):] + self.feedText[max(len(self.feedText)-self.feedLength, 0):] dc.DrawText(trimmedFeedText, 10, 5) From 3f39f7cab6163b72a3e0590dbce3d26d1a5e713a Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Sat, 2 Mar 2019 23:51:24 -0700 Subject: [PATCH 5/9] style: linting utils --- cebl/rt/pages/mentaltasks.py | 9 ++- cebl/rt/pages/motorpong.py | 9 ++- cebl/rt/pages/power.py | 8 ++- cebl/rt/pages/specgram.py | 11 ++-- cebl/rt/widgets/gridspeller.py | 33 +++++----- cebl/util/__init__.py | 15 ++--- cebl/util/arr.py | 106 +++++++++++++++++++++------------ cebl/util/attr.py | 4 ++ cebl/util/clsm.py | 32 +++++++--- cebl/util/comp.py | 38 ++++++++++-- cebl/util/crc.py | 44 -------------- cebl/util/ds.py | 8 --- cebl/util/errm.py | 2 + cebl/util/pack.py | 16 ++--- cebl/util/shuffle.py | 13 ++-- cebl/util/stats.py | 44 +++++++++++++- requirements.txt | 1 + 17 files changed, 234 insertions(+), 159 deletions(-) delete mode 100644 cebl/util/crc.py delete mode 100644 cebl/util/ds.py diff --git a/cebl/rt/pages/mentaltasks.py b/cebl/rt/pages/mentaltasks.py index aa40a59..3032b48 100644 --- a/cebl/rt/pages/mentaltasks.py +++ b/cebl/rt/pages/mentaltasks.py @@ -1,8 +1,11 @@ import copy -import numpy as np + import matplotlib.pyplot as plt from matplotlib.backends.backend_wxagg \ import FigureCanvasWxAgg as FigureCanvas +import munch +import numpy as np + import wx from wx.lib.agw import aui import wx.lib.agw.floatspin as agwfs @@ -487,7 +490,7 @@ def initConfig(self): #self.choices = ['Song', 'Right', 'Count', 'Left'] self.choices = ['Song', 'Right', 
'Cube', 'Left'] - self.welchConfig = util.Holder( + self.welchConfig = munch.Munch( classifierKind = 'Linear Discrim', span = 0.2, logTrans = True, @@ -497,7 +500,7 @@ def initConfig(self): ) # autoregression config - self.autoregConfig = util.Holder( + self.autoregConfig = munch.Munch( horizon = 1 ) diff --git a/cebl/rt/pages/motorpong.py b/cebl/rt/pages/motorpong.py index 65003f9..d22906b 100644 --- a/cebl/rt/pages/motorpong.py +++ b/cebl/rt/pages/motorpong.py @@ -1,8 +1,11 @@ import copy -import numpy as np + import matplotlib.pyplot as plt from matplotlib.backends.backend_wxagg \ import FigureCanvasWxAgg as FigureCanvas +import munch +import numpy as np + import wx from wx.lib.agw import aui import wx.lib.agw.floatspin as agwfs @@ -546,7 +549,7 @@ def initConfig(self): self.choices = ['Left', 'Right'] - self.welchConfig = util.Holder( + self.welchConfig = munch.Munch( classifierKind = 'Linear Discrim', span = 0.5, logTrans = True, @@ -556,7 +559,7 @@ def initConfig(self): ) # autoregression config - self.autoregConfig = util.Holder( + self.autoregConfig = munch.Munch( horizon = 1 ) diff --git a/cebl/rt/pages/power.py b/cebl/rt/pages/power.py index b08396a..e34245e 100644 --- a/cebl/rt/pages/power.py +++ b/cebl/rt/pages/power.py @@ -1,3 +1,5 @@ +import munch + import wx from wx.lib.agw import aui @@ -329,11 +331,11 @@ def initConfig(self): self.method = 'FFT+Welch' - self.welchConfig = util.Holder( - span=1.0 # width of spans/sub-windows used in Welch's method + self.welchConfig = munch.Munch( + span=1.0 # width of spans/sub-windows used in Welch's method ) - self.autoregConfig = util.Holder( + self.autoregConfig = munch.Munch( order=20, nFreq=150 ) diff --git a/cebl/rt/pages/specgram.py b/cebl/rt/pages/specgram.py index b556c7a..7d81205 100644 --- a/cebl/rt/pages/specgram.py +++ b/cebl/rt/pages/specgram.py @@ -1,11 +1,14 @@ +import os +import time + import matplotlib.pyplot as plt from matplotlib.backends.backend_wxagg \ import FigureCanvasWxAgg as FigureCanvas from matplotlib.colors import LogNorm as pltLogNorm from matplotlib.colors import Normalize as pltLinNorm +import munch import numpy as np -import os -import time + import wx from wx.lib.agw import aui @@ -388,12 +391,12 @@ def initConfig(self): self.setRefreshDelay(200) - self.waveletConfig = util.Holder( + self.waveletConfig = munch.Munch( nFreq = 100, span = 10 ) - self.fourierConfig = util.Holder() + self.fourierConfig = munch.Munch() def initCanvas(self): """Initialize a new matplotlib canvas, figure and axis. diff --git a/cebl/rt/widgets/gridspeller.py b/cebl/rt/widgets/gridspeller.py index 6f850ef..9202216 100644 --- a/cebl/rt/widgets/gridspeller.py +++ b/cebl/rt/widgets/gridspeller.py @@ -1,7 +1,9 @@ """Text-stimulus widgets that present text stimuli to the screen. 
""" +import munch import numpy as np + import wx from cebl import util @@ -9,22 +11,21 @@ from .wxgraphics import DrawablePanel -grid = util.Holder() - -grid.enter = '\u21B5' -grid.back = '\u232B' -grid.space = '__' -grid.upper = '\u21E7' -grid.lower = '\u21E9' -grid.ellip = '\u2026' -grid.left = '\u2190' -grid.right = '\u2192' -grid.up = '\u2191' -grid.down = '\u2193' - -grid.num = '123' -grid.etc = 'Etc' -grid.sym = 'Sym' +grid = munch.Munch( + enter = '\u21B5', + back = '\u232B', + space = '__', + upper = '\u21E7', + lower = '\u21E9', + ellip = '\u2026', + left = '\u2190', + right = '\u2192', + up = '\u2191', + down = '\u2193', + num = '123', + etc = 'Etc', + sym = 'Sym' +) grid.normal, grid.highlighted, grid.unhighlighted, grid.selected = range(4) diff --git a/cebl/util/__init__.py b/cebl/util/__init__.py index 3068058..b52738d 100644 --- a/cebl/util/__init__.py +++ b/cebl/util/__init__.py @@ -3,13 +3,17 @@ import sys +#if sys.platform.startswith('linux'): +# from .fasttanh import tanh +#else: +# from numpy import tanh +from numpy import tanh + from .arr import * from .attr import * from .cache import * from .clsm import * from .comp import * -from .crc import * -from .ds import * from .embed import * from .errm import * #from .fasttanh import * @@ -19,10 +23,3 @@ from .parallel import * from .shuffle import * from .stats import * - -#if sys.platform.startswith('linux'): -# from .fasttanh import tanh -#else: -# from numpy import tanh -from numpy import tanh - diff --git a/cebl/util/arr.py b/cebl/util/arr.py index dee9daf..4ae44c4 100644 --- a/cebl/util/arr.py +++ b/cebl/util/arr.py @@ -30,8 +30,8 @@ def accum(x, n, accumf=np.sum, truncate=True, axis=None): The accumulated matrix. Examples: - >>> x = np.reshape((1,)*(4*3*3), (4, 3, 3)) - >>> x + > x = np.reshape((1,)*(4*3*3), (4, 3, 3)) + > x array([[[1, 1, 1], [1, 1, 1], [1, 1, 1]], @@ -47,9 +47,9 @@ def accum(x, n, accumf=np.sum, truncate=True, axis=None): [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]) - >>> accum(x, 2, axis=None) + > accum(x, 2, axis=None) array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) - >>> accum(x, 2, axis=0) + > accum(x, 2, axis=0) array([[[2, 2, 2], [2, 2, 2], [2, 2, 2]], @@ -57,7 +57,7 @@ def accum(x, n, accumf=np.sum, truncate=True, axis=None): [[2, 2, 2], [2, 2, 2], [2, 2, 2]]]) - >>> accum(x, 3, axis=1) + > accum(x, 3, axis=1) array([[[3, 3, 3]], [[3, 3, 3]], @@ -133,14 +133,14 @@ def bias(x, value=1, axis=-1): A new matrix with value appended. Examples: - >>> import numpy as np - >>> from cebl import util + > import numpy as np + > from cebl import util - >>> a = np.arange(10) - >>> util.bias(a, axis=0) + > a = np.arange(10) + > util.bias(a, axis=0) array([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]) - >>> util.bias(a, axis=1) + > util.bias(a, axis=1) array([[ 0., 1.], [ 1., 1.], [ 2., 1.], @@ -151,20 +151,20 @@ def bias(x, value=1, axis=-1): [ 7., 1.], [ 8., 1.], [ 9., 1.]]) - >>> util.bias(a, axis=None) + > util.bias(a, axis=None) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1]) - >>> a = np.random.random((3, 2)) - >>> util.bias(a, axis=0) + > a = np.random.random((3, 2)) + > util.bias(a, axis=0) array([[ 0.5734496 , 0.41789283], [ 0.15415034, 0.99381062], [ 0.80518692, 0.86804327], [ 1. , 1. ]]) - >>> util.bias(a, axis=1) + > util.bias(a, axis=1) array([[ 0.5734496 , 0.41789283, 1. ], [ 0.15415034, 0.99381062, 1. ], [ 0.80518692, 0.86804327, 1. ]]) - >>> util.bias(a, value=3) + > util.bias(a, value=3) array([[ 0.5734496 , 0.41789283, 3. ], [ 0.15415034, 0.99381062, 3. 
], [ 0.80518692, 0.86804327, 3. ]]) @@ -201,6 +201,23 @@ def bias(x, value=1, axis=-1): return xb def capInf(x, copy=False): + """Cap np.inf to a very large value. + + Args: + x: A list or numpy array. + + copy: If False, numpy arrays will be + modified in place. If False or + if x is not a numpy array, then + x will be copied. + + Returns: + A numpy array with the same shape as x + with all np.inf and -np.inf values replaced + by the largest / smallest values that can + be represented by x's dtype, i.e., + np.finfo(x.dtype).min and np.finfo(x.dtype).max + """ x = np.array(x, copy=copy) mn = np.finfo(x.dtype).min @@ -217,23 +234,6 @@ def capInf(x, copy=False): return x -def capZero(x, copy=False): - """ - Notes: If copy is False and x is a numpy array, - then x is modified in place. - """ - x = np.array(x, copy=copy) - - tiny = np.finfo(x.dtype).tiny - - if x.ndim == 0: - if x < tiny: - x[...] = tiny - else: - x[x < tiny] = tiny - - return x - def colmat(x, dtype=None, copy=False): x = np.array(x, copy=copy) @@ -261,6 +261,36 @@ def colsep(x, scale=None, returnScale=False): else: return sep +def capZero(x, copy=False): + """Cap zero values at a very small number that + is larger than zero. + + Args: + x: A list or numpy array. + + copy: If False, numpy arrays will be + modified in place. If False or + if x is not a numpy array, then + x will be copied. + + Returns: + A numpy array with the same shape as x + with all zero values replaced by the + small number, greater than zero found + from np.finfo(x.dtype).tiny + """ + x = np.array(x, copy=copy) + + tiny = np.finfo(x.dtype).tiny + + if x.ndim == 0: + if x < tiny: + x[...] = tiny + else: + x[x < tiny] = tiny + + return x + def hashArray(x): return hashlib.sha1(np.ascontiguousarray(x).view(np.uint8)).hexdigest() @@ -300,6 +330,12 @@ def punion1d(probs): else: return np.apply_along_axis(func1d=punion1d, axis=axis, arr=probs) +def segdot(x1, x2): + assert x1.ndim == 3 + assert x2.ndim == 2 + + return x1.reshape((-1, x1.shape[-1])).dot(x2).reshape((x1.shape[0], -1, x2.shape[-1])) + def segmat(xs, dtype=None, copy=False): xs = np.array(xs, copy=copy) @@ -314,12 +350,6 @@ def segmat(xs, dtype=None, copy=False): return xs -def segdot(x1, x2): - assert x1.ndim == 3 - assert x2.ndim == 2 - - return x1.reshape((-1, x1.shape[-1])).dot(x2).reshape((x1.shape[0], -1, x2.shape[-1])) - def softmaxM1(x): mx = np.max((np.max(x), 0.0)) emx = capZero(np.exp(-mx)) diff --git a/cebl/util/attr.py b/cebl/util/attr.py index 1736063..d572403 100644 --- a/cebl/util/attr.py +++ b/cebl/util/attr.py @@ -1,5 +1,9 @@ +"""Tools for working with iterators. +""" import collections def isiterable(x): + """Return True if we can iterate over x, otherwise False. + """ return isinstance(x, collections.Iterable) diff --git a/cebl/util/clsm.py b/cebl/util/clsm.py index 9725a82..323ae55 100644 --- a/cebl/util/clsm.py +++ b/cebl/util/clsm.py @@ -7,6 +7,8 @@ def roc(classProbs): + """Receiver operating characteristic. + """ if len(classProbs) > 2: raise RuntimeError('roc is only valid for two-class problems.') @@ -115,22 +117,22 @@ def confusion(classLabels, normalize=True): Otherwise, each cell is a label count. Examples: - >>> from cebl import util - >>> import numpy as np + > from cebl import util + > import numpy as np - >>> a = [[0, 0, 0, 1], [1, 1, 1, 1, 1, 0], [2, 2]] + > a = [[0, 0, 0, 1], [1, 1, 1, 1, 1, 0], [2, 2]] - >>> con = util.confusion(a) + > con = util.confusion(a) - >>> con + > con array([[ 0.75 , 0.16666667, 0. ], [ 0.25 , 0.83333333, 0. ], [ 0. , 0. , 1. 
]]) - >>> np.sum(con, axis=0) + > np.sum(con, axis=0) array([ 1., 1., 1.]) - >>> util.confusion(a, normalize=False) + > util.confusion(a, normalize=False) array([[ 3., 1., 0.], [ 1., 5., 0.], [ 0., 0., 2.]]) @@ -149,6 +151,20 @@ def confusion(classLabels, normalize=True): return confMat def itrSimple(accuracy, nCls, decisionRate): + """Compute information transfer rate in bits per minute using + accuracy and number of classes instead of class labels. + + Args: + accuracy: Fraction of correctly labeled examples. + + nCls: Number of classes total. + + decisionRate: Scalar rate at which labels are assigned + in decisions per minute. + + Returns: + Scalar information transfer rate in bits per minute. + """ if accuracy < 0.0 or np.isclose(accuracy, 0.0): return 0.0 @@ -161,7 +177,7 @@ def itrSimple(accuracy, nCls, decisionRate): return decisionRate * (left + middle + right) def itr(classLabels, decisionRate=60.0): - """Information transfer rate in bits-per-minute + """Information transfer rate in bits per minute. Args: classLabels: A list with length equal to the number of classes diff --git a/cebl/util/comp.py b/cebl/util/comp.py index 3921181..c636ce5 100644 --- a/cebl/util/comp.py +++ b/cebl/util/comp.py @@ -1,3 +1,5 @@ +"""Handle file compression easily. +""" import bz2 import gzip import lzma @@ -5,14 +7,40 @@ compressedExtensions = ('xz', 'bz2', 'gz') -def openCompressedFile(fileName, mode='rb', *args, **kwargs): +def openCompressedFile(fileName, mode='rb', **kwargs): + """Open a compressed file using an algorithm derived + from its file extension. + + Args: + fileName: The name of the file to open. + + mode: The mode to use when opening the file. + See the documentation for the standard + open function. + + **kwargs: Additional arguments to pass to the + library used for opening. Generally, + these arguments are the same as in the + standard open function. + + Returns: + A handle to the decompressed file stream. + + Notes: + The following compression methods are suppored: + xz, bz2, gz + + No decompression will be used, and the open function + will be used if the file does not end in one of + these file extensions. + """ fileNameLower = fileName.lower() if fileNameLower.endswith('.xz'): - return lzma.open(fileName, mode, *args, **kwargs) + return lzma.open(fileName, mode, **kwargs) elif fileNameLower.endswith('.bz2'): - return bz2.open(fileName, mode, *args, **kwargs) + return bz2.open(fileName, mode, **kwargs) elif fileNameLower.endswith('.gz'): - return gzip.open(fileName, mode, *args, **kwargs) + return gzip.open(fileName, mode, **kwargs) else: - return open(fileName, mode, *args, **kwargs) + return open(fileName, mode, **kwargs) diff --git a/cebl/util/crc.py b/cebl/util/crc.py deleted file mode 100644 index 66f7af0..0000000 --- a/cebl/util/crc.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Compute CRC checksums. 
- - Refs: - Based on code provided by stack overflow: - http://stackoverflow.com/questions/25239423/crc-ccitt-16-bit-python-manual-calculation -""" - - -POLYNOMIAL = 0x1021 -PRESET = 0 - -def _initial(c): - crc = 0 - c = c << 8 - for j in range(8): - if (crc ^ c) & 0x8000: - crc = (crc << 1) ^ POLYNOMIAL - else: - crc = crc << 1 - c = c << 1 - return crc - -tab = [ _initial(i) for i in range(256) ] - -def _update_crc(crc, c): - cc = 0xff & c - - tmp = (crc >> 8) ^ cc - crc = (crc << 8) ^ tab[tmp & 0xff] - crc = crc & 0xffff - - return crc - -def crc(str): - crc = PRESET - for c in str: - crc = _update_crc(crc, ord(c)) - return crc - -def crcb(*i): - crc = PRESET - for c in i: - crc = _update_crc(crc, c) - return crc diff --git a/cebl/util/ds.py b/cebl/util/ds.py deleted file mode 100644 index 482e861..0000000 --- a/cebl/util/ds.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Data structures. -""" - -#XXX: this could be done with munch -class Holder: - def __init__(self, **kwargs): - for name, value in kwargs.items(): - setattr(self, name, value) diff --git a/cebl/util/errm.py b/cebl/util/errm.py index b6fbd20..b7a321a 100644 --- a/cebl/util/errm.py +++ b/cebl/util/errm.py @@ -54,6 +54,8 @@ def nrmse(y, g=0.0, mn=None, mx=None, axis=None): return rmse(r, axis=axis)/(mx-mn) def gini(y, g, normalize=True): + """Gini coefficient. + """ if y.ndim > 1 or g.ndim > 1: raise RuntimeError('Gini does not currently support more than one axis.') diff --git a/cebl/util/pack.py b/cebl/util/pack.py index 17bbdd4..054f8e6 100644 --- a/cebl/util/pack.py +++ b/cebl/util/pack.py @@ -80,18 +80,18 @@ def packedViews(shapes, dtype=np.float): this approach to work. Examples: - >>> import numpy as np - >>> from cebl import util + > import numpy as np + > from cebl import util - >>> packed, view1, view2 = util.packedViews(((2,2), (2,3,4))) + > packed, view1, view2 = util.packedViews(((2,2), (2,3,4))) - >>> view1[...] = np.arange(view1.size).reshape(view1.shape) - >>> view1 + > view1[...] = np.arange(view1.size).reshape(view1.shape) + > view1 array([[ 0., 1.], [ 2., 3.]]) - >>> view2[...] = 7.0 - >>> view2 + > view2[...] = 7.0 + > view2 array([[[ 7., 7., 7., 7.], [ 7., 7., 7., 7.], [ 7., 7., 7., 7.]], @@ -100,7 +100,7 @@ def packedViews(shapes, dtype=np.float): [ 7., 7., 7., 7.], [ 7., 7., 7., 7.]]]) - >>> packed + > packed array([ 0., 1., 2., 3., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7., 7.]) diff --git a/cebl/util/shuffle.py b/cebl/util/shuffle.py index 12339e5..1df62c2 100644 --- a/cebl/util/shuffle.py +++ b/cebl/util/shuffle.py @@ -29,7 +29,7 @@ def blockShuffle(x, n, axis=None): shp = x.shape - indx = np.reshape(range(x.size), (x.size//n,n)) + indx = np.reshape(range(x.size), (x.size//n, n)) np.random.shuffle(indx) indx = indx.reshape((-1,)) @@ -41,7 +41,7 @@ def blockShuffle(x, n, axis=None): (axis, x.shape[axis], n)) lax = x.shape[axis] - indx = np.reshape(range(lax), (lax//n,n)) + indx = np.reshape(range(lax), (lax//n, n)) np.random.shuffle(indx) indx = indx.reshape((-1,)) @@ -62,16 +62,15 @@ def cycle(x, n): New list with length n. 
Examples: - >>> import util + > import util - >>> x = range(4) + > x = range(4) - >>> util.cycle(x, 3) + > util.cycle(x, 3) [0, 1, 2] - >>> util.cycle(x, 6) + > util.cycle(x, 6) [0, 1, 2, 3, 0, 1] """ l = int(np.ceil(n/float(len(x)))) return (x*l)[0:n] - diff --git a/cebl/util/stats.py b/cebl/util/stats.py index 4261de3..aedabab 100644 --- a/cebl/util/stats.py +++ b/cebl/util/stats.py @@ -5,9 +5,47 @@ import scipy.stats as spstats -def conf(x, width=0.95, axis=None): +def tconf(x, width=0.95, axis=None): + """Compute confidence intervals of the student's t distribution. + + Args: + x: A list or numpy array of floats of + observed values. + + width: The width of the confidence interval. + + axis: The axis along which to compute the + confidence intervales. + + Returns: + If axis is None, a tuple containing the (lower, upper) + confidence intervals. If axis is not None, then a matrix + containing the pairs of lower and upper confidence intervals + is returned. + + Examples: + > x = np.random.normal(size=1000, loc=2.2, scale=2.0) + + > util.conf(x) + (2.1420429487795936, 2.3901103741951317) + + > util.conf(x, width=0.9, axis=1) + array([[ 1.7757, 2.8146], + [ 1.3863, 2.4277], + [ 1.7898, 2.8078], + [ 1.7131, 2.768 ], + [ 1.5982, 2.6495], + [ 1.6322, 2.6527], + [ 1.0066, 2.0738], + [ 1.6856, 2.726 ], + [ 1.3983, 2.4251], + [ 1.5353, 2.5691]]) + + """ + x = np.array(x, copy=False) + def conf1(v): - return spstats.t.interval(width, len(v)-1, - loc=np.mean(v), scale=spstats.sem(v)) + return spstats.t.interval( + width, len(v)-1, loc=np.mean(v), scale=spstats.sem(v)) return conf1(x.ravel()) if axis is None else np.apply_along_axis(conf1, axis, x) diff --git a/requirements.txt b/requirements.txt index 7cac954..5d92924 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ matplotlib +munch numpy pylibftdi scipy From 1a66bbd0a666fc8b76fc0410592fcd6673eeaf91 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Sun, 3 Mar 2019 01:18:02 -0700 Subject: [PATCH 6/9] style: pylinting sig --- cebl/sig/psd.py | 8 +++-- cebl/sig/resamp.py | 2 ++ cebl/sig/smooth.py | 64 +++++++++++++++++++++++++++++++++++--- cebl/sig/spatial.py | 2 ++ cebl/sig/specgram.py | 15 ++++----- cebl/sig/stat.py | 14 +++++++++ cebl/sig/windows.py | 73 +++++++++++++++++++++++++++++++++++++++++--- 7 files changed, 160 insertions(+), 18 deletions(-) diff --git a/cebl/sig/psd.py b/cebl/sig/psd.py index 87a61e7..9ae1734 100644 --- a/cebl/sig/psd.py +++ b/cebl/sig/psd.py @@ -109,9 +109,11 @@ def __init__(self, s, sampRate=1.0, span=3.0, overlap=0.5, windowFunc=windows.ha Refs: @article{heinzel2002spectrum, - title={Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), - including a comprehensive list of window functions and some new flat-top windows}, - author={Heinzel, G. and R{\"u}diger, A. and Schilling, R. and Hannover, T.}, + title={Spectrum and spectral density estimation by the Discrete + Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows}, + author={Heinzel, G. and R{\"u}diger, A. and Schilling, + R. and Hannover, T.}, journal={Max Plank Institute}, year={2002} } diff --git a/cebl/sig/resamp.py b/cebl/sig/resamp.py index e2d4c76..348fdb1 100644 --- a/cebl/sig/resamp.py +++ b/cebl/sig/resamp.py @@ -1,3 +1,5 @@ +"""Signal temporal resampling. 
+""" import matplotlib.pyplot as plt import numpy as np import scipy.signal as spsig diff --git a/cebl/sig/smooth.py b/cebl/sig/smooth.py index 22b2df9..aa6e36e 100644 --- a/cebl/sig/smooth.py +++ b/cebl/sig/smooth.py @@ -1,3 +1,5 @@ +"""Signal temporal smoothing. +""" import matplotlib.pyplot as plt import numpy as np import scipy.signal as spsig @@ -8,6 +10,25 @@ def movingAverage(s, width=2, kernelFunc=windows.boxcar, **kwargs): + """Moving average filter. + + Args: + s: A list or numpy array of shape (nObs, nDim) + containing the values of the signal to filter. + + width: The width of the filter kernel, i.e., the + number of taps. + + kernelFunc: A callable function that takes width as an + argument and generates the desired kernel + window function. Defaults to windows.boxcar. + + **kwargs: Additional arguments to pass to kernelFunc. + + Returns: + A numpy array of shape (nObs, nDim) containing the + filtered version of s. + """ s = util.colmat(s) kernel = kernelFunc(width, **kwargs) @@ -19,16 +40,51 @@ def movingAverage(s, width=2, kernelFunc=windows.boxcar, **kwargs): v=kernel, mode='same') def savitzkyGolay(s, *args, **kwargs): + """Savitzky Golay filter. + This is simply a multi-channel wrapper around + scipy.signal.savgol_filter. + + Args: + s: A list or numpy array of shape (nObs, nDim) + containing the values of the signal to filter. + + *args: Additional arguments to pass to the + **kwargs: scipy.signal.savgol_filter function. + + Returns: + A numpy array of shape (nObs, nDim) containing the + filtered version of s. + """ return spsig.savgol_filter(s, *args, axis=0, **kwargs) -def wiener(s, size=None, noise=None): - # have to add astype because spsig.wiener does not preserve dtype of argument, bug that should be reported XXX - idfah +def wiener(s, size=None, **kwargs): + """Wiener Filter. + This is largely a multi-channel wrapper around + scipy.signal.wiener. + + Args: + s: A list or numpy array of shape (nObs, nDim) + containing the values of the signal to filter. + + size: The size of the wiener filter window. + + **kwargs: Additional arguments to pass to the + scipy.signal.wiener function. + + Returns: + A numpy array of shape (nObs, nDim) containing the + filtered version of s. + """ + # have to add astype because spsig.wiener does not preserve dtype + # bug that should be reported XXX - idfah return np.apply_along_axis( spsig.wiener, axis=0, arr=s, - mysize=size, noise=noise).astype(s.dtype, copy=False) + mysize=size, **kwargs).astype(s.dtype, copy=False) def demoSmooth(): + """Demonstrations of the smoothing functions in this module. + """ n = 1024 x = np.linspace(-10.0*np.pi, 10.0*np.pi, n) @@ -50,7 +106,7 @@ def demoSmooth(): wnSize = 9 wnNoise = 0.5 - yWN = wiener(y, wnSize, wnNoise) + yWN = wiener(y, wnSize, noise=wnNoise) sep = -np.arange(0, 3)[None,:]*2.0 diff --git a/cebl/sig/spatial.py b/cebl/sig/spatial.py index 05360c9..ff659dc 100644 --- a/cebl/sig/spatial.py +++ b/cebl/sig/spatial.py @@ -1,3 +1,5 @@ +""" Spatial signal transformations, i.e., across channels. 
+""" import matplotlib.pyplot as plt import numpy as np import scipy.signal as spsig diff --git a/cebl/sig/specgram.py b/cebl/sig/specgram.py index ffd89e4..647007b 100644 --- a/cebl/sig/specgram.py +++ b/cebl/sig/specgram.py @@ -67,7 +67,7 @@ def getFreqsPowers(self): def getFreqsPowersPhases(self): return self.freqs, self.powers, self.phases - def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None, **kwargs): + def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None): if chanNames is None: chanNames = [str(i) for i in range(self.nChan)] @@ -110,7 +110,6 @@ def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None, **kwar else: ax = axs[i] - # should pass kwargs to imshow? XXX - idfah img = ax.imshow(powers[:,:,i].T, interpolation='bicubic', origin='lower', cmap=plt.cm.get_cmap('jet'), aspect='auto', norm=norm, extent=(self.times[0], self.times[-1], @@ -129,8 +128,8 @@ def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None, **kwar if colorbar: if newAxs: fig.tight_layout() - #cbar = fig.colorbar(imgs[-1], norm=norm, ax=axs, pad=0.025, fraction=0.06)#, anchor=(0.0, 0.1)) - cbar = fig.colorbar(imgs[-1], norm=norm, ax=axs, pad=0.025, fraction=0.1)#, anchor=(0.0, 0.1)) + cbar = fig.colorbar(imgs[-1], norm=norm, ax=axs, + pad=0.025, fraction=0.1)#, anchor=(0.0, 0.1)) else: cbar = axs[-1].colorbar(imgs[-1], norm=norm) cbar.set_label(zlabel) @@ -150,7 +149,8 @@ def __init__(self, s, sampRate=1.0, **kwargs): class FFTSpectrogram(SpectrogramBase): - def __init__(self, s, sampRate=1.0, span=0.1, overlap=0.5, windowFunc=windows.hanning, pad=False): + def __init__(self, s, sampRate=1.0, span=0.1, overlap=0.5, + windowFunc=windows.hanning, pad=False): s = util.colmat(s) nObs, nChan = s.shape @@ -209,7 +209,7 @@ def __init__(self, s, sampRate=1.0, span=0.1, overlap=0.5, windowFunc=windows.ha # wrapper around class constructors # pylint: disable=invalid-name -def Spectrogram(s, method='cwt', *args, **kwargs): +def Spectrogram(s, *args, method='cwt', **kwargs): method = method.lower() if method == 'cwt': return CWTSpectrogram(s, *args, **kwargs) @@ -230,7 +230,8 @@ def demoCWT(): t = np.linspace(0.0, sampRate*10.0, sampRate*10.0) s1 = np.sin(t*2.0*np.pi*20.0/float(sampRate)) - s2 = np.sin(t*2.0*np.pi*60.0/float(sampRate)) + np.random.normal(scale=0.02, size=t.size) + s2 = np.sin(t*2.0*np.pi*60.0/float(sampRate)) + \ + np.random.normal(scale=0.02, size=t.size) s3 = np.cumsum(np.random.normal(scale=0.05, size=t.size)) s = np.vstack((s1, s2, s3)).T diff --git a/cebl/sig/stat.py b/cebl/sig/stat.py index 02df2a7..097ea84 100644 --- a/cebl/sig/stat.py +++ b/cebl/sig/stat.py @@ -1,7 +1,21 @@ +"""Signal statistical measures. +""" import numpy as np def autoCorrelation(s): + """Compute the autocorrelations of s. + + Args: + s: A numpy array or list of floating point + values representing the signal. + + Returns: + A numpy array where the i'th values contains + the autocorrelation of the signal at i time lags. + """ + s = np.array(s, copy=False) + def ac1d(x): var = x.var() x = x - x.mean() diff --git a/cebl/sig/windows.py b/cebl/sig/windows.py index f9576cc..da95106 100644 --- a/cebl/sig/windows.py +++ b/cebl/sig/windows.py @@ -1,27 +1,83 @@ +"""Window functions. 
+ +Note: + Includes all window functions from scipy.signal.windows +""" import numpy as np # include all window functions in scipy.signal.windows -# pylint: disable=unused-wildcard-import +# pylint: disable=unused-wildcard-import,wildcard-import from scipy.signal.windows import * def kroneckerDelta(n): + """Kronecker delta window. + + Args: + n: The width of the window. + + Returns: + A numpy array of width n containing + the values of the window. + """ taps = np.zeros(n) taps[n//2] = 1.0 return taps def lanczos(n, radius=3): + """Lanczos window. + + Args: + n: The width of the window. + + radius: Radius of the Lanczos function, + i.e., the width of the central + lobe of the sinc function. + + Returns: + A numpy array of width n containing + the values of the window. + """ taps = np.linspace(-radius, radius, n) return np.sinc(taps/radius) -def ramp(n): +def ramp(n, corner1=None, corner2=None): + """Ramp window. Linearly increases until + corner1, then flat until corner2, then linearly + decreases until the end of the window. + + Args: + n: The width of the window. + + corner1: Number of time steps after which + the window levels off. If None + (default) then will be floor(n/3). + + corner2: Number of time steps after which + the window begins to decrease. + If None (default) then will be + ceil(n*2/3). + + Note: + Raises a RuntimeError if corner1 > corner2. + + Returns: + A numpy array of width n containing + the values of the window. + """ if n < 3: return np.zeros(n) window = np.arange(n)*1.0 - corner1 = int(np.floor(n/3.0)) - corner2 = int(np.ceil(n*2/3.0)) + if corner1 is None: + corner1 = int(np.floor(n/3.0)) + + if corner2 is None: + corner2 = int(np.ceil(n*2/3.0)) + + if corner1 > corner2: + raise RuntimeError("Invalid ramp window corners.") up = window[:corner1] top = window[corner1:corner2] @@ -34,5 +90,14 @@ def ramp(n): return window def sinc(n, radius=3, freq=1.0): + """Sinc window. + + Args: + n: The width of the window. + + Returns: + A numpy array of width n containing + the values of the window. + """ taps = np.linspace(-radius, radius, n) return freq * np.sinc(freq*taps) From 42f2df857f08f6d8f59f057709786fb4878729f0 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Thu, 7 Mar 2019 11:53:04 -0700 Subject: [PATCH 7/9] style: linting in ml --- .pylintrc | 4 +- cebl/ml/autoreg.py | 92 +++++++------- cebl/ml/classifier.py | 84 ++++++++++--- cebl/ml/da.py | 125 ++++++++++--------- cebl/ml/knn.py | 4 +- cebl/ml/linreg.py | 6 +- cebl/ml/nnet/esn.py | 8 ++ cebl/ml/nnet/forward.py | 36 +++--- cebl/ml/nnet/softmax.py | 7 +- cebl/ml/optim/__init__.py | 1 - cebl/ml/optim/minibatch.py | 16 +-- cebl/ml/optim/optable.py | 6 +- cebl/ml/optim/pso.py | 36 +++--- cebl/ml/optim/rprop.py | 238 ++++++------------------------------- cebl/ml/optim/sciopt.py | 29 +++-- cebl/ml/optim/sgd.py | 20 ++-- cebl/ml/optim/steepest.py | 36 +++--- cebl/ml/strans/__init__.py | 3 +- cebl/ml/strans/pca.py | 28 ++++- cebl/ml/strans/strans.py | 4 +- cebl/sig/cwt.pyx | 62 +++++----- cebl/util/clsm.py | 3 +- requirements.txt | 14 +-- 23 files changed, 403 insertions(+), 459 deletions(-) diff --git a/.pylintrc b/.pylintrc index adcda84..2e813e7 100644 --- a/.pylintrc +++ b/.pylintrc @@ -508,10 +508,10 @@ max-attributes=20 max-bool-expr=5 # Maximum number of branch for function / method body -max-branches=12 +max-branches=20 # Maximum number of locals for function / method body -max-locals=35 +max-locals=40 # Maximum number of parents for a class (see R0901). 
max-parents=7 diff --git a/cebl/ml/autoreg.py b/cebl/ml/autoreg.py index 7b112f3..ce5090a 100644 --- a/cebl/ml/autoreg.py +++ b/cebl/ml/autoreg.py @@ -13,7 +13,7 @@ class AutoRegressionBase: def __init__(self, ss, horizon, regClass, *args, **kwargs): """ Args: - ss: (nSeg,nObs[,nDim]) + ss: (nSeg, nObs, [nDim]) """ ss = np.asarray(ss) self.horizon = horizon @@ -30,40 +30,40 @@ def getTargets(self, ss): def train(self, ss, *args, **kwargs): raise NotImplementedError('train not implemented.') - def eval(self, ss, returnResid=False, *args, **kwargs): + def eval(self, ss, *args, returnResid=False, **kwargs): raise NotImplementedError('eval not implemented.') def resid(self, ss, *args, **kwargs): pred, resid = self.eval(ss, *args, returnResid=True, **kwargs) return resid - def abe(self, ss, axis=None, *args, **kwargs): + def abe(self, ss, *args, axis=None, **kwargs): resid = self.resid(ss, *args, **kwargs) return util.abe(resid, axis=axis) - def sse(self, ss, axis=None, *args, **kwargs): + def sse(self, ss, *args, axis=None, **kwargs): resid = self.resid(ss, *args, **kwargs) return util.sse(resid, axis=axis) - def mse(self, ss, axis=None, *args, **kwargs): + def mse(self, ss, *args, axis=None, **kwargs): resid = self.resid(ss, *args, **kwargs) return util.mse(resid, axis=axis) - def rmse(self, ss, axis=None, *args, **kwargs): + def rmse(self, ss, *args, axis=None, **kwargs): resid = self.resid(ss, *args, **kwargs) return util.rmse(resid, axis=axis) - def nrmse(self, ss, axis=None, *args, **kwargs): + def nrmse(self, ss, *args, axis=None, **kwargs): resid = self.resid(ss, *args, **kwargs) return util.nrmse(resid, axis=axis) class AutoRegression(AutoRegressionBase): - def __init__(self, ss, order, horizon=1, regClass=RidgeRegression, *args, **kwargs): + def __init__(self, ss, order, horizon=1, regClass=RidgeRegression, **kwargs): self.order = order AutoRegressionBase.__init__(self, ss, horizon=horizon, - regClass=regClass, *args, **kwargs) + regClass=regClass, **kwargs) def getInputs(self, ss): ss = util.segmat(ss) @@ -82,7 +82,7 @@ def train(self, ss, *args, **kwargs): self.model = self.regClass(x, g, *args, **kwargs) - def eval(self, ss, returnResid=False, *args, **kwargs): + def eval(self, ss, *args, returnResid=False, **kwargs): xs = self.getInputs(ss) gs = self.getTargets(ss) @@ -98,7 +98,7 @@ class AR(AutoRegression): pass def demoAutoRegressionSine(): - time = np.linspace(0.0,10.0*np.pi,5000) + time = np.linspace(0.0, 10.0*np.pi, 5000) s = np.sin(time) #data = [s + np.random.normal(size=s.shape, scale=0.3) for i in range(5)] @@ -115,7 +115,7 @@ def demoAutoRegressionSine(): plt.plot(time[order:], pred[0], color='red') def demoAutoRegressionMulti(): - time = np.linspace(0.0,10.0*np.pi,5000) + time = np.linspace(0.0, 10.0*np.pi, 5000) # noisy cosine chirp s1 = np.cos(time**2/10.0) @@ -127,7 +127,7 @@ def demoAutoRegressionMulti(): n = len(time) s2 = np.empty(n) s2[0] = 0.1 - for i in range(1,n): + for i in range(1, n): s2[i] = np.exp(-a*s2[i-1]**2) + b s3 = np.random.normal(size=len(time), scale=0.3) @@ -153,9 +153,9 @@ def demoAutoRegressionMulti(): sepTrain = np.arange(dataTrain.shape[2])*2.0*np.max(np.abs(data)) sepTest = np.arange(dataTest.shape[2])*2.0*np.max(np.abs(data)) - fig = plt.figure(figsize=(19,8)) + fig = plt.figure(figsize=(19, 8)) - axTrainPred = fig.add_subplot(2,3,1) + axTrainPred = fig.add_subplot(2, 3, 1) axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color='gray', linewidth=2) axTrainPred.plot(timeTrain[order:], predTrain[0]-sepTrain, linewidth=1) 
axTrainPred.autoscale(tight=True) @@ -164,7 +164,7 @@ def demoAutoRegressionMulti(): axTrainPred.set_yticks(-sepTrain) axTrainPred.set_yticklabels(['s1', 's2', 's3']) - axTestPred = fig.add_subplot(2,3,2) + axTestPred = fig.add_subplot(2, 3, 2) axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) axTestPred.plot(timeTest[order:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) @@ -173,7 +173,7 @@ def demoAutoRegressionMulti(): axTestPred.set_yticks(-sepTrain) axTestPred.set_yticklabels(['s1', 's2', 's3']) - axWeights = fig.add_subplot(2,3,3) + axWeights = fig.add_subplot(2, 3, 3) img = axWeights.imshow(arFit.model.weights, aspect='auto', interpolation='none') cbar = plt.colorbar(img) cbar.set_label('Weight') @@ -183,10 +183,10 @@ def demoAutoRegressionMulti(): axWeights.set_xticks(range(arFit.model.weights.shape[1])) axWeights.set_xticklabels(['s1', 's2', 's3']) axWeights.set_yticks(range(arFit.model.weights.shape[0])) - axWeights.set_yticklabels(list(range(1,arFit.model.weights.shape[0]) + ['bias'])) + axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ['bias'])) axWeights.autoscale(tight=True) - axTrainResid = fig.add_subplot(2,3,4) + axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[order:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) axTrainResid.set_title('Train Residuals') @@ -194,7 +194,7 @@ def demoAutoRegressionMulti(): axTrainResid.set_yticks(-sepTrain) axTrainResid.set_yticklabels(['s1', 's2', 's3']) - axTestResid = fig.add_subplot(2,3,5) + axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[order:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) axTestResid.set_title('Test Residuals') @@ -202,7 +202,7 @@ def demoAutoRegressionMulti(): axTestResid.set_yticks(-sepTrain) axTestResid.set_yticklabels(['s1', 's2', 's3']) - axTestResidDist = fig.add_subplot(2,3,6) + axTestResidDist = fig.add_subplot(2, 3, 6) #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) axTestResidDist.legend(['s1', 's2', 's3']) @@ -229,7 +229,7 @@ def train(self, ss, *args, **kwargs): self.model.append(self.regClass(x, g, *args, **kwargs)) - def eval(self, ss, returnResid=False, *args, **kwargs): + def eval(self, ss, *args, returnResid=False, **kwargs): ss = util.segmat(ss) preds = [] @@ -245,10 +245,10 @@ def eval(self, ss, returnResid=False, *args, **kwargs): if returnResid: gi.append(gs.squeeze(2)) - preds = np.rollaxis(np.array(preds), 0,3) + preds = np.rollaxis(np.array(preds), 0, 3) if returnResid: - gs = np.rollaxis(np.array(gi), 0,3) + gs = np.rollaxis(np.array(gi), 0, 3) resids = gs - preds return preds, resids else: @@ -258,7 +258,7 @@ class UAR(UnivariateAutoRegression): pass def demoAutoRegressionUni(): - time = np.linspace(0.0,10.0*np.pi,5000) + time = np.linspace(0.0, 10.0*np.pi, 5000) # noisy cosine chirp s1 = np.cos(time**2/10.0) @@ -270,7 +270,7 @@ def demoAutoRegressionUni(): n = len(time) s2 = np.empty(n) s2[0] = 0.1 - for i in range(1,n): + for i in range(1, n): s2[i] = np.exp(-a*s2[i-1]**2) + b s3 = np.random.normal(size=len(time), scale=0.3) @@ -296,9 +296,9 @@ def demoAutoRegressionUni(): sepTrain = np.arange(dataTrain.shape[2])*2.0*np.max(np.abs(data)) sepTest = np.arange(dataTest.shape[2])*2.0*np.max(np.abs(data)) - fig = plt.figure(figsize=(19,8)) + fig = plt.figure(figsize=(19, 8)) - axTrainPred = fig.add_subplot(2,3,1) + axTrainPred = fig.add_subplot(2, 3, 1) axTrainPred.plot(timeTrain, 
dataTrain[0]-sepTrain, color='gray', linewidth=2) axTrainPred.plot(timeTrain[order:], predTrain[0]-sepTrain, linewidth=1) axTrainPred.autoscale(tight=True) @@ -307,7 +307,7 @@ def demoAutoRegressionUni(): axTrainPred.set_yticks(-sepTrain) axTrainPred.set_yticklabels(['s1', 's2', 's3']) - axTestPred = fig.add_subplot(2,3,2) + axTestPred = fig.add_subplot(2, 3, 2) axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) axTestPred.plot(timeTest[order:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) @@ -316,7 +316,7 @@ def demoAutoRegressionUni(): axTestPred.set_yticks(-sepTrain) axTestPred.set_yticklabels(['s1', 's2', 's3']) - axWeights = fig.add_subplot(2,3,3) + axWeights = fig.add_subplot(2, 3, 3) #img = axWeights.imshow(arFit.model.weights, aspect='auto', interpolation='none') #cbar = plt.colorbar(img) #cbar.set_label('Weight') @@ -326,10 +326,10 @@ def demoAutoRegressionUni(): #axWeights.set_xticks(range(arFit.model.weights.shape[1])) #axWeights.set_xticklabels(['s1', 's2', 's3']) #axWeights.set_yticks(range(arFit.model.weights.shape[0])) - #axWeights.set_yticklabels(list(range(1,arFit.model.weights.shape[0]) + ['bias'])) + #axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ['bias'])) #axWeights.autoscale(tight=True) - axTrainResid = fig.add_subplot(2,3,4) + axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[order:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) axTrainResid.set_title('Train Residuals') @@ -337,7 +337,7 @@ def demoAutoRegressionUni(): axTrainResid.set_yticks(-sepTrain) axTrainResid.set_yticklabels(['s1', 's2', 's3']) - axTestResid = fig.add_subplot(2,3,5) + axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[order:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) axTestResid.set_title('Test Residuals') @@ -345,7 +345,7 @@ def demoAutoRegressionUni(): axTestResid.set_yticks(-sepTrain) axTestResid.set_yticklabels(['s1', 's2', 's3']) - axTestResidDist = fig.add_subplot(2,3,6) + axTestResidDist = fig.add_subplot(2, 3, 6) #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) axTestResidDist.legend(['s1', 's2', 's3']) @@ -357,10 +357,10 @@ def demoAutoRegressionUni(): class RecurrentAutoRegression(AutoRegressionBase): - def __init__(self, ss, horizon=1, transient=0, regClass=nnet.ESN, *args, **kwargs): + def __init__(self, ss, horizon=1, transient=0, regClass=nnet.ESN, **kwargs): self.transient = transient AutoRegressionBase.__init__(self, ss, horizon=horizon, - regClass=regClass, *args, **kwargs) + regClass=regClass, **kwargs) def getInputs(self, ss): ss = np.asarray(ss) @@ -376,7 +376,7 @@ def train(self, ss, *args, **kwargs): self.model = self.regClass(xs, gs, *args, **kwargs) - def eval(self, ss, returnResid=False, *args, **kwargs): + def eval(self, ss, *args, returnResid=False, **kwargs): xs = self.getInputs(ss) gs = self.getTargets(ss) @@ -392,7 +392,7 @@ class RAR(RecurrentAutoRegression): pass def demoRecurrentAutoRegression(): - time = np.linspace(0.0,10.0*np.pi,5000) + time = np.linspace(0.0, 10.0*np.pi, 5000) # noisy cosine chirp s1 = np.cos(time**2/10.0) @@ -404,7 +404,7 @@ def demoRecurrentAutoRegression(): n = len(time) s2 = np.empty(n) s2[0] = 0.1 - for i in range(1,n): + for i in range(1, n): s2[i] = np.exp(-a*s2[i-1]**2) + b s3 = np.random.normal(size=len(time), scale=0.3) @@ -430,9 +430,9 @@ def demoRecurrentAutoRegression(): sepTrain = 
np.arange(dataTrain.shape[2])*2.0*np.max(np.abs(data)) sepTest = np.arange(dataTest.shape[2])*2.0*np.max(np.abs(data)) - fig = plt.figure(figsize=(19,8)) + fig = plt.figure(figsize=(19, 8)) - axTrainPred = fig.add_subplot(2,3,1) + axTrainPred = fig.add_subplot(2, 3, 1) axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color='gray', linewidth=2) axTrainPred.plot(timeTrain[horizon:], predTrain[0]-sepTrain, linewidth=1) axTrainPred.autoscale(tight=True) @@ -441,7 +441,7 @@ def demoRecurrentAutoRegression(): axTrainPred.set_yticks(-sepTrain) axTrainPred.set_yticklabels(['s1', 's2', 's3']) - axTestPred = fig.add_subplot(2,3,2) + axTestPred = fig.add_subplot(2, 3, 2) axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) axTestPred.plot(timeTest[horizon:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) @@ -450,10 +450,10 @@ def demoRecurrentAutoRegression(): axTestPred.set_yticks(-sepTrain) axTestPred.set_yticklabels(['s1', 's2', 's3']) - axWeights = fig.add_subplot(2,3,3) + axWeights = fig.add_subplot(2, 3, 3) rarFit.model.reservoir.plotActDensity(dataTest, ax=axWeights) - axTrainResid = fig.add_subplot(2,3,4) + axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[horizon:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) axTrainResid.set_title('Train Residuals') @@ -461,7 +461,7 @@ def demoRecurrentAutoRegression(): axTrainResid.set_yticks(-sepTrain) axTrainResid.set_yticklabels(['s1', 's2', 's3']) - axTestResid = fig.add_subplot(2,3,5) + axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[horizon:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) axTestResid.set_title('Test Residuals') @@ -469,7 +469,7 @@ def demoRecurrentAutoRegression(): axTestResid.set_yticks(-sepTrain) axTestResid.set_yticklabels(['s1', 's2', 's3']) - axTestResidDist = fig.add_subplot(2,3,6) + axTestResidDist = fig.add_subplot(2, 3, 6) #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) axTestResidDist.legend(['s1', 's2', 's3']) diff --git a/cebl/ml/classifier.py b/cebl/ml/classifier.py index 942231b..2edf2bc 100644 --- a/cebl/ml/classifier.py +++ b/cebl/ml/classifier.py @@ -1,7 +1,9 @@ +"""Base class for classifiers. +""" import numpy as np from cebl import util -from cebl.util.clsm import * +from cebl.util import clsm from . import label @@ -15,7 +17,7 @@ def __init__(self, nIn, nCls): Args: nCls: Number of classes. - nIn: Number of input dimensions. + nIn: Number of input dimensions. """ self.nIn = nIn self.nCls = nCls @@ -31,6 +33,21 @@ def train(self): raise NotImplementedError('train not implemented.') def discrim(self, x, *args, **kwargs): + """Discriminant function for a classifier. This method is used to + discriminate between classes, i.e., assign class labels. It simply + calss self.probs in order to establish the default discriminant + function but it is faster for some classifiers to override this + and return discriminants without performing full probability + computations. + + Args: + x: A numpy array of shape (nObs, [nIn]) containing the + input data for which we seek to find the discriminants. + + Returns: + A numpy array of shape (nObs, nCls) containing the discriminant + values for each observation and class. + """ return self.probs(x, *args, **kwargs) def discrimKnown(self, classData, *args, **kwargs): @@ -42,7 +59,9 @@ def probs(self, x): by ALL classifiers. Args: - x: Input data. A numpy array with shape (nObs[,nIn]). 
+ x: A numpy array of shape (nObs, [nIn]) containing the + input data for which we seek to find class membership + probabilities. Returns: A numpy array of shape (nObs,nCls) containing the @@ -57,9 +76,11 @@ def probs(self, x): raise NotImplementedError('probs not implemented.') def probsKnown(self, classData, *args, **kwargs): + """Assign probabilities to data with known class membership. + """ return [self.probs(cls, *args, **kwargs) for cls in classData] - def label(self, x, method='single', *args, **kwargs): + def label(self, x, *args, method='single', **kwargs): """Assign class labels to novel data. Args: @@ -90,7 +111,7 @@ def labelSingle(self, x, *args, **kwargs): dv = self.discrim(x, *args, **kwargs) return np.argmax(dv, axis=1) - def labelVote(self, x, n, truncate=True, *args, **kwargs): + def labelVote(self, x, n, *args, truncate=True, **kwargs): """Assign class labels by voting across successive class labels. Args: @@ -121,7 +142,7 @@ def vc(l): return util.accum(labels, n, accumf=voteCount, truncate=truncate, axis=None) - def labelIntersect(self, x, n, truncate=True, *args, **kwargs): + def labelIntersect(self, x, n, *args, truncate=True, **kwargs): """Assign class labels using the intersection of independent probabilities across successive observations. @@ -155,9 +176,6 @@ class labels. # find the class membership probabilities probs = self.probs(x, *args, **kwargs) - # use log probabilities for performance and stability - logProbs = np.log(util.capZero(probs)) - # accumulate by summing log probs across n observations # equivalent to multiplying probs since we are only interested in the argmax intersect = util.accum(probs, n, accumf=np.sum, truncate=truncate, axis=0) @@ -165,7 +183,7 @@ class labels. # label is argmax of accumulated/summed log probabilities return np.argmax(intersect, axis=1) - def labelUnion(self, x, n, truncate=True, *args, **kwargs): + def labelUnion(self, x, n, *args, truncate=True, **kwargs): """Assign class labels using the union of independent probabilities across successive observations. @@ -206,27 +224,55 @@ class labels. return np.argmax(probs, axis=1) def labelKnown(self, classData, *args, **kwargs): + """Assign class labels to data with known class membership. + """ return [self.label(cls, *args, **kwargs) for cls in classData] def auc(self, classData, *args, **kwargs): - return auc(self.probsKnown(classData, *args, **kwargs)) + """Area under the roc curve. + """ + return clsm.auc(self.probsKnown(classData, *args, **kwargs)) def bca(self, classData, *args, **kwargs): - return bca(self.labelKnown(classData, *args, **kwargs)) + """Balanced classification accuracy. + + See util.clsm.bca for details. + """ + return clsm.bca(self.labelKnown(classData, *args, **kwargs)) def ca(self, classData, *args, **kwargs): - return ca(self.labelKnown(classData, *args, **kwargs)) + """Classification accuracy. + + See util.clsm.ca for details. + """ + return clsm.ca(self.labelKnown(classData, *args, **kwargs)) - def confusion(self, classData, normalize=True, *args, **kwargs): - return confusion(self.labelKnown(classData, *args, **kwargs), normalize=True) + def confusion(self, classData, *args, normalize=True, **kwargs): + """Confusion matrix. - def itr(self, classData, decisionRate=60.0, *args, **kwargs): - return itr(self.labelKnown(classData, *args, **kwargs), decisionRate=decisionRate) + See util.clsm.confusion for details. 
+ """ + return clsm.confusion(self.labelKnown(classData, *args, **kwargs), normalize=normalize) + + def itr(self, classData, *args, decisionRate=60.0, **kwargs): + """Information transfer rate. + + See util.clsm.itr for details. + """ + return clsm.itr(self.labelKnown(classData, *args, **kwargs), decisionRate=decisionRate) def lloss(self, classData, *args, **kwargs): + """Log loss. + + See util.clsm.lloss for details. + """ classProbs = self.probsKnown(classData, *args, **kwargs) probs, g = label.indicatorsFromList(classProbs) - return lloss(probs, g) + return clsm.lloss(probs, g) def roc(self, classData, *args, **kwargs): - return roc(self.probsKnown(classData, *args, **kwargs)) + """Receiver operating characteristic. + + See util.clsm.roc for details. + """ + return clsm.roc(self.probsKnown(classData, *args, **kwargs)) diff --git a/cebl/ml/da.py b/cebl/ml/da.py index f046f19..8ef0170 100644 --- a/cebl/ml/da.py +++ b/cebl/ml/da.py @@ -1,6 +1,5 @@ """Discriminant Analysis classifiers. """ -import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np @@ -108,19 +107,23 @@ def train(self, classData): ##cvi = sp.linalg.pinvh(cv) #try: # cvi = np.linalg.inv(cv) - #except np.linalg.LinAlgError as e: + #except np.linalg.LinAlgError: # raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.') try: + # pylint: disable=no-member cvi = sp.linalg.pinvh(cv) + except Exception as e: - raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.') + raise RuntimeError( + 'Pseudo inversion of covariance matrix failed: ' + str(e)) self.invCovs.append(cvi) sign, logDet = np.linalg.slogdet(cv) if sign == 0: - raise RuntimeError('Covariance matrix has zero determinant, consider using shrinkage.') + raise RuntimeError( + 'Covariance matrix has zero determinant, consider using shrinkage.') #self.intercepts[i] = logDet - 2.0*logPriors[i] @@ -210,18 +213,18 @@ def demoQDA2d(): """QDA Example. 
""" # covariance matrices - covRed = [[1, -0.9], - [-0.9, 1]] + covRed = [[1, -0.9], + [-0.9, 1]] covGreen = [[0.8, -0.5], [-0.5, 0.8]] - covBlue = [[0.3, 0.0], - [0.0, 0.3]] + covBlue = [[0.3, 0.0], + [0.0, 0.3]] # red data red = np.random.multivariate_normal( - (-1.2,-1.2), covRed, 500) + (-1.2, -1.2), covRed, 500) # green data green = np.random.multivariate_normal( @@ -242,9 +245,9 @@ def demoQDA2d(): model = QuadraticDiscriminantAnalysis(data) # find class labels - redLabel = model.label(red) # one at a time + redLabel = model.label(red) # one at a time greenLabel = model.label(green) - blueLabel = model.label(blue) + blueLabel = model.label(blue) print('red labels\n-------') print(redLabel) @@ -265,9 +268,9 @@ def demoQDA2d(): ax = fig.add_subplot(2, 2, 1) # training data - ax.scatter(red[:,0], red[:,1], color="red") + ax.scatter(red[:,0], red[:,1], color="red") ax.scatter(green[:,0], green[:,1], color="green") - ax.scatter(blue[:,0], blue[:,1], color="blue") + ax.scatter(blue[:,0], blue[:,1], color="blue") # generate grid over training data sw = 0.02 @@ -280,17 +283,17 @@ def demoQDA2d(): probs = model.probs(z) # red, green, blue and max probabilities - pRed = np.reshape(probs[:,0,None], x.shape) + pRed = np.reshape(probs[:,0,None], x.shape) pGreen = np.reshape(probs[:,1,None], x.shape) - pBlue = np.reshape(probs[:,2,None], x.shape) - pMax = np.reshape(np.max(probs, axis=1), x.shape) + pBlue = np.reshape(probs[:,2,None], x.shape) + pMax = np.reshape(np.max(probs, axis=1), x.shape) # red, green, blue and max probability densities densities = model.dens(z) - dRed = np.reshape(densities[:,0,None], x.shape) + dRed = np.reshape(densities[:,0,None], x.shape) dGreen = np.reshape(densities[:,1,None], x.shape) - dBlue = np.reshape(densities[:,2,None], x.shape) - dMax = np.reshape(np.max(densities, axis=1), x.shape) + dBlue = np.reshape(densities[:,2,None], x.shape) + dMax = np.reshape(np.max(densities, axis=1), x.shape) # class intersections diffRG = pRed - pGreen @@ -308,14 +311,14 @@ def demoQDA2d(): color = color.swapaxes(1, 2).T # flip colors to fade to white - zro = np.zeros_like(x) - colorFlip = np.ones((3, x.shape[0], x.shape[1])) + zro = np.zeros_like(x) + colorFlip = np.ones((3, x.shape[0], x.shape[1])) colorFlip -= (np.array((zro, dRed, dRed)) + np.array((dGreen, zro, dGreen)) + np.array((dBlue, dBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1, 2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, @@ -330,17 +333,16 @@ def demoQDA2d(): color = color.swapaxes(1, 2).T # flip colors to fade to white - zro = np.zeros_like(x) - colorFlip = np.ones((3, x.shape[0], x.shape[1])) + zro = np.zeros_like(x) + colorFlip = np.ones((3, x.shape[0], x.shape[1])) colorFlip -= (np.array((zro, pRed, pRed)) + np.array((pGreen, zro, pGreen)) + np.array((pBlue, pBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1, 2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? 
@@ -363,7 +365,7 @@ def demoQDA2d(): ax = fig.add_subplot(2, 2, 4, projection='3d') labels = model.label(z) - lMax = np.reshape(labels, x.shape) + lMax = np.reshape(labels, x.shape) surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) @@ -373,7 +375,6 @@ def demoQDA2d(): fig.tight_layout() -#covCache = util.Cache(2) class LinearDiscriminantAnalysis(Classifier): """Linear Discriminant Analysis Classifier. """ @@ -432,15 +433,8 @@ def train(self, classData): self.avgCov = np.zeros((self.nIn, self.nIn), dtype=self.dtype) # sum up class covariances - for i, cls in enumerate(classData): + for cls in classData: self.avgCov += np.cov(cls, rowvar=False) - #key = util.hashArray(cls) - #cov = covCache[key] - #if cov is None: - # cov = np.cov(cls, rowvar=False) - # covCache[key] = cov - # #print('cache miss') - #self.avgCov += cov # average covariance over number of classes self.avgCov /= self.nCls @@ -453,17 +447,20 @@ def train(self, classData): ##self.invCov = sp.linalg.pinvh(self.avgCov) #try: # self.invCov = np.linalg.inv(self.avgCov) - #except np.linalg.LinAlgError as e: + #except np.linalg.LinAlgError: # raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.') try: + # pylint: disable=no-member self.invCov = sp.linalg.pinvh(self.avgCov) except Exception as e: - raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.') + raise RuntimeError( + 'Pseudo inversion of covariance matrix failed: ' + str(e)) sign, self.logDet = np.linalg.slogdet(self.avgCov) if sign == 0: - raise RuntimeError('Covariance matrix has zero determinant, consider using shrinkage.') + raise RuntimeError( + 'Covariance matrix has zero determinant, consider using shrinkage.') # model coefficients # (ndim, nCls) = (ndim, ndim) x (ndim, nCls) @@ -484,7 +481,7 @@ def discrim(self, x): x: Input data. A numpy array with shape (nObs[,nIn]). Returns: - Numpy array with shape (nObs,nCls) containing the discriminant values. + Numpy array with shape (nObs, nCls) containing the discriminant values. 
Notes: These values are the log of the evaluated discriminant functions @@ -495,7 +492,7 @@ def discrim(self, x): # discriminant values # (nObs, nCls) = (nObs, ndim) x (ndim, nCls) + (nObs, nCls) - dv = (x @ self.weights) + self.intercepts.reshape((1,-1)) + dv = (x @ self.weights) + self.intercepts.reshape((1, -1)) return dv @@ -544,8 +541,9 @@ def probs(self, x): dens = util.capZero(np.exp(logDens-mx)) return dens / dens.sum(axis=1)[:,None] - # hack, doesn't work for QDA or other algorithms where discrim is not comparable XXX - idfah - # handles ties better since probs may be equal within precision but not log likelihoods (discrims) + # XXX: hack alert + # doesn't work for QDA or other algorithms where discrim is not comparable - idfah + # handles ties better since probs may be equal within precision but not discrims def auc(self, classData, *args, **kwargs): return util.auc(self.discrimKnown(classData, *args, **kwargs)) def roc(self, classData, *args, **kwargs): @@ -563,7 +561,7 @@ def demoLDA2d(): # red data red = np.random.multivariate_normal( - (-1,-1), cov, 500) + (-1, -1), cov, 500) # green data green = np.random.multivariate_normal( @@ -583,9 +581,9 @@ def demoLDA2d(): model = LinearDiscriminantAnalysis(data, shrinkage=0) # find class labels - redLabel = model.label(red) # one at a time + redLabel = model.label(red) # one at a time greenLabel = model.label(green) - blueLabel = model.label(blue) + blueLabel = model.label(blue) print('red labels\n-------') print(redLabel) @@ -603,9 +601,9 @@ def demoLDA2d(): ax = fig.add_subplot(2, 2, 1) # training data - ax.scatter(red[:,0], red[:,1], color="red") + ax.scatter(red[:,0], red[:,1], color="red") ax.scatter(green[:,0], green[:,1], color="green") - ax.scatter(blue[:,0], blue[:,1], color="blue") + ax.scatter(blue[:,0], blue[:,1], color="blue") # generate grid over training data sw = 0.02 @@ -618,14 +616,14 @@ def demoLDA2d(): probs = model.probs(z) # red, green, blue and max probabilities - pRed = np.reshape(probs[:,0,None], x.shape) + pRed = np.reshape(probs[:,0,None], x.shape) pGreen = np.reshape(probs[:,1,None], x.shape) - pBlue = np.reshape(probs[:,2,None], x.shape) - pMax = np.reshape(np.max(probs, axis=1), x.shape) + pBlue = np.reshape(probs[:,2,None], x.shape) + pMax = np.reshape(np.max(probs, axis=1), x.shape) # class intersections - diffRG = pRed - pGreen - diffRB = pRed - pBlue + diffRG = pRed - pGreen + diffRB = pRed - pBlue diffGB = pGreen - pBlue ax.contour(x, y, diffRG, colors='black', levels=(0,)) ax.contour(x, y, diffRB, colors='black', levels=(0,)) @@ -633,10 +631,10 @@ def demoLDA2d(): # red, green, blue and max probability densities densities = model.dens(z) - dRed = np.reshape(densities[:,0,None], x.shape) + dRed = np.reshape(densities[:,0,None], x.shape) dGreen = np.reshape(densities[:,1,None], x.shape) - dBlue = np.reshape(densities[:,2,None], x.shape) - dMax = np.reshape(np.max(densities, axis=1), x.shape) + dBlue = np.reshape(densities[:,2,None], x.shape) + dMax = np.reshape(np.max(densities, axis=1), x.shape) # second figure shows 3d plots of probability densities ax = fig.add_subplot(2, 2, 2, projection='3d') @@ -646,17 +644,16 @@ def demoLDA2d(): color = color.swapaxes(1, 2).T # flip colors to fade to white - zro = np.zeros_like(x) - colorFlip = np.ones((3, x.shape[0], x.shape[1])) + zro = np.zeros_like(x) + colorFlip = np.ones((3, x.shape[0], x.shape[1])) colorFlip -= (np.array((zro, dRed, dRed)) + np.array((dGreen, zro, dGreen)) + np.array((dBlue, dBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= 
np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1, 2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface - #surf = ax.plot_surface(x, y, dMax, cmap=matplotlib.cm.jet, linewidth=0) surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, linewidth=0.02, shade=True) surf.set_edgecolor('black') # add edgecolor back in, bug? @@ -669,14 +666,14 @@ def demoLDA2d(): color = color.swapaxes(1, 2).T # flip colors to fade to white - zro = np.zeros_like(x) - colorFlip = np.ones((3, x.shape[0], x.shape[1])) + zro = np.zeros_like(x) + colorFlip = np.ones((3, x.shape[0], x.shape[1])) colorFlip -= (np.array((zro, pRed, pRed)) + np.array((pGreen, zro, pGreen)) + np.array((pBlue, pBlue, zro))) colorFlip -= np.min(colorFlip) colorFlip /= np.max(colorFlip) - colorFlip = colorFlip.swapaxes(1, 2).T + colorFlip = colorFlip.swapaxes(1, 2).T # probability density surface surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, @@ -701,7 +698,7 @@ def demoLDA2d(): ax = fig.add_subplot(2, 2, 4, projection='3d') labels = model.label(z) - lMax = np.reshape(labels, x.shape) + lMax = np.reshape(labels, x.shape) surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) diff --git a/cebl/ml/knn.py b/cebl/ml/knn.py index b976b5b..b221dd2 100644 --- a/cebl/ml/knn.py +++ b/cebl/ml/knn.py @@ -15,8 +15,8 @@ def __init__(self, classData, k=1, distMetric='euclidean', **kwargs): self.k = k minObs = min([len(cls) for cls in classData]) if self.k > minObs: - raise RuntimeError('k=%d exceeds the number of examples in ' + - 'smallest training class %d.' % (k, minObs)) + raise RuntimeError(('k=%d exceeds the number of examples in ' % k) + + ('smallest training class %d.' % minObs)) if callable(distMetric): self.distFunc = lambda x1, x2: distMetric(x1, x2, **kwargs) diff --git a/cebl/ml/linreg.py b/cebl/ml/linreg.py index 4b00f70..ff7162d 100644 --- a/cebl/ml/linreg.py +++ b/cebl/ml/linreg.py @@ -38,10 +38,8 @@ def train(self, x, g): b = x1.T @ g if self.pseudoInv is None: - if np.linalg.cond(a) < 1.0 / np.finfo(self.dtype).eps: - pseudoInv = True - else: - pseudoInv = False + cond = np.linalg.cond(a) * np.finfo(self.dtype).eps + pseudoInv = cond < 1.0 else: pseudoInv = self.pseudoInv diff --git a/cebl/ml/nnet/esn.py b/cebl/ml/nnet/esn.py index 5b8a3bf..4dfee1d 100644 --- a/cebl/ml/nnet/esn.py +++ b/cebl/ml/nnet/esn.py @@ -1,3 +1,5 @@ +"""Echo State Network (ESN). +""" import matplotlib.pyplot as plt import numpy as np import scipy.sparse as spsparse @@ -413,6 +415,8 @@ def plotWeightImg(self, ax=None): class ESNReservoir(EchoStateNetworkReservoir): + """Alias for EchoStateNetworkReservoir. + """ pass @@ -486,6 +490,8 @@ def addSideTrack(self, x, act): return act class ESNFromReservoir(EchoStateNetworkFromReservoir): + """Alias for EchoStateNetworkFromReservoir. + """ pass @@ -505,6 +511,8 @@ def __init__(self, x, g, nRes=1024, rwScale=0.95, rwConn=0.01, verbose=verbose, **kwargs) class ESN(EchoStateNetwork): + """Alias for EchoStateNetwork. + """ pass diff --git a/cebl/ml/nnet/forward.py b/cebl/ml/nnet/forward.py index e15ae1c..601bac6 100644 --- a/cebl/ml/nnet/forward.py +++ b/cebl/ml/nnet/forward.py @@ -1,3 +1,5 @@ +"""Feedforward Neural Network for regression. +""" import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np @@ -12,6 +14,8 @@ class ForwardNetwork(Regression, optim.Optable): + """Feedforward Neural Network for regression. 
+ """ def __init__(self, x, g, nHidden=10, transFunc=transfer.lecun, weightInitFunc=pinit.lecun, penalty=None, elastic=1.0, optimFunc=optim.scg, **kwargs): @@ -113,7 +117,7 @@ def eval(self, x): x: Input data. A numpy array with shape (nObs[,nDim]). Returns: - A numpy array with shape (nObs,nOut) containing the + A numpy array with shape (nObs, nOut) containing the network outputs for each input in x. """ x = np.asarray(x) @@ -347,9 +351,9 @@ def demoFN1d(): results = model.trainResult - fig = plt.figure(figsize=(16,12)) + fig = plt.figure(figsize=(16, 12)) - axFit = fig.add_subplot(3,2,1) + axFit = fig.add_subplot(3, 2, 1) axFit.plot(x, gClean, linewidth=2, color='blue') axFit.plot(x, g, linewidth=2, color='black') axFit.plot(x, model.eval(x), linewidth=2, color='red') @@ -358,19 +362,19 @@ def demoFN1d(): axFit.set_xlabel('Input') axFit.set_ylabel('Output') - axError = fig.add_subplot(3,2,2) + axError = fig.add_subplot(3, 2, 2) axError.plot(results['eTrace']) axError.set_title('Training Error') axError.set_xlabel('Epoch') axError.set_ylabel('Mean-Squared Error') - axHResponse = fig.add_subplot(3,2,3) + axHResponse = fig.add_subplot(3, 2, 3) axHResponse.plot(x, model.evalHiddens(x)[0], linewidth=2) axHResponse.set_title('Hidden Unit Response') axHResponse.set_xlabel('Input') axHResponse.set_ylabel('Hidden Unit Output') - axHWeight = fig.add_subplot(3,2,4) + axHWeight = fig.add_subplot(3, 2, 4) img = axHWeight.imshow(model.hws[0], aspect='auto', interpolation='none', cmap=plt.cm.winter) cbar = plt.colorbar(img) @@ -379,7 +383,7 @@ def demoFN1d(): axHWeight.set_xlabel('Hidden Unit') axHWeight.set_ylabel('Input') axHWeight.set_yticks(range(model.hws[0].shape[0])) - axHWeight.set_yticklabels(list(range(1,model.hws[0].shape[0])) + ['bias']) + axHWeight.set_yticklabels(list(range(1, model.hws[0].shape[0])) + ['bias']) pTrace = np.array(results['pTrace']) #sTrace = np.array(results['sTrace']) @@ -388,13 +392,13 @@ def demoFN1d(): #hwTrace = sTrace vwTrace = pTrace[:,model.vw.size:] - axHWTrace = fig.add_subplot(3,2,5) + axHWTrace = fig.add_subplot(3, 2, 5) axHWTrace.plot(hwTrace) axHWTrace.set_title('Hidden Weight Trace') axHWTrace.set_xlabel('Epoch') axHWTrace.set_ylabel('Weight') - axVWTrace = fig.add_subplot(3,2,6) + axVWTrace = fig.add_subplot(3, 2, 6) axVWTrace.plot(vwTrace) axVWTrace.set_title('Visible Weight Trace') axVWTrace.set_xlabel('Epoch') @@ -430,7 +434,7 @@ def radialSinc(x): gStd = g.std() gStand = (g - gMean) / gStd - model = FN(xStand, gStand, nHidden=(4,4,4), transFunc=transfer.gaussian, + model = FN(xStand, gStand, nHidden=(4, 4, 4), transFunc=transfer.gaussian, optimFunc=optim.scg, maxIter=1000, precision=0.0, accuracy=0.0, eTrace=True, pTrace=True, verbose=True) results = model.trainResult @@ -441,31 +445,31 @@ def radialSinc(x): fig = plt.figure() - axTargSurf = fig.add_subplot(2,3,1, projection='3d') + axTargSurf = fig.add_subplot(2, 3, 1, projection='3d') targSurf = axTargSurf.plot_surface(xx1, xx2, gg, linewidth=0.0, cmap=plt.cm.jet) targSurf.set_edgecolor('black') - axTargCont = fig.add_subplot(2,3,2) + axTargCont = fig.add_subplot(2, 3, 2) axTargCont.contour(x1, x2, gg, 40, color='black', marker='o', s=400, linewidth=3, cmap=plt.cm.jet) eTrace = results['eTrace'] - axError = fig.add_subplot(2,3,3) + axError = fig.add_subplot(2, 3, 3) axError.plot(eTrace) axError.set_title('Training Error') axError.set_xlabel('Epoch') axError.set_ylabel('Mean-Squared Error') - axPredSurf = fig.add_subplot(2,3,4, projection='3d') + axPredSurf = fig.add_subplot(2, 3, 4, projection='3d') 
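
For quick reference while reading these hunks, the eval method documented above returns an (nObs, nOut) array. A minimal usage sketch of this regressor, mirroring the keyword arguments used in the demos; the import paths are assumed from the repository layout rather than taken from the patch itself:

    import numpy as np
    from cebl.ml import optim
    from cebl.ml.nnet import transfer
    from cebl.ml.nnet.forward import ForwardNetwork

    # toy 1-d regression target: a noisy sine, similar in spirit to demoFN1d
    x = np.linspace(0.0, 2.0 * np.pi, 200)[:, None]
    g = np.sin(x) + np.random.normal(scale=0.1, size=x.shape)

    # ten hidden units, trained with scaled conjugate gradients as in the demos
    model = ForwardNetwork(x, g, nHidden=10, transFunc=transfer.lecun,
                           optimFunc=optim.scg, maxIter=250, verbose=False)

    pred = model.eval(x)   # numpy array with shape (nObs, nOut)

After construction, model.trainResult holds the optimizer traces (eTrace, pTrace) that demoFN1d plots below.
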
predSurf = axPredSurf.plot_surface(xx1, xx2, yy, linewidth=0.0, cmap=plt.cm.jet) predSurf.set_edgecolor('black') - axPredCont = fig.add_subplot(2,3,5) + axPredCont = fig.add_subplot(2, 3, 5) axPredCont.contour(x1, x2, yy, 40, color='black', marker='o', s=400, linewidth=3, cmap=plt.cm.jet) pTrace = np.array(results['pTrace']) - axHWTrace = fig.add_subplot(2,3,6) + axHWTrace = fig.add_subplot(2, 3, 6) axHWTrace.plot(pTrace) axHWTrace.set_title('Weight Trace') axHWTrace.set_xlabel('Epoch') diff --git a/cebl/ml/nnet/softmax.py b/cebl/ml/nnet/softmax.py index 25b1aaf..b42b8ea 100644 --- a/cebl/ml/nnet/softmax.py +++ b/cebl/ml/nnet/softmax.py @@ -1,3 +1,5 @@ +"""Feedforward Neural Network with softmax visible layer for classification. +""" import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np @@ -13,8 +15,7 @@ class ForwardNetworkSoftmax(Classifier, optim.Optable): - """Two-Layer Feedforward Neural Network with softmax visible layer - for classification. + """Feedforward Neural Network with softmax visible layer for classification. """ def __init__(self, classData, nHidden=10, transFunc=transfer.lecun, weightInitFunc=pinit.lecun, penalty=None, elastic=1.0, @@ -333,7 +334,7 @@ def demoFNS2d(): mx = np.max(np.vstack(classData), axis=0) # train model - model = FNS(classData, nHidden=(4,8,16), optimFunc=optim.scg, + model = FNS(classData, nHidden=(4, 8, 16), optimFunc=optim.scg, transFunc=transfer.lecun, precision=1.0e-5, #transFunc=transfer.exprect, precision=1.0e-10, #transFunc=transfer.rectifierTwist, precision=1.0e-10, diff --git a/cebl/ml/optim/__init__.py b/cebl/ml/optim/__init__.py index 8700edf..0cb97b3 100644 --- a/cebl/ml/optim/__init__.py +++ b/cebl/ml/optim/__init__.py @@ -1,6 +1,5 @@ """Optimization. """ - import scipy.optimize as spopt from .alopex import * diff --git a/cebl/ml/optim/minibatch.py b/cebl/ml/optim/minibatch.py index 315b689..d63ac71 100644 --- a/cebl/ml/optim/minibatch.py +++ b/cebl/ml/optim/minibatch.py @@ -128,12 +128,14 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10, print('iterations: %d' % totalIter) # save result into a dictionary - result = {} - result['iteration'] = totalIter - result['round'] = curRound - result['reason'] = reason - - if pTrace: result['pTrace'] = paramTrace - if eTrace: result['eTrace'] = errorTrace + result = { + "iteration": totalIter, + "round": curRound, + "reason": reason + } + + # pylint: disable=multiple-statements + if pTrace: result["pTrace"] = paramTrace + if eTrace: result["eTrace"] = errorTrace return result diff --git a/cebl/ml/optim/optable.py b/cebl/ml/optim/optable.py index 5396335..d170689 100644 --- a/cebl/ml/optim/optable.py +++ b/cebl/ml/optim/optable.py @@ -1,6 +1,8 @@ +"""Interfaces for optimization routines. +""" class Optable: - """Base class/interface for classes that can be optimized using the routines - in the optable module. This class holds the abstract methods that should/can + """Interface for classes that can be optimized using the routines in the + optable module. This class holds the abstract methods that should/can be overridden and the documentation regarding their arguments and usage. """ diff --git a/cebl/ml/optim/pso.py b/cebl/ml/optim/pso.py index b255073..5673e1a 100644 --- a/cebl/ml/optim/pso.py +++ b/cebl/ml/optim/pso.py @@ -1,3 +1,5 @@ +"""Particle Swarm Optimization (PSO). +""" import matplotlib.pyplot as plt import matplotlib.cm as pltcm from mpl_toolkits.mplot3d import Axes3D @@ -6,14 +8,15 @@ from . 
import tests -def pso(optable, nParticles=10, pInit=0.5, vInit=0.01, +def pso(optable, *args, + nParticles=10, pInit=0.5, vInit=0.01, #momentum=0.9, pAttract=0.3, gAttract=0.3, momentum=0.9, pAttract=0.3, gAttract=0.3, accuracy=0.0, precision=0.0, divergeThresh=1.0e10, maxIter=10000, eTrace=False, pTrace=False, callback=None, verbose=False, - *args, **kwargs): + **kwargs): """Particle Swarm Optimization (PSO). """ params = optable.parameters() @@ -53,7 +56,7 @@ def pso(optable, nParticles=10, pInit=0.5, vInit=0.01, callback(optable, iteration, paramTrace, errorTrace) while True: - for i,p in enumerate(pParams): + for i, p in enumerate(pParams): pr = np.random.random(params.size) gr = np.random.random(params.size) @@ -116,22 +119,26 @@ def pso(optable, nParticles=10, pInit=0.5, vInit=0.01, print(reason) # save result into a dictionary - result = {} - result['error'] = gError - result['iteration'] = iteration - result['reason'] = reason + result = { + "error": gError, + "iteration": iteration, + "reason": reason + } - if eTrace: result['eTrace'] = errorTrace - if pTrace: result['pTrace'] = paramTrace + # pylint: disable=multiple-statements + if eTrace: result["eTrace"] = errorTrace + if pTrace: result["pTrace"] = paramTrace return result def demoPSO(): + """Demonstration of particle swarm optimization. + """ rosen = tests.Rosen(optimFunc=pso, nParticles=10, accuracy=0.01, maxIter=5000, verbose=True)#, initialSolution=(2.5, -2.5)) n = 200 - rng=(-3.0,3.0, -4.0,8.0) + rng = (-3.0, 3.0, -4.0, 8.0) x = np.linspace(rng[0], rng[1], n) y = np.linspace(rng[2], rng[3], n) @@ -142,20 +149,21 @@ def demoPSO(): values = rosen.eval(points) zz = values.reshape((xx.shape[0], yy.shape[1])) - fig = plt.figure(figsize=(12,6)) - axSurf = fig.add_subplot(1,2,1, projection='3d') + fig = plt.figure(figsize=(12, 6)) + axSurf = fig.add_subplot(1, 2, 1, projection='3d') surf = axSurf.plot_surface(xx, yy, zz, linewidth=1.0, cmap=pltcm.jet) surf.set_edgecolor('black') - axCont = fig.add_subplot(1,2,2) + axCont = fig.add_subplot(1, 2, 2) axCont.contour(x, y, zz, 40, color='black') axCont.scatter(rosen.a, rosen.a**2, color='black', marker='o', s=400, linewidth=3) axCont.scatter(*rosen.solution, color='red', marker='x', s=400, linewidth=3) paramTrace = np.array(rosen.trainResult['pTrace']) for i in range(paramTrace.shape[1]): - axCont.plot(paramTrace[:,i:,0], paramTrace[:,i:,1], color=plt.cm.jet(i/float(paramTrace.shape[1])), linewidth=1) + axCont.plot(paramTrace[:,i:,0], paramTrace[:,i:,1], + color=plt.cm.jet(i/float(paramTrace.shape[1])), linewidth=1) fig.tight_layout() diff --git a/cebl/ml/optim/rprop.py b/cebl/ml/optim/rprop.py index ff29a51..84135a3 100644 --- a/cebl/ml/optim/rprop.py +++ b/cebl/ml/optim/rprop.py @@ -1,17 +1,19 @@ +"""Resilient backpropagation. +""" import matplotlib.pyplot as plt import numpy as np from . import tests -def rprop(optable, +def rprop(optable, *args, stepInitial=0.05, stepUp=1.02, stepDown=0.6, stepMin=0.0, stepMax=50.0, accuracy=0.0, precision=1.0e-10, divergeThresh=1.0e10, maxIter=2500, pTrace=False, sTrace=False, eTrace=False, - callback=None, verbose=False, *args, **kwargs): - """Resilient backpropigation. + callback=None, verbose=False, **kwargs): + """Resilient backpropagation. Requires a first-order gradient estimate. 
Args: @@ -85,7 +87,8 @@ def rprop(optable, Refs: @inproceedings{riedmiller1993direct, - title={A direct adaptive method for faster backpropagation learning: The RPROP algorithm}, + title={A direct adaptive method for faster backpropagation + learning: The RPROP algorithm}, author={Riedmiller, Martin and Braun, Heinrich}, booktitle={IEEE International Conference on Neural Networks} pages={586--591}, @@ -94,7 +97,6 @@ def rprop(optable, } """ params = optable.parameters() - paramsStart = params.copy() # initialize all step sizes to stepInitial steps = np.ones_like(params) * stepInitial @@ -178,32 +180,36 @@ def rprop(optable, print(reason) # save result into a dictionary - result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason - - if pTrace: result['pTrace'] = paramTrace - if sTrace: result['sTrace'] = stepTrace - if eTrace: result['eTrace'] = errorTrace + result = { + "params": params, + "error": error, + "iteration": iteration, + "reason": reason + } + + # pylint: disable=multiple-statements + if pTrace: result["pTrace"] = paramTrace + if sTrace: result["sTrace"] = stepTrace + if eTrace: result["eTrace"] = errorTrace return result def demoRProp(): + """Demonstration of resilient backpropagation. + """ rosen = tests.Rosen(optimFunc=rprop, maxIter=10000, verbose=True) rosen.plot() -def irprop(optable, +def irprop(optable, *args, stepInitial=0.05, stepUp=1.02, stepDown=0.6, #stepInitial=0.05, stepUp=1.015, stepDown=0.5, stepMin=0.0, stepMax=50.0, accuracy=0.0, precision=1.0e-10, divergeThresh=1.0e10, maxIter=2500, pTrace=False, sTrace=False, eTrace=False, - callback=None, verbose=False, *args, **kwargs): - """Resilient backpropigation. + callback=None, verbose=False, **kwargs): + """Improved resilient backpropagation. Requires a first-order gradient estimate. Args: @@ -277,7 +283,8 @@ def irprop(optable, Refs: @inproceedings{riedmiller1993direct, - title={A direct adaptive method for faster backpropagation learning: The RPROP algorithm}, + title={A direct adaptive method for faster backpropagation + learning: The RPROP algorithm}, author={Riedmiller, Martin and Braun, Heinrich}, booktitle={IEEE International Conference on Neural Networks} pages={586--591}, @@ -394,196 +401,27 @@ def irprop(optable, print(reason) # save result into a dictionary - result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason - - if pTrace: result['pTrace'] = paramTrace - if sTrace: result['sTrace'] = stepTrace - if eTrace: result['eTrace'] = errorTrace + result = { + "params": params, + "error": error, + "iteration": iteration, + "reason": reason + } + + # pylint: disable=multiple-statements + if pTrace: result["pTrace"] = paramTrace + if sTrace: result["sTrace"] = stepTrace + if eTrace: result["eTrace"] = errorTrace return result def demoIRProp(): + """Demonstration of improved resilient backpropagation. + """ rosen = tests.Rosen(optimFunc=irprop, maxIter=10000, verbose=True) rosen.plot() -def srprop(optable, x, g, batchSize=10, maxRound=np.inf, - stepInitial=0.05, stepUp=1.02, stepDown=0.6, - stepMin=0.0, stepMax=50.0, - accuracy=0.0, precision=1.0e-10, - divergeThresh=1.0e10, maxIter=2500, - pTrace=False, sTrace=False, eTrace=False, - callback=None, verbose=True, *args, **kwargs): - """Stochastic Resilient Backpropigation. 
- """ - # make sure x and g are numpy arrays - x = np.asarray(x) - g = np.asarray(g) - - # make sure x and g have same size - assert len(x) == len(g) - - # number of observations - nObs = len(x) - - params = optable.parameters() - paramsStart = params.copy() - - # initialize all step sizes to stepInitial - steps = np.ones_like(params) * stepInitial - - # initialize unknown error to infinity - error = np.inf - - # initialize gradient to zero in order to yield no flips - grad = np.zeros_like(params) - - paramTrace = [] - stepTrace = [] - errorTrace = [] - - # indices into x and g used to select minibatches - batchInd = np.arange(nObs) - - # current round of minibatches - curRound = 0 - - iteration = 0 - - # termination reason - reason = '' - - # for each round - done = False - while not done: - if verbose: - print('=======') - print('round: %d' % curRound) - print('error: %.5f' % optable.error(x=x, g=g)) - print('=======') - - # start index into current minibatch - start = 0 - - # randomly shuffle minibatches each round - np.random.shuffle(batchInd) - xShuff = x[batchInd] - gShuff = g[batchInd] - - # for each minibatch - curBatch = 0 - while True: - # end index into current minibatch - end = start + batchSize - - # don't process last minibatch - # if smaller than batchSize - if end > nObs: - break - - # select current batch - xMini = xShuff[start:end] - gMini = gShuff[start:end] - - # compute value of the error function and the gradient - errorPrev = error - gradPrev = grad - error, grad = optable.gradient(*args, x=x, g=g, returnError=True, **kwargs) - - if verbose: - print('%d %d %d %6f' % (iteration, curRound, curBatch, error)) - - if callback is not None: - callback(optable, iteration, paramTrace, errorTrace) - - # keep parameter history if requested - if pTrace: - paramTrace.append(params.copy()) - - # keep step trace if requested - if sTrace: - stepTrace.append(steps.copy()) - - # keep error function history if requested - if eTrace: - errorTrace.append(error) - - # terminate if maximum iterations reached - if iteration >= maxIter: - reason = 'maxiter' - done = True - break - - # terminate if desired accuracy reached - if error < accuracy: - reason = 'accuracy' - done = True - break - - # terminate if desired precision reached - if np.abs(error - errorPrev) < precision: - reason = 'precision' - done = True - break - - # terminate if the error function diverges - if error > divergeThresh: - reason = 'diverge' - done = True - break - - flips = grad * gradPrev - - # decrease step sizes where gradient flipped - flipsNeg = np.where(flips < 0.0)[0] - steps[flipsNeg] *= stepDown - steps[...] = np.maximum(steps, stepMin) - - # increase step sizes where gradient did not flip - flipsPos = np.where(flips > 0.0)[0] - steps[flipsPos] *= stepUp - steps[...] = np.minimum(steps, stepMax) - - params[...] 
+= steps * -np.sign(grad) - - # move mini-batch forward - start += batchSize - - # increment iteration counter - iteration += 1 - - # increment batch counters - curBatch += 1 - - # increment round counter - curRound += 1 - - if curRound >= maxRound: - reason = 'maxround' - done = True - break - - if verbose: - print(reason) - - # save result into a dictionary - result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason - - if pTrace: result['pTrace'] = paramTrace - if sTrace: result['sTrace'] = stepTrace - if eTrace: result['eTrace'] = errorTrace - - return result - - if __name__ == '__main__': demoRProp() plt.show() diff --git a/cebl/ml/optim/sciopt.py b/cebl/ml/optim/sciopt.py index 6e7ecaa..ca0f5aa 100644 --- a/cebl/ml/optim/sciopt.py +++ b/cebl/ml/optim/sciopt.py @@ -1,16 +1,17 @@ +"""Scipy optimization. +""" import matplotlib.pyplot as plt -import numpy as np import scipy.optimize as spopt from . import tests -def sciopt(optable, +def sciopt(optable, *args, method='CG', options=None, maxIter=1000, precision=1.0e-10, pTrace=False, eTrace=False, callback=None, verbose=False, - *args, **kwargs): + **kwargs): """Wrapper for scipy optimization routines. """ # get view of parameters to optimize @@ -74,26 +75,34 @@ def cb(p): params.flat[...] = optres['x'] - result = {} - result['error'] = optres['fun'] - result['params'] = params - result['iteration'] = cb.iteration - result['reason'] = optres['message'] + result = { + "error": optres["fun"], + "params": params, + "iteration": cb.iteration, + "reason": optres["message"] + } - if pTrace: result['pTrace'] = paramTrace - if eTrace: result['eTrace'] = errorTrace + # pylint: disable=multiple-statements + if pTrace: result["pTrace"] = paramTrace + if eTrace: result["eTrace"] = errorTrace return result def demoScioptPowell(): + """Demonstration of Powell's method for gradient-free optimization. + """ rosen = tests.Rosen(optimFunc=sciopt, method='Powell', verbose=True, options={'maxfev': 1000}) rosen.plot() def demoScioptBFGS(): + """Demonstration of BFGS optimization. + """ rosen = tests.Rosen(optimFunc=sciopt, method='BFGS', verbose=True) rosen.plot() def demoScioptCG(): + """Demonstration of conjugate gradient optimization with line search. 
+    """
     rosen = tests.Rosen(optimFunc=sciopt, method='CG', verbose=True)
     rosen.plot()
diff --git a/cebl/ml/optim/sgd.py b/cebl/ml/optim/sgd.py
index 26aac54..27273d4 100644
--- a/cebl/ml/optim/sgd.py
+++ b/cebl/ml/optim/sgd.py
@@ -119,14 +119,16 @@ def sgd(optable, x, g, batchSize=30,
         print(reason)
 
     # save result into a dictionary
-    result = dict()
-    result['error'] = error
-    result['params'] = params
-    result['error'] = error
-    result['iteration'] = iteration
-    result['reason'] = reason
-
-    if pTrace: result['pTrace'] = paramTrace
-    if eTrace: result['eTrace'] = errorTrace
+    result = {
+        "error": error,
+        "params": params,
+        "error": error,
+        "iteration": iteration,
+        "reason": reason
+    }
+
+    # pylint: disable=multiple-statements
+    if pTrace: result["pTrace"] = paramTrace
+    if eTrace: result["eTrace"] = errorTrace
 
     return result
diff --git a/cebl/ml/optim/steepest.py b/cebl/ml/optim/steepest.py
index d071424..0407a3a 100644
--- a/cebl/ml/optim/steepest.py
+++ b/cebl/ml/optim/steepest.py
@@ -148,15 +148,17 @@ def steepest(optable,
         print(reason)
 
     # save result into a dictionary
-    result = dict()
-    result['error'] = error
-    result['params'] = params
-    result['error'] = error
-    result['iteration'] = iteration
-    result['reason'] = reason
-
-    if pTrace: result['pTrace'] = paramTrace
-    if eTrace: result['eTrace'] = errorTrace
+    result = {
+        "error": error,
+        "params": params,
+        "error": error,
+        "iteration": iteration,
+        "reason": reason
+    }
+
+    # pylint: disable=multiple-statements
+    if pTrace: result["pTrace"] = paramTrace
+    if eTrace: result["eTrace"] = errorTrace
 
     return result
 
@@ -321,13 +323,15 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4,
         print(reason)
 
     # save result into a dictionary
-    result = dict()
-    result['error'] = error
-    result['params'] = params
-    result['error'] = error
-    result['iteration'] = iteration
-    result['reason'] = reason
-
+    result = {
+        "error": error,
+        "params": params,
+        "error": error,
+        "iteration": iteration,
+        "reason": reason
+    }
+
+    # pylint: disable=multiple-statements
     if pTrace: result['pTrace'] = paramTrace
     if eTrace: result['eTrace'] = errorTrace
 
diff --git a/cebl/ml/strans/__init__.py b/cebl/ml/strans/__init__.py
index 813af5d..98352e5 100644
--- a/cebl/ml/strans/__init__.py
+++ b/cebl/ml/strans/__init__.py
@@ -1,6 +1,5 @@
-"""Linear spatial transformations.
+"""Linear signal transforms.
 """
-
 from .csp import *
 from .ica import *
 from .msf import *
diff --git a/cebl/ml/strans/pca.py b/cebl/ml/strans/pca.py
index 9b61a26..6bf1f00 100644
--- a/cebl/ml/strans/pca.py
+++ b/cebl/ml/strans/pca.py
@@ -1,6 +1,5 @@
 """Principal Components Analysis.
 """
-
 import matplotlib.pyplot as plt
 import numpy as np
 import scipy.signal as spsig
@@ -11,11 +10,25 @@
 
 
 class PrincipalComponentsAnalysis(STrans):
+    """Principal components analysis signal transform.
+    """
     def __init__(self, s, *args, **kwargs):
+        """Construct a new principal components analysis signal transform.
+
+        Args:
+            s: A numpy array of shape (nObs, nDim) containing the
+                values of the training signal.
+
+            *args: Additional arguments are passed to STrans.
+            **kwargs: Additional keyword arguments are passed to STrans.
+        """
         STrans.__init__(self, s, *args, **kwargs)
        self.train(s)
 
     def train(self, s):
+        """Train a new principal components analysis signal transform.
+        Called during class construction.
+        """
         s = self.prep(s)
 
         if s.shape[0] >= s.shape[1]:
@@ -33,9 +46,13 @@ def train(self, s):
         self.wInv[...] = v * d
 
     def getMags(self):
+        """Get the magnitudes of each component.
+ """ return self.mags def plotMags(self, standardize=True, ax=None, **kwargs): + """Plot the magnitudes of the principal components. + """ result = {} if ax is None: fig = plt.figure() @@ -52,9 +69,13 @@ def plotMags(self, standardize=True, ax=None, **kwargs): return result class PCA(PrincipalComponentsAnalysis): + """Alias for PrincipalComponentsAnalysis + """ pass def demoPCA(): + """Demonstration of PCA. + """ n = 1000 t = np.linspace(0.0, 30*np.pi, n) @@ -67,7 +88,7 @@ def demoPCA(): theta1 = np.pi/6.0 rot1 = np.array([[np.cos(theta1), -np.sin(theta1), 0.0], [np.sin(theta1), np.cos(theta1), 0.0], - [0.0, 0.0, 1.0]]) + [0.0, 0.0, 1.0]]) theta2 = np.pi/4.0 rot2 = np.array([[ np.cos(theta2), 0.0, np.sin(theta2)], @@ -111,6 +132,8 @@ def demoPCA(): fig.tight_layout() def demoPCA2d(): + """Demonstrate PCA in two dimensions. + """ n = 1000 theta = np.pi/6.0 rot = np.array([[np.cos(theta), -np.sin(theta)], @@ -122,7 +145,6 @@ def demoPCA2d(): s = np.vstack((s1, s2)) pca = PCA(s) - y = pca.transform(s) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) diff --git a/cebl/ml/strans/strans.py b/cebl/ml/strans/strans.py index 54a7320..72e478c 100644 --- a/cebl/ml/strans/strans.py +++ b/cebl/ml/strans/strans.py @@ -1,3 +1,5 @@ +"""Linear signal transforms. +""" import matplotlib.pyplot as plt import numpy as np @@ -5,7 +7,7 @@ class STrans: - """Base class for linear, spatial signal transforms. + """Base class for linear signal transforms. """ def __init__(self, s, lags=0, demean=True): s = util.colmat(s) diff --git a/cebl/sig/cwt.pyx b/cebl/sig/cwt.pyx index 224bbef..d86d351 100644 --- a/cebl/sig/cwt.pyx +++ b/cebl/sig/cwt.pyx @@ -13,8 +13,10 @@ class ContinuousWaveletTransform(object): http://fieldtrip.fcdonders.nl/ @article{tallon1997oscillatory, - title={Oscillatory $\gamma$-band (30--70 Hz) activity induced by a visual search task in humans}, - author={Tallon-Baudry, Catherine and Bertrand, Olivier and Delpuech, Claude and Pernier, Jacques}, + title={Oscillatory $\gamma$-band (30--70 Hz) activity induced + by a visual search task in humans}, + author={Tallon-Baudry, Catherine and Bertrand, Olivier and Delpuech, + Claude and Pernier, Jacques}, journal={Journal of Neuroscience}, volume={17}, number={2}, @@ -24,7 +26,8 @@ class ContinuousWaveletTransform(object): } @book{addison2017illustrated, - title={The illustrated wavelet transform handbook: introductory theory and applications in science, engineering, medicine and finance}, + title={The illustrated wavelet transform handbook: introductory theory + and applications in science, engineering, medicine and finance}, author={Addison, Paul S}, year={2017}, publisher={CRC press} @@ -66,41 +69,42 @@ class ContinuousWaveletTransform(object): return (dialation * np.exp(-time**2.0/(2.0*timeScale**2.0)) * np.exp(2.0j * np.pi * freq * time)) - ''' pure python. - def apply(self, s): - ##cdef long nObs, nChan, i, padDiff, padFront, padBack - s = util.colmat(s) + ## #pure python. 
+ ## def apply(self, s): + ## ##cdef long nObs, nChan, i, padDiff, padFront, padBack + ## s = util.colmat(s) - # number of observations and channels - nObs, nChan = s.shape + ## # number of observations and channels + ## nObs, nChan = s.shape - # empty arrays to hold power and phase information - powers = np.zeros((nObs, self.nFreq, nChan), dtype=s.dtype) - phases = np.zeros((nObs, self.nFreq, nChan), dtype=s.dtype) + ## # empty arrays to hold power and phase information + ## powers = np.zeros((nObs, self.nFreq, nChan), dtype=s.dtype) + ## phases = np.zeros((nObs, self.nFreq, nChan), dtype=s.dtype) - for i,wlet in enumerate(self.wavelets): - conv = np.apply_along_axis(lambda d: - np.convolve(d, wlet, mode='full'), - axis=0, arr=s) + ## for i,wlet in enumerate(self.wavelets): + ## conv = np.apply_along_axis(lambda d: + ## np.convolve(d, wlet, mode='full'), + ## axis=0, arr=s) - padDiff = (conv.shape[0] - s.shape[0]) - padFront = padDiff // 2 - padBack = padDiff - padFront - conv = conv[padFront:-padBack,:] + ## padDiff = (conv.shape[0] - s.shape[0]) + ## padFront = padDiff // 2 + ## padBack = padDiff - padFront + ## conv = conv[padFront:-padBack,:] - ##conv = np.apply_along_axis(lambda d: - ## spsig.fftconvolve(d, wlet, mode='same'), - ## axis=0, arr=s) + ## ##conv = np.apply_along_axis(lambda d: + ## ## spsig.fftconvolve(d, wlet, mode='same'), + ## ## axis=0, arr=s) - powers[:,i,:] = 2.0*np.abs(conv)**2 / \ - np.sum(np.abs(wlet))**2 - phases[:,i,:] = np.angle(conv) + ## powers[:,i,:] = 2.0*np.abs(conv)**2 / \ + ## np.sum(np.abs(wlet))**2 + ## phases[:,i,:] = np.angle(conv) - powers /= self.sampRate + ## powers /= self.sampRate + + ## return self.freqs, powers, phases - return self.freqs, powers, phases - ''' def apply(self, s): + # XXX: need to measure if this is really any faster in cython - idfah cdef long nObs, nChan, nFreq, i, j, padDiff, padFront, padBack s = util.colmat(s) diff --git a/cebl/util/clsm.py b/cebl/util/clsm.py index 323ae55..1eeb72e 100644 --- a/cebl/util/clsm.py +++ b/cebl/util/clsm.py @@ -40,7 +40,6 @@ def roc(classProbs): def auc(classProbs): """Area under the roc curve. """ - if len(classProbs) > 2: raise RuntimeError('auc is only implemented for two-class problems.') @@ -63,7 +62,7 @@ def auc(classProbs): return score / float(denom) def bca(classLabels): - """Balanced classification accuracy + """Balanced classification accuracy. 
""" con = confusion(classLabels, normalize=False) return np.mean(np.diag(con) / np.sum(con, axis=0)) diff --git a/requirements.txt b/requirements.txt index 5d92924..de69272 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ -matplotlib -munch -numpy -pylibftdi -scipy -serial -wxpython +matplotlib==2.0 +munch==2.2 +numpy==1.13 +pylibftdi==0.17 +pyserial==3.4 +scipy==1.0 +wxPython==4.0 From 58094ff4518796192d9568b5cb6e6d759ce5dfd9 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Tue, 12 Mar 2019 00:07:15 -0600 Subject: [PATCH 8/9] style: single quotes to double around strings --- cebl/ml/arc.py | 138 +++++++++--------- cebl/ml/autoreg.py | 156 ++++++++++----------- cebl/ml/classifier.py | 18 +-- cebl/ml/da.py | 88 ++++++------ cebl/ml/ensemble.py | 30 ++-- cebl/ml/knn.py | 48 +++---- cebl/ml/linreg.py | 26 ++-- cebl/ml/logreg.py | 48 +++---- cebl/ml/nnet/__init__.py | 1 - cebl/ml/nnet/conv.py | 90 ++++++------ cebl/ml/nnet/convac.py | 124 ++++++++--------- cebl/ml/nnet/convreg.py | 34 ++--- cebl/ml/nnet/elman.py | 42 +++--- cebl/ml/nnet/esn.py | 104 +++++++------- cebl/ml/nnet/forward.py | 92 ++++++------ cebl/ml/nnet/multielman.py | 32 ++--- cebl/ml/nnet/softmax.py | 48 +++---- cebl/ml/nnet/transfer.py | 10 +- cebl/ml/optim/alopex.py | 60 ++++---- cebl/ml/optim/alopexas.py | 126 ++++++++--------- cebl/ml/optim/minibatch.py | 36 ++--- cebl/ml/optim/optable.py | 8 +- cebl/ml/optim/pso.py | 28 ++-- cebl/ml/optim/rprop.py | 28 ++-- cebl/ml/optim/sciopt.py | 24 ++-- cebl/ml/optim/sgd.py | 12 +- cebl/ml/optim/steepest.py | 32 ++--- cebl/ml/optim/tests.py | 44 +++--- cebl/ml/paraminit.py | 2 +- cebl/ml/part.py | 2 +- cebl/ml/regression.py | 4 +- cebl/ml/som.py | 12 +- cebl/ml/stand.py | 10 +- cebl/ml/strans/csp.py | 10 +- cebl/ml/strans/ica.py | 30 ++-- cebl/ml/strans/msf.py | 10 +- cebl/ml/strans/pca.py | 20 +-- cebl/ml/strans/strans.py | 4 +- cebl/sig/__init__.py | 1 - cebl/sig/bandpass.py | 278 ++++++++++++++++++------------------- cebl/sig/psd.py | 70 +++++----- cebl/sig/resamp.py | 86 ++++++------ cebl/sig/smooth.py | 22 +-- cebl/sig/spatial.py | 12 +- cebl/sig/specgram.py | 56 ++++---- cebl/sig/stat.py | 2 +- cebl/util/__init__.py | 2 +- cebl/util/arr.py | 2 +- cebl/util/cache.py | 34 ++--- cebl/util/clsm.py | 8 +- cebl/util/comp.py | 10 +- cebl/util/embed.py | 10 +- cebl/util/errm.py | 2 +- cebl/util/parallel.py | 2 +- cebl/util/shuffle.py | 4 +- 55 files changed, 1115 insertions(+), 1117 deletions(-) diff --git a/cebl/ml/arc.py b/cebl/ml/arc.py index 6bca30d..bfb650c 100644 --- a/cebl/ml/arc.py +++ b/cebl/ml/arc.py @@ -27,7 +27,7 @@ def train(self, classData, **autoRegKwargs): [self.autoRegClass(ss, **autoRegKwargs) for ss in classData] trainErrors = [self.modelErrors(ss) for ss in classData] - self.errorModel = KNN(trainErrors, k=self.k, distMetric='cosine') + self.errorModel = KNN(trainErrors, k=self.k, distMetric="cosine") def modelErrors(self, ss, *args, **kwargs): errors = [] @@ -43,7 +43,7 @@ class ARCC(AutoRegressiveClassifierCosine): pass class AutoRegressiveClassifier3(Classifier): - def __init__(self, classData, center='none', + def __init__(self, classData, center="none", autoRegClass=AutoRegression, **autoRegKwargs): # initialize Classifier base class Classifier.__init__(self, util.segmat(classData[0]).shape[2], len(classData)) @@ -61,17 +61,17 @@ def train(self, classData, center, **autoRegKwargs): center = center.lower() - if center == 'none': + if center == "none": pass - elif center == 'mean': + elif center == "mean": self.center = np.mean([np.mean(te, 
axis=0) for te in trainErrors], axis=0) - elif center == 'median': + elif center == "median": self.center = np.mean([np.median(te, axis=0) for te in trainErrors], axis=0) else: - raise RuntimeError('Invalid center method %s.' % str(center)) + raise RuntimeError("Invalid center method %s." % str(center)) def modelErrors(self, ss, *args, **kwargs): errors = [np.vstack([mdl.rmse(s, *args, **kwargs) for s in ss]) for mdl in self.models] @@ -156,30 +156,30 @@ def demoARC(): model = ARC(trainData, order=order) - print('Training Performance:') - print('=======') - print('Labels: ', model.labelKnown(trainData)) - print('CA: ', model.ca(trainData)) - print('BCA: ', model.bca(trainData)) - print('AUC: ', model.auc(trainData)) + print("Training Performance:") + print("=======") + print("Labels: ", model.labelKnown(trainData)) + print("CA: ", model.ca(trainData)) + print("BCA: ", model.bca(trainData)) + print("AUC: ", model.auc(trainData)) print() - print('Test Performance:') - print('=======') - print('Labels: ', model.labelKnown(testData)) - print('CA: ', model.ca(testData)) - print('BCA: ', model.bca(testData)) - print('AUC: ', model.auc(testData)) + print("Test Performance:") + print("=======") + print("Labels: ", model.labelKnown(testData)) + print("CA: ", model.ca(testData)) + print("BCA: ", model.bca(testData)) + print("AUC: ", model.auc(testData)) print() fig = plt.figure(figsize=(20,6)) axSigs = fig.add_subplot(1, 3, 1) - axSigs.plot(x, trainData[0][0].T, color='blue', linewidth=2, label=r'$\mathbf{sin}(x)$') - axSigs.plot(x, trainData[0].T, color='blue', alpha=0.1, linewidth=2) - axSigs.plot(x, 3.0+trainData[1][0].T, color='red', linewidth=2, label=r'$\mathbf{sin}(2x)$') - axSigs.plot(x, 3.0+trainData[1].T, color='red', alpha=0.1, linewidth=2) - axSigs.set_title('Noisy Sinusoids with Random Phase Shifts') - axSigs.set_xlabel('Time') - axSigs.set_ylabel('Signal') + axSigs.plot(x, trainData[0][0].T, color="blue", linewidth=2, label=r"$\mathbf{sin}(x)$") + axSigs.plot(x, trainData[0].T, color="blue", alpha=0.1, linewidth=2) + axSigs.plot(x, 3.0+trainData[1][0].T, color="red", linewidth=2, label=r"$\mathbf{sin}(2x)$") + axSigs.plot(x, 3.0+trainData[1].T, color="red", alpha=0.1, linewidth=2) + axSigs.set_title("Noisy Sinusoids with Random Phase Shifts") + axSigs.set_xlabel("Time") + axSigs.set_ylabel("Signal") axSigs.legend() axSigs.autoscale(tight=True) @@ -194,33 +194,33 @@ def demoARC(): axTrainErrs = fig.add_subplot(1, 3, 2) #axTrainErrs = fig.add_subplot(1, 2, 1) - axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color='blue') - axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color='red') - axTrainErrs.set_title('Training Relative Modeling Errors') - axTrainErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') - axTrainErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') + axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color="blue") + axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color="red") + axTrainErrs.set_title("Training Relative Modeling Errors") + axTrainErrs.set_xlabel(r"$\mathbf{sin}(x)$ model error") + axTrainErrs.set_ylabel(r"$\mathbf{sin}(2x)$ model error") allTrainErrs = np.vstack(trainErrors) mn = allTrainErrs.min() mx = allTrainErrs.max() - axTrainErrs.plot((mn,mx), (mn,mx), color='grey', linestyle='-.') + axTrainErrs.plot((mn,mx), (mn,mx), color="grey", linestyle="-.") axTrainErrs.grid() axTrainErrs.autoscale(tight=True) axTestErrs = fig.add_subplot(1, 3, 3) #axTestErrs = fig.add_subplot(1, 2, 2) - 
axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color='blue') - axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color='red') - axTestErrs.set_title('Testing Relative Modeling Errors') - axTestErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') - axTestErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') + axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color="blue") + axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color="red") + axTestErrs.set_title("Testing Relative Modeling Errors") + axTestErrs.set_xlabel(r"$\mathbf{sin}(x)$ model error") + axTestErrs.set_ylabel(r"$\mathbf{sin}(2x)$ model error") allTestErrs = np.vstack(testErrors) mn = allTestErrs.min() mx = allTestErrs.max() - axTestErrs.plot((mn,mx), (mn,mx), color='grey', linestyle='-.') + axTestErrs.plot((mn,mx), (mn,mx), color="grey", linestyle="-.") axTestErrs.grid() axTestErrs.autoscale(tight=True) @@ -272,30 +272,30 @@ def demoRARC(): model = RARC(trainData, nRes=512) - print('Training Performance:') - print('=======') - print('Labels: ', model.labelKnown(trainData)) - print('CA: ', model.ca(trainData)) - print('BCA: ', model.bca(trainData)) - print('AUC: ', model.auc(trainData)) + print("Training Performance:") + print("=======") + print("Labels: ", model.labelKnown(trainData)) + print("CA: ", model.ca(trainData)) + print("BCA: ", model.bca(trainData)) + print("AUC: ", model.auc(trainData)) print() - print('Test Performance:') - print('=======') - print('Labels: ', model.labelKnown(testData)) - print('CA: ', model.ca(testData)) - print('BCA: ', model.bca(testData)) - print('AUC: ', model.auc(testData)) + print("Test Performance:") + print("=======") + print("Labels: ", model.labelKnown(testData)) + print("CA: ", model.ca(testData)) + print("BCA: ", model.bca(testData)) + print("AUC: ", model.auc(testData)) print() fig = plt.figure(figsize=(20, 6)) axSigs = fig.add_subplot(1, 3, 1) - axSigs.plot(x, trainData[0][0].T, color='blue', linewidth=2, label=r'$\mathbf{sin}(x)$') - axSigs.plot(x, trainData[0].T, color='blue', alpha=0.1, linewidth=2) - axSigs.plot(x, 3.0+trainData[1][0].T, color='red', linewidth=2, label=r'$\mathbf{sin}(2x)$') - axSigs.plot(x, 3.0+trainData[1].T, color='red', alpha=0.1, linewidth=2) - axSigs.set_title('Noisy Sinusoids with Random Phase Shifts') - axSigs.set_xlabel('Time') - axSigs.set_ylabel('Signal') + axSigs.plot(x, trainData[0][0].T, color="blue", linewidth=2, label=r"$\mathbf{sin}(x)$") + axSigs.plot(x, trainData[0].T, color="blue", alpha=0.1, linewidth=2) + axSigs.plot(x, 3.0+trainData[1][0].T, color="red", linewidth=2, label=r"$\mathbf{sin}(2x)$") + axSigs.plot(x, 3.0+trainData[1].T, color="red", alpha=0.1, linewidth=2) + axSigs.set_title("Noisy Sinusoids with Random Phase Shifts") + axSigs.set_xlabel("Time") + axSigs.set_ylabel("Signal") axSigs.legend() axSigs.autoscale(tight=True) @@ -310,39 +310,39 @@ def demoRARC(): axTrainErrs = fig.add_subplot(1, 3, 2) #axTrainErrs = fig.add_subplot(1, 2, 1) - axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color='blue') - axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color='red') - axTrainErrs.set_title('Training Relative Modeling Errors') - axTrainErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') - axTrainErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') + axTrainErrs.scatter(trainErrors[0][:,0], trainErrors[0][:,1], color="blue") + axTrainErrs.scatter(trainErrors[1][:,0], trainErrors[1][:,1], color="red") + axTrainErrs.set_title("Training Relative Modeling Errors") + 
axTrainErrs.set_xlabel(r"$\mathbf{sin}(x)$ model error") + axTrainErrs.set_ylabel(r"$\mathbf{sin}(2x)$ model error") allTrainErrs = np.vstack(trainErrors) mn = allTrainErrs.min() mx = allTrainErrs.max() - axTrainErrs.plot((mn,mx), (mn,mx), color='grey', linestyle='-.') + axTrainErrs.plot((mn,mx), (mn,mx), color="grey", linestyle="-.") axTrainErrs.grid() axTrainErrs.autoscale(tight=True) axTestErrs = fig.add_subplot(1, 3, 3) #axTestErrs = fig.add_subplot(1, 2, 2) - axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color='blue') - axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color='red') - axTestErrs.set_title('Testing Relative Modeling Errors') - axTestErrs.set_xlabel(r'$\mathbf{sin}(x)$ model error') - axTestErrs.set_ylabel(r'$\mathbf{sin}(2x)$ model error') + axTestErrs.scatter(testErrors[0][:,0], testErrors[0][:,1], color="blue") + axTestErrs.scatter(testErrors[1][:,0], testErrors[1][:,1], color="red") + axTestErrs.set_title("Testing Relative Modeling Errors") + axTestErrs.set_xlabel(r"$\mathbf{sin}(x)$ model error") + axTestErrs.set_ylabel(r"$\mathbf{sin}(2x)$ model error") allTestErrs = np.vstack(testErrors) mn = allTestErrs.min() mx = allTestErrs.max() - axTestErrs.plot((mn,mx), (mn,mx), color='grey', linestyle='-.') + axTestErrs.plot((mn,mx), (mn,mx), color="grey", linestyle="-.") axTestErrs.grid() axTestErrs.autoscale(tight=True) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoARC() plt.show() diff --git a/cebl/ml/autoreg.py b/cebl/ml/autoreg.py index ce5090a..115e134 100644 --- a/cebl/ml/autoreg.py +++ b/cebl/ml/autoreg.py @@ -22,16 +22,16 @@ def __init__(self, ss, horizon, regClass, *args, **kwargs): self.train(ss, *args, **kwargs) def getInputs(self, ss): - raise NotImplementedError('getInputs not implemented.') + raise NotImplementedError("getInputs not implemented.") def getTargets(self, ss): - raise NotImplementedError('getTargets not implemented.') + raise NotImplementedError("getTargets not implemented.") def train(self, ss, *args, **kwargs): - raise NotImplementedError('train not implemented.') + raise NotImplementedError("train not implemented.") def eval(self, ss, *args, returnResid=False, **kwargs): - raise NotImplementedError('eval not implemented.') + raise NotImplementedError("eval not implemented.") def resid(self, ss, *args, **kwargs): pred, resid = self.eval(ss, *args, returnResid=True, **kwargs) @@ -111,8 +111,8 @@ def demoAutoRegressionSine(): pred, resid = arFit.eval((s,), returnResid=True) - plt.plot(time, s, color='blue') - plt.plot(time[order:], pred[0], color='red') + plt.plot(time, s, color="blue") + plt.plot(time[order:], pred[0], color="red") def demoAutoRegressionMulti(): time = np.linspace(0.0, 10.0*np.pi, 5000) @@ -156,59 +156,59 @@ def demoAutoRegressionMulti(): fig = plt.figure(figsize=(19, 8)) axTrainPred = fig.add_subplot(2, 3, 1) - axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color='gray', linewidth=2) + axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color="gray", linewidth=2) axTrainPred.plot(timeTrain[order:], predTrain[0]-sepTrain, linewidth=1) axTrainPred.autoscale(tight=True) - axTrainPred.set_title('Train Predictions') - axTrainPred.set_xlabel('Time') + axTrainPred.set_title("Train Predictions") + axTrainPred.set_xlabel("Time") axTrainPred.set_yticks(-sepTrain) - axTrainPred.set_yticklabels(['s1', 's2', 's3']) + axTrainPred.set_yticklabels(["s1", "s2", "s3"]) axTestPred = fig.add_subplot(2, 3, 2) - axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) + 
axTestPred.plot(timeTest, dataTest[0]-sepTest, color="gray", linewidth=2) axTestPred.plot(timeTest[order:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) - axTestPred.set_title('Test Predictions') - axTestPred.set_xlabel('Time') + axTestPred.set_title("Test Predictions") + axTestPred.set_xlabel("Time") axTestPred.set_yticks(-sepTrain) - axTestPred.set_yticklabels(['s1', 's2', 's3']) + axTestPred.set_yticklabels(["s1", "s2", "s3"]) axWeights = fig.add_subplot(2, 3, 3) - img = axWeights.imshow(arFit.model.weights, aspect='auto', interpolation='none') + img = axWeights.imshow(arFit.model.weights, aspect="auto", interpolation="none") cbar = plt.colorbar(img) - cbar.set_label('Weight') - axWeights.set_title('Model Weights') - axWeights.set_xlabel('Output') - axWeights.set_ylabel('Input') + cbar.set_label("Weight") + axWeights.set_title("Model Weights") + axWeights.set_xlabel("Output") + axWeights.set_ylabel("Input") axWeights.set_xticks(range(arFit.model.weights.shape[1])) - axWeights.set_xticklabels(['s1', 's2', 's3']) + axWeights.set_xticklabels(["s1", "s2", "s3"]) axWeights.set_yticks(range(arFit.model.weights.shape[0])) - axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ['bias'])) + axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ["bias"])) axWeights.autoscale(tight=True) axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[order:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) - axTrainResid.set_title('Train Residuals') - axTrainResid.set_xlabel('Time') + axTrainResid.set_title("Train Residuals") + axTrainResid.set_xlabel("Time") axTrainResid.set_yticks(-sepTrain) - axTrainResid.set_yticklabels(['s1', 's2', 's3']) + axTrainResid.set_yticklabels(["s1", "s2", "s3"]) axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[order:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) - axTestResid.set_title('Test Residuals') - axTestResid.set_xlabel('Time') + axTestResid.set_title("Test Residuals") + axTestResid.set_xlabel("Time") axTestResid.set_yticks(-sepTrain) - axTestResid.set_yticklabels(['s1', 's2', 's3']) + axTestResid.set_yticklabels(["s1", "s2", "s3"]) axTestResidDist = fig.add_subplot(2, 3, 6) - #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) + #axTestResidDist.hist(residTest, histtype="stepfilled", normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) - axTestResidDist.legend(['s1', 's2', 's3']) - axTestResidDist.set_title('Test Residual Distribution') - axTestResidDist.set_xlabel('Residual') - axTestResidDist.set_ylabel('Density') + axTestResidDist.legend(["s1", "s2", "s3"]) + axTestResidDist.set_title("Test Residual Distribution") + axTestResidDist.set_xlabel("Residual") + axTestResidDist.set_ylabel("Density") fig.tight_layout() @@ -299,59 +299,59 @@ def demoAutoRegressionUni(): fig = plt.figure(figsize=(19, 8)) axTrainPred = fig.add_subplot(2, 3, 1) - axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color='gray', linewidth=2) + axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color="gray", linewidth=2) axTrainPred.plot(timeTrain[order:], predTrain[0]-sepTrain, linewidth=1) axTrainPred.autoscale(tight=True) - axTrainPred.set_title('Train Predictions') - axTrainPred.set_xlabel('Time') + axTrainPred.set_title("Train Predictions") + axTrainPred.set_xlabel("Time") axTrainPred.set_yticks(-sepTrain) - axTrainPred.set_yticklabels(['s1', 's2', 's3']) + axTrainPred.set_yticklabels(["s1", "s2", "s3"]) axTestPred = fig.add_subplot(2, 3, 2) 
- axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) + axTestPred.plot(timeTest, dataTest[0]-sepTest, color="gray", linewidth=2) axTestPred.plot(timeTest[order:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) - axTestPred.set_title('Test Predictions') - axTestPred.set_xlabel('Time') + axTestPred.set_title("Test Predictions") + axTestPred.set_xlabel("Time") axTestPred.set_yticks(-sepTrain) - axTestPred.set_yticklabels(['s1', 's2', 's3']) + axTestPred.set_yticklabels(["s1", "s2", "s3"]) axWeights = fig.add_subplot(2, 3, 3) - #img = axWeights.imshow(arFit.model.weights, aspect='auto', interpolation='none') + #img = axWeights.imshow(arFit.model.weights, aspect="auto", interpolation="none") #cbar = plt.colorbar(img) - #cbar.set_label('Weight') - #axWeights.set_title('Model Weights') - #axWeights.set_xlabel('Output') - #axWeights.set_ylabel('Input') + #cbar.set_label("Weight") + #axWeights.set_title("Model Weights") + #axWeights.set_xlabel("Output") + #axWeights.set_ylabel("Input") #axWeights.set_xticks(range(arFit.model.weights.shape[1])) - #axWeights.set_xticklabels(['s1', 's2', 's3']) + #axWeights.set_xticklabels(["s1", "s2", "s3"]) #axWeights.set_yticks(range(arFit.model.weights.shape[0])) - #axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ['bias'])) + #axWeights.set_yticklabels(list(range(1, arFit.model.weights.shape[0]) + ["bias"])) #axWeights.autoscale(tight=True) axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[order:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) - axTrainResid.set_title('Train Residuals') - axTrainResid.set_xlabel('Time') + axTrainResid.set_title("Train Residuals") + axTrainResid.set_xlabel("Time") axTrainResid.set_yticks(-sepTrain) - axTrainResid.set_yticklabels(['s1', 's2', 's3']) + axTrainResid.set_yticklabels(["s1", "s2", "s3"]) axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[order:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) - axTestResid.set_title('Test Residuals') - axTestResid.set_xlabel('Time') + axTestResid.set_title("Test Residuals") + axTestResid.set_xlabel("Time") axTestResid.set_yticks(-sepTrain) - axTestResid.set_yticklabels(['s1', 's2', 's3']) + axTestResid.set_yticklabels(["s1", "s2", "s3"]) axTestResidDist = fig.add_subplot(2, 3, 6) - #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) + #axTestResidDist.hist(residTest, histtype="stepfilled", normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) - axTestResidDist.legend(['s1', 's2', 's3']) - axTestResidDist.set_title('Test Residual Distribution') - axTestResidDist.set_xlabel('Residual') - axTestResidDist.set_ylabel('Density') + axTestResidDist.legend(["s1", "s2", "s3"]) + axTestResidDist.set_title("Test Residual Distribution") + axTestResidDist.set_xlabel("Residual") + axTestResidDist.set_ylabel("Density") fig.tight_layout() @@ -433,22 +433,22 @@ def demoRecurrentAutoRegression(): fig = plt.figure(figsize=(19, 8)) axTrainPred = fig.add_subplot(2, 3, 1) - axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color='gray', linewidth=2) + axTrainPred.plot(timeTrain, dataTrain[0]-sepTrain, color="gray", linewidth=2) axTrainPred.plot(timeTrain[horizon:], predTrain[0]-sepTrain, linewidth=1) axTrainPred.autoscale(tight=True) - axTrainPred.set_title('Train Predictions') - axTrainPred.set_xlabel('Time') + axTrainPred.set_title("Train Predictions") + axTrainPred.set_xlabel("Time") axTrainPred.set_yticks(-sepTrain) - 
axTrainPred.set_yticklabels(['s1', 's2', 's3']) + axTrainPred.set_yticklabels(["s1", "s2", "s3"]) axTestPred = fig.add_subplot(2, 3, 2) - axTestPred.plot(timeTest, dataTest[0]-sepTest, color='gray', linewidth=2) + axTestPred.plot(timeTest, dataTest[0]-sepTest, color="gray", linewidth=2) axTestPred.plot(timeTest[horizon:], predTest[0]-sepTest, linewidth=1) axTestPred.autoscale(tight=True) - axTestPred.set_title('Test Predictions') - axTestPred.set_xlabel('Time') + axTestPred.set_title("Test Predictions") + axTestPred.set_xlabel("Time") axTestPred.set_yticks(-sepTrain) - axTestPred.set_yticklabels(['s1', 's2', 's3']) + axTestPred.set_yticklabels(["s1", "s2", "s3"]) axWeights = fig.add_subplot(2, 3, 3) rarFit.model.reservoir.plotActDensity(dataTest, ax=axWeights) @@ -456,31 +456,31 @@ def demoRecurrentAutoRegression(): axTrainResid = fig.add_subplot(2, 3, 4) axTrainResid.plot(timeTrain[horizon:], residTrain[0]-sepTrain) axTrainResid.autoscale(tight=True) - axTrainResid.set_title('Train Residuals') - axTrainResid.set_xlabel('Time') + axTrainResid.set_title("Train Residuals") + axTrainResid.set_xlabel("Time") axTrainResid.set_yticks(-sepTrain) - axTrainResid.set_yticklabels(['s1', 's2', 's3']) + axTrainResid.set_yticklabels(["s1", "s2", "s3"]) axTestResid = fig.add_subplot(2, 3, 5) axTestResid.plot(timeTest[horizon:], residTest[0]-sepTest) axTestResid.autoscale(tight=True) - axTestResid.set_title('Test Residuals') - axTestResid.set_xlabel('Time') + axTestResid.set_title("Test Residuals") + axTestResid.set_xlabel("Time") axTestResid.set_yticks(-sepTrain) - axTestResid.set_yticklabels(['s1', 's2', 's3']) + axTestResid.set_yticklabels(["s1", "s2", "s3"]) axTestResidDist = fig.add_subplot(2, 3, 6) - #axTestResidDist.hist(residTest, histtype='stepfilled', normed=True) + #axTestResidDist.hist(residTest, histtype="stepfilled", normed=True) axTestResidDist.hist(residTest[0], stacked=True, normed=True) - axTestResidDist.legend(['s1', 's2', 's3']) - axTestResidDist.set_title('Test Residual Distribution') - axTestResidDist.set_xlabel('Residual') - axTestResidDist.set_ylabel('Density') + axTestResidDist.legend(["s1", "s2", "s3"]) + axTestResidDist.set_title("Test Residual Distribution") + axTestResidDist.set_xlabel("Residual") + axTestResidDist.set_ylabel("Density") fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoAutoRegressionSine() demoAutoRegressionMulti() #demoAutoRegressionUni() diff --git a/cebl/ml/classifier.py b/cebl/ml/classifier.py index 2edf2bc..8e46130 100644 --- a/cebl/ml/classifier.py +++ b/cebl/ml/classifier.py @@ -30,7 +30,7 @@ def train(self): classData: Training data as a list of numpy arrays with shape (nCls,nObs[,nIn]). """ - raise NotImplementedError('train not implemented.') + raise NotImplementedError("train not implemented.") def discrim(self, x, *args, **kwargs): """Discriminant function for a classifier. This method is used to @@ -70,17 +70,17 @@ def probs(self, x): Notes: If a classifier does not follow a probabilistic model, - it can simple return 1's for positive labels and zeros + it can simple return 1"s for positive labels and zeros everywhere else. """ - raise NotImplementedError('probs not implemented.') + raise NotImplementedError("probs not implemented.") def probsKnown(self, classData, *args, **kwargs): """Assign probabilities to data with known class membership. 
""" return [self.probs(cls, *args, **kwargs) for cls in classData] - def label(self, x, *args, method='single', **kwargs): + def label(self, x, *args, method="single", **kwargs): """Assign class labels to novel data. Args: @@ -96,16 +96,16 @@ def label(self, x, *args, method='single', **kwargs): method = method.lower() - if method == 'single': + if method == "single": return self.labelSingle(x, *args, **kwargs) - elif method == 'vote': + elif method == "vote": return self.labelVote(x, *args, **kwargs) - elif method == 'intersect': + elif method == "intersect": return self.labelIntersect(x, *args, **kwargs) - elif method == 'union': + elif method == "union": return self.labelUnion(x, *args, **kwargs) else: - raise RuntimeError('Unknown method.') + raise RuntimeError("Unknown method.") def labelSingle(self, x, *args, **kwargs): dv = self.discrim(x, *args, **kwargs) diff --git a/cebl/ml/da.py b/cebl/ml/da.py index 8ef0170..dc33101 100644 --- a/cebl/ml/da.py +++ b/cebl/ml/da.py @@ -108,7 +108,7 @@ def train(self, classData): #try: # cvi = np.linalg.inv(cv) #except np.linalg.LinAlgError: - # raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.') + # raise RuntimeError("Failed to invert covariance matrix, consider using shrinkage.") try: # pylint: disable=no-member @@ -116,14 +116,14 @@ def train(self, classData): except Exception as e: raise RuntimeError( - 'Pseudo inversion of covariance matrix failed: ' + str(e)) + "Pseudo inversion of covariance matrix failed: " + str(e)) self.invCovs.append(cvi) sign, logDet = np.linalg.slogdet(cv) if sign == 0: raise RuntimeError( - 'Covariance matrix has zero determinant, consider using shrinkage.') + "Covariance matrix has zero determinant, consider using shrinkage.") #self.intercepts[i] = logDet - 2.0*logPriors[i] @@ -249,19 +249,19 @@ def demoQDA2d(): greenLabel = model.label(green) blueLabel = model.label(blue) - print('red labels\n-------') + print("red labels\n-------") print(redLabel) print(redLabel.shape) - print('\ngreen labels\n-------') + print("\ngreen labels\n-------") print(greenLabel) print(greenLabel.shape) - print('\nblue labels\n-------') + print("\nblue labels\n-------") print(blueLabel) print(blueLabel.shape) - print('ca: ', model.ca(data)) - print('bca: ', model.bca(data)) - print('confusion:\n', model.confusion(data)) + print("ca: ", model.ca(data)) + print("bca: ", model.bca(data)) + print("confusion:\n", model.confusion(data)) # first figure shows training data and class intersections fig = plt.figure() @@ -299,12 +299,12 @@ def demoQDA2d(): diffRG = pRed - pGreen diffRB = pRed - pBlue diffGB = pGreen - pBlue - ax.contour(x, y, diffRG, colors='black', levels=(0,)) - ax.contour(x, y, diffRB, colors='black', levels=(0,)) - ax.contour(x, y, diffGB, colors='black', levels=(0,)) + ax.contour(x, y, diffRG, colors="black", levels=(0,)) + ax.contour(x, y, diffRB, colors="black", levels=(0,)) + ax.contour(x, y, diffGB, colors="black", levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2, 2, 2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection="3d") # straight class colors for suface plots color = np.reshape([dRed, dGreen, dBlue], (3, x.shape[0], x.shape[1])) @@ -323,10 +323,10 @@ def demoQDA2d(): # probability density surface surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? 
 # third figure shows 3d plots of probabilities
-    ax = fig.add_subplot(2, 2, 3, projection='3d')
+    ax = fig.add_subplot(2, 2, 3, projection="3d")

     # straight class colors for suface plots
     color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1]))
@@ -345,24 +345,24 @@ def demoQDA2d():
     # probability density surface
     surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True)
-    surf.set_edgecolor('black') # add edgecolor back in, bug?
+    surf.set_edgecolor("black") # add edgecolor back in, bug?

     """
     # third figure shows contours and color image of probability densities
     ax = fig.add_subplot(2, 2, 3)
     #ax.pcolor(x, y, pMax)
-    ax.imshow(colorFlip, origin='lower',
-        extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto')
+    ax.imshow(colorFlip, origin="lower",
+        extent=(mn[0], mx[0], mn[1], mx[1]), aspect="auto")

     # contours
     nLevel = 6
-    cs = ax.contour(x, y, pMax, colors='black',
+    cs = ax.contour(x, y, pMax, colors="black",
         levels=np.linspace(np.min(pMax), np.max(pMax), nLevel))
     cs.clabel(fontsize=6)
     """

     # fourth figure
-    ax = fig.add_subplot(2, 2, 4, projection='3d')
+    ax = fig.add_subplot(2, 2, 4, projection="3d")

     labels = model.label(z)
     lMax = np.reshape(labels, x.shape)
@@ -370,7 +370,7 @@ def demoQDA2d():
     surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip,
         linewidth=0.02)#, antialiased=False)
     #surf.set_edgecolor(np.vstack(color))
-    surf.set_edgecolor('black')
+    surf.set_edgecolor("black")

     fig.tight_layout()

@@ -448,19 +448,19 @@ def train(self, classData):
         #try:
         #    self.invCov = np.linalg.inv(self.avgCov)
         #except np.linalg.LinAlgError:
-        #    raise RuntimeError('Failed to invert covariance matrix, consider using shrinkage.')
+        #    raise RuntimeError("Failed to invert covariance matrix, consider using shrinkage.")

         try:
             # pylint: disable=no-member
             self.invCov = sp.linalg.pinvh(self.avgCov)
         except Exception as e:
             raise RuntimeError(
-                'Pseudo inversion of covariance matrix failed: ' + str(e))
+                "Pseudo inversion of covariance matrix failed: " + str(e))

         sign, self.logDet = np.linalg.slogdet(self.avgCov)
         if sign == 0:
             raise RuntimeError(
-                'Covariance matrix has zero determinant, consider using shrinkage.')
+                "Covariance matrix has zero determinant, consider using shrinkage.")

         # model coefficients
         # (ndim, nCls) = (ndim, ndim) x (ndim, nCls)
@@ -542,7 +542,7 @@ def probs(self, x):
         return dens / dens.sum(axis=1)[:,None]

     # XXX: hack alert
-    # doesn't work for QDA or other algorithms where discrim is not comparable - idfah
+    # doesn't work for QDA or other algorithms where discrim is not comparable - idfah
     # handles ties better since probs may be equal within precision but not discrims
     def auc(self, classData, *args, **kwargs):
         return util.auc(self.discrimKnown(classData, *args, **kwargs))
@@ -585,16 +585,16 @@ def demoLDA2d():
     greenLabel = model.label(green)
     blueLabel = model.label(blue)

-    print('red labels\n-------')
+    print("red labels\n-------")
     print(redLabel)
-    print('\ngreen labels\n-------')
+    print("\ngreen labels\n-------")
     print(greenLabel)
-    print('\nblue labels\n-------')
+    print("\nblue labels\n-------")
     print(blueLabel)

-    print('ca: ', model.ca(data))
-    print('bca: ', model.bca(data))
-    print('confusion:\n', model.confusion(data))
+    print("ca: ", model.ca(data))
+    print("bca: ", model.bca(data))
+    print("confusion:\n", model.confusion(data))

     # first figure shows training data and class intersections
     fig = plt.figure()
@@ -625,9 +625,9 @@ def demoLDA2d():
     diffRG = pRed - pGreen
     diffRB = pRed - pBlue
     diffGB = pGreen - pBlue
-    ax.contour(x, y, diffRG, 
colors='black', levels=(0,)) - ax.contour(x, y, diffRB, colors='black', levels=(0,)) - ax.contour(x, y, diffGB, colors='black', levels=(0,)) + ax.contour(x, y, diffRG, colors="black", levels=(0,)) + ax.contour(x, y, diffRB, colors="black", levels=(0,)) + ax.contour(x, y, diffGB, colors="black", levels=(0,)) # red, green, blue and max probability densities densities = model.dens(z) @@ -637,7 +637,7 @@ def demoLDA2d(): dMax = np.reshape(np.max(densities, axis=1), x.shape) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2, 2, 2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection="3d") # straight class colors for suface plots color = np.reshape([dRed, dGreen, dBlue], (3, x.shape[0], x.shape[1])) @@ -656,10 +656,10 @@ def demoLDA2d(): # probability density surface surf = ax.plot_surface(x, y, dMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? # third figure shows 3d plots of probabilities - ax = fig.add_subplot(2, 2, 3, projection='3d') + ax = fig.add_subplot(2, 2, 3, projection="3d") # straight class colors for suface plots color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) @@ -678,24 +678,24 @@ def demoLDA2d(): # probability density surface surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? """ # third figure shows contours and color image of probability densities ax = fig.add_subplot(2, 2, 3) #ax.pcolor(x, y, pMax) - ax.imshow(colorFlip, origin='lower', - extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') + ax.imshow(colorFlip, origin="lower", + extent=(mn[0], mx[0], mn[1], mx[1]), aspect="auto") # contours nLevel=6 - cs = ax.contour(x, y, pMax, colors='black', + cs = ax.contour(x, y, pMax, colors="black", levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) """ # fourth figure - ax = fig.add_subplot(2, 2, 4, projection='3d') + ax = fig.add_subplot(2, 2, 4, projection="3d") labels = model.label(z) lMax = np.reshape(labels, x.shape) @@ -703,12 +703,12 @@ def demoLDA2d(): surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) #surf.set_edgecolor(np.vstack(color)) - surf.set_edgecolor('black') + surf.set_edgecolor("black") fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoLDA2d() demoQDA2d() plt.show() diff --git a/cebl/ml/ensemble.py b/cebl/ml/ensemble.py index 8aa5e64..db67041 100644 --- a/cebl/ml/ensemble.py +++ b/cebl/ml/ensemble.py @@ -60,8 +60,8 @@ def train(self, x, g, regClass, *args, **kwargs): if dimPerModel is not None: if xSub.ndim != 2: - raise RuntimeError('Cannot subset dimensions for x with shape ' + \ - str(x.shape) + '.') + raise RuntimeError("Cannot subset dimensions for x with shape " + \ + str(x.shape) + ".") dimInd = np.arange(self.nIn) np.random.shuffle(dimInd) @@ -90,14 +90,14 @@ def evalModels(self, x, *args, **kwargs): return np.array(ys) - def eval(self, x, method='mean', *args, **kwargs): + def eval(self, x, method="mean", *args, **kwargs): ys = self.evalModels(x, *args, **kwargs) - if method == 'mean': + if method == "mean": return np.mean(ys, axis=0) - elif method == 'median': + elif method == "median": return np.median(ys, axis=0) else: - raise RuntimeError('Invalid method %s.' % str(method)) + raise RuntimeError("Invalid method %s." 
% str(method)) class ClassEnsemble(Classifier): def __init__(self, classData, nModels=10, obsFrac=0.5, @@ -145,8 +145,8 @@ def train(self, classData, clsClass, *args, **kwargs): if dimPerModel is not None: if classDataSub[0].ndim != 2: - raise RuntimeError('Cannot subset dimensions with shape ' + \ - str(classDataSub[0].shape) + '.') + raise RuntimeError("Cannot subset dimensions with shape " + \ + str(classDataSub[0].shape) + ".") dimInd = np.arange(self.nIn) np.random.shuffle(dimInd) @@ -200,12 +200,12 @@ def demoClassEnsemble(): c2Probs = model.probs(c2) c3Probs = model.probs(c3) - print('c1:') + print("c1:") print(model.label(c1)) - print('c2:') + print("c2:") print(model.label(c2)) print(model.probs(c1)) - print('c3:') + print("c3:") print(model.label(c3)) print(model.probs(c1)) @@ -213,10 +213,10 @@ def demoClassEnsemble(): xProbs = model.probs(x) plt.plot(x, xProbs, linewidth=2) - plt.scatter(c1, np.zeros_like(c1), color='blue') - plt.scatter(c2, np.zeros_like(c2), color='green') - plt.scatter(c3, np.zeros_like(c3), color='red') + plt.scatter(c1, np.zeros_like(c1), color="blue") + plt.scatter(c2, np.zeros_like(c2), color="green") + plt.scatter(c3, np.zeros_like(c3), color="red") -if __name__ == '__main__': +if __name__ == "__main__": demoClassEnsemble() plt.show() diff --git a/cebl/ml/knn.py b/cebl/ml/knn.py index b221dd2..bf1ec8f 100644 --- a/cebl/ml/knn.py +++ b/cebl/ml/knn.py @@ -8,15 +8,15 @@ class KNearestNeighbors(Classifier): - def __init__(self, classData, k=1, distMetric='euclidean', **kwargs): + def __init__(self, classData, k=1, distMetric="euclidean", **kwargs): Classifier.__init__(self, util.colmat(classData[0]).shape[1], len(classData)) self.k = k minObs = min([len(cls) for cls in classData]) if self.k > minObs: - raise RuntimeError(('k=%d exceeds the number of examples in ' % k) + - ('smallest training class %d.' % minObs)) + raise RuntimeError(("k=%d exceeds the number of examples in " % k) + + ("smallest training class %d." 
% minObs)) if callable(distMetric): self.distFunc = lambda x1, x2: distMetric(x1, x2, **kwargs) @@ -95,21 +95,21 @@ def demoKNN(): print(model.probs(classData[1]).dtype) print(model.probs(classData[2]).dtype) - print('red labels\n-------') + print("red labels\n-------") print(redLabel) - print('\ngreen labels\n-------') + print("\ngreen labels\n-------") print(greenLabel) - print('\nblue labels\n-------') + print("\nblue labels\n-------") print(blueLabel) - print('ca:', model.ca(classData)) - print('bca:', model.bca(classData)) - print('confusion:\n', model.confusion(classData)) + print("ca:", model.ca(classData)) + print("bca:", model.bca(classData)) + print("confusion:\n", model.confusion(classData)) # first figure shows training data and class intersections fig = plt.figure() ax = fig.add_subplot(2, 2, 1) - ax.set_title('Class Data') + ax.set_title("Class Data") # training data ax.scatter(red[:,0], red[:,1], color="red") @@ -136,13 +136,13 @@ def demoKNN(): ##diffRG = pRed - pGreen ##diffRB = pRed - pBlue ##diffGB = pGreen - pBlue - ##ax.contour(x, y, diffRG, colors='black', levels=(0,)) - ##ax.contour(x, y, diffRB, colors='black', levels=(0,)) - ##ax.contour(x, y, diffGB, colors='black', levels=(0,)) + ##ax.contour(x, y, diffRG, colors="black", levels=(0,)) + ##ax.contour(x, y, diffRB, colors="black", levels=(0,)) + ##ax.contour(x, y, diffGB, colors="black", levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2, 2, 2, projection='3d') - ax.set_title('P(C = k)') + ax = fig.add_subplot(2, 2, 2, projection="3d") + ax.set_title("P(C = k)") # straight class colors for suface plots color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) @@ -164,33 +164,33 @@ def demoKNN(): #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? 
# third figure shows contours and color image of probability densities ax = fig.add_subplot(2, 2, 3) - ax.set_title('max_K P(C = k)') + ax.set_title("max_K P(C = k)") #ax.pcolor(x, y, pMax) - ax.imshow(colorFlip, origin='lower', - extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') + ax.imshow(colorFlip, origin="lower", + extent=(mn[0], mx[0], mn[1], mx[1]), aspect="auto") # contours nLevel = 4 - cs = ax.contour(x, y, pMax, colors='black', + cs = ax.contour(x, y, pMax, colors="black", levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) # fourth figure - ax = fig.add_subplot(2, 2, 4, projection='3d') - ax.set_title('argmax_K P(C = k)') + ax = fig.add_subplot(2, 2, 4, projection="3d") + ax.set_title("argmax_K P(C = k)") labels = model.label(z) lMax = np.reshape(labels, x.shape) surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) - surf.set_edgecolor('black') + surf.set_edgecolor("black") -if __name__ == '__main__': +if __name__ == "__main__": demoKNN() plt.show() diff --git a/cebl/ml/linreg.py b/cebl/ml/linreg.py index ff7162d..f0def17 100644 --- a/cebl/ml/linreg.py +++ b/cebl/ml/linreg.py @@ -77,17 +77,17 @@ def demoRidgeRegression1dQuad(): fig = plt.figure() ax = fig.add_subplot(1, 1, 1) - ax.plot(x, y, marker='o', color='black') + ax.plot(x, y, marker="o", color="black") linearModel = RidgeRegression(x, y) - ax.plot(x, linearModel.eval(x), color='green') + ax.plot(x, linearModel.eval(x), color="green") x2 = x.repeat(2).reshape((-1, 2)) x2[:,1] **= 2 quadraticModel = RidgeRegression(x2, y) - ax.plot(x, quadraticModel.eval(x2), color='red') + ax.plot(x, quadraticModel.eval(x2), color="red") def demoRidgeRegression1d(): x = np.linspace(0.0, 3.0, 50) @@ -101,7 +101,7 @@ def demoRidgeRegression1d(): model = RidgeRegression(x, y) - ax.plot(x, model.eval(x), color='green') + ax.plot(x, model.eval(x), color="green") class LinearRegressionElastic(Regression, optim.Optable): @@ -222,26 +222,26 @@ def demoLinearRegressionElastic(): fig = plt.figure() axLines = fig.add_subplot(1, 2, 1) - #ax.scatter(x, g1, color='red') - #ax.scatter(x, g2, color='green') - #ax.scatter(x, g3, color='blue') + #ax.scatter(x, g1, color="red") + #ax.scatter(x, g2, color="green") + #ax.scatter(x, g3, color="blue") - axLines.plot(y, color='green') + axLines.plot(y, color="green") axWeights = fig.add_subplot(1, 2, 2) - img = axWeights.imshow(np.abs(model.weights), aspect='auto', interpolation='none') + img = axWeights.imshow(np.abs(model.weights), aspect="auto", interpolation="none") cbar = plt.colorbar(img) - cbar.set_label('Weight Magnitude') + cbar.set_label("Weight Magnitude") axWeights.set_xticks((0, 1)) - axWeights.set_xticklabels(('y1', 'y2')) + axWeights.set_xticklabels(("y1", "y2")) axWeights.set_yticks((0, 1, 2, 3)) - axWeights.set_yticklabels(('x1', 'x2', 'x3', 'bias')) + axWeights.set_yticklabels(("x1", "x2", "x3", "bias")) print(model.weights) -if __name__ == '__main__': +if __name__ == "__main__": demoRidgeRegression1d() demoRidgeRegression1dQuad() demoLinearRegressionElastic() diff --git a/cebl/ml/logreg.py b/cebl/ml/logreg.py index 31a259a..9b0ceaa 100644 --- a/cebl/ml/logreg.py +++ b/cebl/ml/logreg.py @@ -148,20 +148,20 @@ def demoLogisticRegression1d(): c2Probs = model.probs(c2) c3Probs = model.probs(c3) - print('c1:') + print("c1:") print(model.label(c1)) - print('c2:') + print("c2:") print(model.label(c2)) - print('c3:') + print("c3:") print(model.label(c3)) x = np.linspace(-2.0, 4.0, 500) xProbs = model.probs(x) plt.plot(x, xProbs, 
linewidth=2) - plt.scatter(c1, np.zeros_like(c1), color='blue') - plt.scatter(c2, np.zeros_like(c2), color='green') - plt.scatter(c3, np.zeros_like(c3), color='red') + plt.scatter(c1, np.zeros_like(c1), color="blue") + plt.scatter(c2, np.zeros_like(c2), color="green") + plt.scatter(c3, np.zeros_like(c3), color="red") def demoLogisticRegression2d(): # covariance matrix for each training class @@ -190,7 +190,7 @@ def demoLogisticRegression2d(): model = LogisticRegression(classData=classData, verbose=True) #optimFunc=optim.rprop, accuracy=0.0, precision=0.0, maxIter=100, penalty=0.3) #print(model.weights) - #plt.imshow(np.abs(model.weights), interpolation='none') + #plt.imshow(np.abs(model.weights), interpolation="none") #plt.colorbar() # find class labels @@ -198,16 +198,16 @@ def demoLogisticRegression2d(): greenLabel = model.label(green) blueLabel = model.label(blue) - print('red labels\n-------') + print("red labels\n-------") print(redLabel) - print('\ngreen labels\n-------') + print("\ngreen labels\n-------") print(greenLabel) - print('\nblue labels\n-------') + print("\nblue labels\n-------") print(blueLabel) - print('ca:', model.ca(classData)) - print('bca:', model.bca(classData)) - print('confusion:\n', model.confusion(classData)) + print("ca:", model.ca(classData)) + print("bca:", model.bca(classData)) + print("confusion:\n", model.confusion(classData)) # first figure shows training data and class intersections fig = plt.figure() @@ -238,12 +238,12 @@ def demoLogisticRegression2d(): diffRG = pRed - pGreen diffRB = pRed - pBlue diffGB = pGreen - pBlue - ax.contour(x, y, diffRG, colors='black', levels=(0,)) - ax.contour(x, y, diffRB, colors='black', levels=(0,)) - ax.contour(x, y, diffGB, colors='black', levels=(0,)) + ax.contour(x, y, diffRG, colors="black", levels=(0,)) + ax.contour(x, y, diffRB, colors="black", levels=(0,)) + ax.contour(x, y, diffGB, colors="black", levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2, 2, 2, projection='3d') + ax = fig.add_subplot(2, 2, 2, projection="3d") # straight class colors for suface plots color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) @@ -265,30 +265,30 @@ def demoLogisticRegression2d(): #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? 
# third figure shows contours and color image of probability densities ax = fig.add_subplot(2, 2, 3) #ax.pcolor(x, y, pMax) - ax.imshow(colorFlip, origin='lower', - extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') + ax.imshow(colorFlip, origin="lower", + extent=(mn[0], mx[0], mn[1], mx[1]), aspect="auto") # contours nLevel = 4 - cs = ax.contour(x, y, pMax, colors='black', + cs = ax.contour(x, y, pMax, colors="black", levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) # fourth figure - ax = fig.add_subplot(2, 2, 4, projection='3d') + ax = fig.add_subplot(2, 2, 4, projection="3d") labels = model.label(z) lMax = np.reshape(labels, x.shape) surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) - surf.set_edgecolor('black') + surf.set_edgecolor("black") class LogisticRegressionElastic(LogisticRegression): @@ -350,6 +350,6 @@ class LGRE(LogisticRegressionElastic): pass -if __name__ == '__main__': +if __name__ == "__main__": demoLogisticRegression2d() plt.show() diff --git a/cebl/ml/nnet/__init__.py b/cebl/ml/nnet/__init__.py index ed199b5..2f56d4c 100644 --- a/cebl/ml/nnet/__init__.py +++ b/cebl/ml/nnet/__init__.py @@ -1,6 +1,5 @@ """Artificial Neural Networks. """ - from .conv import * from .convac import * from .elman import * diff --git a/cebl/ml/nnet/conv.py b/cebl/ml/nnet/conv.py index bd9e7ed..70032f7 100644 --- a/cebl/ml/nnet/conv.py +++ b/cebl/ml/nnet/conv.py @@ -19,7 +19,7 @@ class ConvolutionalNetwork(Classifier, optim.Optable): def __init__(self, classData, convs=((8,16), (16,8)), nHidden=8, - poolSize=2, poolMethod='average', + poolSize=2, poolMethod="average", transFunc=transfer.lecun, weightInitFunc=pinit.lecun, penalty=None, elastic=1.0, optimFunc=optim.scg, **kwargs): Classifier.__init__(self, util.segmat(classData[0]).shape[2], len(classData)) @@ -32,8 +32,8 @@ def __init__(self, classData, convs=((8,16), (16,8)), nHidden=8, self.nHidden = nHidden self.poolMethod = poolMethod.lower() - if not self.poolMethod in ('stride', 'average'): - raise RuntimeError('Invalid poolMethod %s.' % str(self.poolMethod)) + if not self.poolMethod in ("stride", "average"): + raise RuntimeError("Invalid poolMethod %s." 
% str(self.poolMethod)) self.poolSize = poolSize if util.isiterable(poolSize) \ else (poolSize,) * self.nConvLayers @@ -50,10 +50,10 @@ def __init__(self, classData, convs=((8,16), (16,8)), nHidden=8, ravelLen = util.segmat(classData[0]).shape[1] # nObs in first seg for width, poolSize in zip(self.convWidths, self.poolSize): - if self.poolMethod == 'stride': + if self.poolMethod == "stride": ravelLen = int(np.ceil((ravelLen - width + 1) / float(poolSize))) - elif self.poolMethod == 'average': + elif self.poolMethod == "average": ravelLen = (ravelLen - width + 1) // poolSize if self.nHidden is None: @@ -107,7 +107,7 @@ def __init__(self, classData, convs=((8,16), (16,8)), nHidden=8, def train(self, classData, optimFunc, **kwargs): x, g = label.indicatorsFromList(classData) - x = np.require(x, requirements=['O', 'C']) + x = np.require(x, requirements=["O", "C"]) self.trainResult = optimFunc(self, x=x, g=g, **kwargs) def parameters(self): @@ -127,11 +127,11 @@ def evalConvs(self, x): c = util.timeEmbed(c, lags=width-1, axis=1) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) - elif self.poolMethod == 'stride': + elif self.poolMethod == "stride": c = util.timeEmbed(c, lags=width-1, axis=1, stride=poolSize) c = util.segdot(c, cw[:-1]) + cw[-1] - elif self.poolMethod == 'average': + elif self.poolMethod == "average": c = util.timeEmbed(c, lags=width-1, axis=1) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) c = util.accum(c, poolSize, axis=1) / poolSize @@ -147,7 +147,7 @@ def probs(self, x): c = self.evalConvs(x)[-1] # flatten to fully-connected - c = c.reshape((c.shape[0], -1), order='F') + c = c.reshape((c.shape[0], -1), order="F") # evaluate hidden and visible layers if self.nHidden is not None: @@ -233,10 +233,10 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: c = util.timeEmbed(c, lags=width-1, axis=1) - elif self.poolMethod == 'stride': + elif self.poolMethod == "stride": c = util.timeEmbed(c, lags=width-1, axis=1, stride=poolSize) - elif self.poolMethod == 'average': + elif self.poolMethod == "average": c = util.timeEmbed(c, lags=width-1, axis=1) c1 = util.bias(c) @@ -251,11 +251,11 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: pass - elif self.poolMethod == 'average': + elif self.poolMethod == "average": c = util.accum(c, poolSize, axis=1) / poolSize # flatten to fully-connected - c = c.reshape((c.shape[0], -1), order='F') + c = c.reshape((c.shape[0], -1), order="F") c1 = util.bias(c) # evaluate hidden and visible layers @@ -294,7 +294,7 @@ def gradient(self, x, g, returnError=True): delta = delta.dot(self.hw[:-1].T) # unflatten deltas back to convolution - delta = delta.reshape((delta.shape[0], -1, self.nConvHiddens[-1]), order='F') + delta = delta.reshape((delta.shape[0], -1, self.nConvHiddens[-1]), order="F") widths = list(self.convWidths[1:]) + [None,] @@ -307,7 +307,7 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: pass - elif self.poolMethod == 'average': + elif self.poolMethod == "average": deltaPool = np.empty_like(cPrime) deltaPool[:,:delta.shape[1]*poolSize] = \ delta.repeat(poolSize, axis=1) / poolSize @@ -323,13 +323,13 @@ def gradient(self, x, g, returnError=True): cgs[l][...] 
= c1f.T.dot(deltaf)
                cgs[l] += self.penaltyGradient(l)

-                if l > 0: # won't propigate back to inputs
+                if l > 0: # won't propagate back to inputs
                    delta = util.segdot(delta, self.cws[l][:-1].T)

                    if poolSize == 1:
                        pass

-                    elif self.poolMethod == 'stride':
+                    elif self.poolMethod == "stride":
                        deltaPoolShape = list(delta.shape)
                        deltaPoolShape[1] *= poolSize
                        deltaPool = np.zeros(deltaPoolShape, dtype=delta.dtype)
@@ -421,45 +421,45 @@ def gauss_map(n, a=0.62, b=-0.5):
     testData = standardizer.apply(testData)

     model = CN(trainData, convs=((2,11),(4,9),(6,7)), nHidden=None,
-        poolSize=2, poolMethod='average', verbose=True,
+        poolSize=2, poolMethod="average", verbose=True,
         optimFunc=optim.scg, maxIter=1000, transFunc=transfer.lecun,
         precision=1.0e-16, accuracy=0.0, pTrace=True, eTrace=True)

-    print('Training Performance:')
-    print('=======')
-    print('Labels: ', model.labelKnown(trainData))
-    print('CA: ', model.ca(trainData))
-    print('BCA: ', model.bca(trainData))
-    print('AUC: ', model.auc(trainData))
+    print("Training Performance:")
+    print("=======")
+    print("Labels: ", model.labelKnown(trainData))
+    print("CA: ", model.ca(trainData))
+    print("BCA: ", model.bca(trainData))
+    print("AUC: ", model.auc(trainData))
     print()

-    print('Test Performance:')
-    print('=======')
-    print('Labels: ', model.labelKnown(testData))
-    print('CA: ', model.ca(testData))
-    print('BCA: ', model.bca(testData))
-    print('AUC: ', model.auc(testData))
+    print("Test Performance:")
+    print("=======")
+    print("Labels: ", model.labelKnown(testData))
+    print("CA: ", model.ca(testData))
+    print("BCA: ", model.bca(testData))
+    print("AUC: ", model.auc(testData))
     print()

     nCol = max(model.nConvLayers, 3)

     fig = plt.figure(figsize=(20,6))
     axSigs = fig.add_subplot(3,nCol, 1)
-    axSigs.plot(x, trainData[0][0].T.squeeze(), color='blue', linewidth=2)#, label=r'$\mathbf{sin}(x)$')
-    axSigs.plot(x, trainData[0].T.squeeze(), color='blue', alpha=0.1, linewidth=2)
-    axSigs.plot(x, 10.0+trainData[1][0].T.squeeze(), color='red', linewidth=2)#, label=r'$\mathbf{sin}(2x)$')
-    axSigs.plot(x, 10.0+trainData[1].T.squeeze(), color='red', alpha=0.1, linewidth=2)
-    axSigs.set_title('')
-    axSigs.set_xlabel('Time')
-    axSigs.set_ylabel('Signal')
+    axSigs.plot(x, trainData[0][0].T.squeeze(), color="blue", linewidth=2)#, label=r"$\mathbf{sin}(x)$")
+    axSigs.plot(x, trainData[0].T.squeeze(), color="blue", alpha=0.1, linewidth=2)
+    axSigs.plot(x, 10.0+trainData[1][0].T.squeeze(), color="red", linewidth=2)#, label=r"$\mathbf{sin}(2x)$")
+    axSigs.plot(x, 10.0+trainData[1].T.squeeze(), color="red", alpha=0.1, linewidth=2)
+    axSigs.set_title("")
+    axSigs.set_xlabel("Time")
+    axSigs.set_ylabel("Signal")
     #axSigs.legend()
     axSigs.autoscale(tight=True)

     axETrace = fig.add_subplot(3,nCol, 2)
-    eTrace = np.array(model.trainResult['eTrace'])
+    eTrace = np.array(model.trainResult["eTrace"])
     axETrace.plot(eTrace)

     axPTrace = fig.add_subplot(3,nCol, 3)
-    pTrace = np.array(model.trainResult['pTrace'])
+    pTrace = np.array(model.trainResult["pTrace"])
     axPTrace.plot(pTrace)

     cs1 = model.evalConvs(trainData[0])
@@ -469,8 +469,8 @@ def gauss_map(n, a=0.62, b=-0.5):
         c1 = cs1[i][0,:,:]
         c2 = cs2[i][0,:,:]
         sep = util.colsep(np.vstack((c1,c2)))
-        axConvs.plot(c1+sep, color='blue', linewidth=2, alpha=0.25)
-        axConvs.plot(c2+sep, color='red', linewidth=2, alpha=0.25)
+        axConvs.plot(c1+sep, color="blue", linewidth=2, alpha=0.25)
+        axConvs.plot(c2+sep, color="red", linewidth=2, alpha=0.25)

         #axConvs.set_xlim(0.0, 120)

         axRespon = fig.add_subplot(3,nCol, 2*nCol+1+i)
@@ -479,7 +479,7 @@ def gauss_map(n, 
a=0.62, b=-0.5): responses = np.array(responses) axRespon.plot(freqs.T, np.abs(responses).T) - print('nParams: ', model.parameters().size) + print("nParams: ", model.parameters().size) #for l,cw in enumerate(model.cws): # plt.figure() @@ -488,14 +488,14 @@ def gauss_map(n, a=0.62, b=-0.5): #plt.figure() #plt.hist(model.hw.ravel()) - #plt.title('hw') + #plt.title("hw") #plt.figure() #plt.hist(model.vw.ravel()) - #plt.title('vw') + #plt.title("vw") #fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoCN() plt.show() diff --git a/cebl/ml/nnet/convac.py b/cebl/ml/nnet/convac.py index d186e02..98f31f8 100644 --- a/cebl/ml/nnet/convac.py +++ b/cebl/ml/nnet/convac.py @@ -19,7 +19,7 @@ class ConvolutionalNetworkAccum(Classifier, optim.Optable): def __init__(self, classData, convs=((8,16),(16,8)), nHidden=None, - poolSize=2, poolMethod='average', filtOrder=8, + poolSize=2, poolMethod="average", filtOrder=8, transFunc=transfer.lecun, weightInitFunc=pinit.lecun, penalty=None, elastic=1.0, optimFunc=optim.scg, **kwargs): Classifier.__init__(self, util.segmat(classData[0]).shape[2], len(classData)) @@ -48,11 +48,11 @@ def __init__(self, classData, convs=((8,16),(16,8)), nHidden=None, assert len(self.poolSize) == self.nConvLayers self.poolMethod = poolMethod.lower() - if self.poolMethod == 'lanczos': + if self.poolMethod == "lanczos": self.initLanczos(filtOrder) - elif not self.poolMethod in ('stride', 'average'): - raise RuntimeError('Invalid poolMethod %s.' % str(self.poolMethod)) + elif not self.poolMethod in ("stride", "average"): + raise RuntimeError("Invalid poolMethod %s." % str(self.poolMethod)) self.transFunc = transFunc if util.isiterable(transFunc) \ else (transFunc,) * (len(self.layerDims)-1) @@ -101,8 +101,8 @@ def initLanczos(self, filtOrder): self.filtOrder = filtOrder if self.filtOrder % 2 != 0: - raise RuntimeError('Invalid filtOrder: ' + str(self.filtOrder) + - ' Must be an even integer.') + raise RuntimeError("Invalid filtOrder: " + str(self.filtOrder) + + " Must be an even integer.") radius = self.filtOrder // 2 win = np.sinc(np.linspace(-radius, radius, self.filtOrder+1) / float(radius)) # lanczos @@ -148,10 +148,10 @@ def initLanczos(self, filtOrder): def train(self, classData, optimFunc, **kwargs): x, g = label.indicatorsFromList(classData) - x = np.require(x, requirements=['O', 'C']) + x = np.require(x, requirements=["O", "C"]) self.trainResult = optimFunc(self, x=x, g=g, **kwargs) - #dv = self.discrim(x, accum='mult') + #dv = self.discrim(x, accum="mult") #self.normSoftmaxMean = dv.mean() #self.normSoftmaxStd = dv.std() #self.normSoftmaxMin = dv.min() @@ -176,18 +176,18 @@ def evalConvs(self, x): #c = phi(c.dot(cw[:-1]) + cw[-1]) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) - elif self.poolMethod == 'stride': + elif self.poolMethod == "stride": c = util.timeEmbed(c, lags=width-1, axis=1, stride=poolSize) #c = phi(c.dot(cw[:-1]) + cw[-1]) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) - elif self.poolMethod == 'average': + elif self.poolMethod == "average": c = util.timeEmbed(c, lags=width-1, axis=1) #c = phi(c.dot(cw[:-1]) + cw[-1]) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) c = util.accum(c, poolSize, axis=1) / poolSize - elif self.poolMethod == 'lanczos': + elif self.poolMethod == "lanczos": c = util.timeEmbed(c, lags=width-1, axis=1) #c = phi(c.dot(cw[:-1]) + cw[-1]) c = phi(util.segdot(c, cw[:-1]) + cw[-1]) @@ -221,20 +221,20 @@ def stepProbs(self, x): def stepLikes(self, x): return np.log(self.stepProbs(x)) - def discrim(self, x, accum='prod'): + def 
discrim(self, x, accum="prod"): x = util.segmat(x) accum = accum.lower() # sum loglikes (multiply probs) accumulation - if accum in ('prod', 'mult'): + if accum in ("prod", "mult"): return self.stepLikes(x).sum(axis=1) # sum probs accum accumulation - elif accum in ('sum', 'add'): + elif accum in ("sum", "add"): return self.stepProbs(x).sum(axis=1) # vote likes accumulation - elif accum == 'vote': + elif accum == "vote": likes = self.stepLikes(x) votes = np.zeros_like(likes) @@ -245,18 +245,18 @@ def discrim(self, x, accum='prod'): return votes.sum(axis=1) else: - raise RuntimeError('Invalid discrim accum method: ' + str(accum)) + raise RuntimeError("Invalid discrim accum method: " + str(accum)) - def probs(self, x, squash='softmax', accum='prod'): + def probs(self, x, squash="softmax", accum="prod"): x = util.segmat(x) squash = squash.lower() dv = self.discrim(x, accum=accum) - if squash == 'softmax': + if squash == "softmax": return util.softmax(dv) - #elif squash == 'normsoftmax': + #elif squash == "normsoftmax": # #dv -= self.normSoftmaxMin # #dv /= self.normSoftmaxMax # dv -= self.normSoftmaxMean @@ -264,11 +264,11 @@ def probs(self, x, squash='softmax', accum='prod'): # dv /= self.normSoftmaxStd # return util.softmax(dv) - elif squash == 'frac': + elif squash == "frac": return dv / dv.sum(axis=1)[:,None] else: - raise RuntimeError('Invalid probs squash method: ' + str(squash)) + raise RuntimeError("Invalid probs squash method: " + str(squash)) def penaltyError(self): if self.penalty is None: @@ -344,10 +344,10 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: c = util.timeEmbed(c, lags=width-1, axis=1) - elif self.poolMethod == 'stride': + elif self.poolMethod == "stride": c = util.timeEmbed(c, lags=width-1, axis=1, stride=poolSize) - elif self.poolMethod in ('average', 'lanczos'): + elif self.poolMethod in ("average", "lanczos"): c = util.timeEmbed(c, lags=width-1, axis=1) c1 = util.bias(c) @@ -363,10 +363,10 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: pass - elif self.poolMethod == 'average': + elif self.poolMethod == "average": c = util.accum(c, poolSize, axis=1) / poolSize - elif self.poolMethod == 'lanczos': + elif self.poolMethod == "lanczos": c = util.timeEmbed(c, lags=self.filtOrder, axis=1, stride=poolSize) #c = c.dot(self.filters[l]) c = util.segdot(c, self.filters[l]) @@ -430,7 +430,7 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: pass - elif self.poolMethod == 'average': + elif self.poolMethod == "average": deltaPool = np.empty_like(cPrime) deltaPool[:,:delta.shape[1]*poolSize] = \ delta.repeat(poolSize, axis=1) / poolSize @@ -438,7 +438,7 @@ def gradient(self, x, g, returnError=True): delta = deltaPool - elif self.poolMethod == 'lanczos': + elif self.poolMethod == "lanczos": filt = self.filters[l] #delta = delta.dot(filt.T) @@ -467,7 +467,7 @@ def gradient(self, x, g, returnError=True): if poolSize == 1: pass - elif self.poolMethod == 'stride': + elif self.poolMethod == "stride": deltaPoolShape = list(delta.shape) deltaPoolShape[1] *= poolSize deltaPool = np.zeros(deltaPoolShape, dtype=delta.dtype) @@ -556,49 +556,49 @@ def gauss_map(n, a=0.62, b=-0.5): #model = CNA(trainData, convs=((2,11),(4,9),(6,7)), nHidden=2, model = CNA(trainData, convs=((4,9),(8,9)), nHidden=None, - poolSize=2, poolMethod='average', verbose=True, + poolSize=2, poolMethod="average", verbose=True, optimFunc=optim.scg, maxIter=250, transFunc=transfer.rectifier, #optimFunc=optim.rprop, maxIter=1000, - #optimFunc=optim.sciopt, method='Powell', 
maxIter=1000) + #optimFunc=optim.sciopt, method="Powell", maxIter=1000) precision=1.0e-10, accuracy=0.0, pTrace=True, eTrace=True) - print('Training Performance:') - print('=======') - print('Labels: ', model.labelKnown(trainData)) - print('ProbsA: ', model.probs(trainData[0])) - print('ProbsB: ', model.probs(trainData[1])) - print('CA: ', model.ca(trainData)) - print('BCA: ', model.bca(trainData)) - print('AUC: ', model.auc(trainData)) + print("Training Performance:") + print("=======") + print("Labels: ", model.labelKnown(trainData)) + print("ProbsA: ", model.probs(trainData[0])) + print("ProbsB: ", model.probs(trainData[1])) + print("CA: ", model.ca(trainData)) + print("BCA: ", model.bca(trainData)) + print("AUC: ", model.auc(trainData)) print() - print('Test Performance:') - print('=======') - print('Labels: ', model.labelKnown(testData)) - print('ProbsA: ', model.probs(testData[0])) - print('ProbsB: ', model.probs(testData[1])) - print('CA: ', model.ca(testData)) - print('BCA: ', model.bca(testData)) - print('AUC: ', model.auc(testData)) + print("Test Performance:") + print("=======") + print("Labels: ", model.labelKnown(testData)) + print("ProbsA: ", model.probs(testData[0])) + print("ProbsB: ", model.probs(testData[1])) + print("CA: ", model.ca(testData)) + print("BCA: ", model.bca(testData)) + print("AUC: ", model.auc(testData)) print() fig = plt.figure(figsize=(20,6)) axSigs = fig.add_subplot(3,3, 1) - axSigs.plot(x, trainData[0][0].T.squeeze(), color='blue', linewidth=2)#, label=r'$\mathbf{sin}(x)$') - axSigs.plot(x, trainData[0].T.squeeze(), color='blue', alpha=0.1, linewidth=2) - axSigs.plot(x, 10.0+trainData[1][0].T.squeeze(), color='red', linewidth=2)#, label=r'$\mathbf{sin}(2x)$') - axSigs.plot(x, 10.0+trainData[1].T.squeeze(), color='red', alpha=0.1, linewidth=2) - axSigs.set_title('') - axSigs.set_xlabel('Time') - axSigs.set_ylabel('Signal') + axSigs.plot(x, trainData[0][0].T.squeeze(), color="blue", linewidth=2)#, label=r"$\mathbf{sin}(x)$") + axSigs.plot(x, trainData[0].T.squeeze(), color="blue", alpha=0.1, linewidth=2) + axSigs.plot(x, 10.0+trainData[1][0].T.squeeze(), color="red", linewidth=2)#, label=r"$\mathbf{sin}(2x)$") + axSigs.plot(x, 10.0+trainData[1].T.squeeze(), color="red", alpha=0.1, linewidth=2) + axSigs.set_title("") + axSigs.set_xlabel("Time") + axSigs.set_ylabel("Signal") #axSigs.legend() axSigs.autoscale(tight=True) axETrace = fig.add_subplot(3,3, 2) - eTrace = np.array(model.trainResult['eTrace']) + eTrace = np.array(model.trainResult["eTrace"]) axETrace.plot(eTrace) axPTrace = fig.add_subplot(3,3, 3) - pTrace = np.array(model.trainResult['pTrace']) + pTrace = np.array(model.trainResult["pTrace"]) axPTrace.plot(pTrace) cs1 = model.evalConvs(trainData[0]) @@ -609,8 +609,8 @@ def gauss_map(n, a=0.62, b=-0.5): c1 = cs1[i][0,:,:] c2 = cs2[i][0,:,:] sep = util.colsep(np.vstack((c1,c2))) - axConvs.plot(c1+sep, color='blue', linewidth=2, alpha=0.25) - axConvs.plot(c2+sep, color='red', linewidth=2, alpha=0.25) + axConvs.plot(c1+sep, color="blue", linewidth=2, alpha=0.25) + axConvs.plot(c2+sep, color="red", linewidth=2, alpha=0.25) axRespon = fig.add_subplot(3,3, 7+i) freqs, responses = zip(*[spsig.freqz(cw) for cw in model.cws[i].T]) @@ -623,10 +623,10 @@ def gauss_map(n, a=0.62, b=-0.5): probs2 = model.stepProbs(trainData[1]) p1 = probs1[0,:,0] p2 = probs2[0,:,1] - axProbs.plot(p1, color='blue', linewidth=2) - axProbs.plot(p2, color='red', linewidth=2) + axProbs.plot(p1, color="blue", linewidth=2) + axProbs.plot(p2, color="red", linewidth=2) - print('nParams: 
', model.parameters().size)
+    print("nParams: ", model.parameters().size)

     #for l,cw in enumerate(model.cws):
     #    plt.figure()
@@ -635,10 +635,10 @@ def gauss_map(n, a=0.62, b=-0.5):

     #plt.figure()
     #plt.hist(model.vw.ravel())
-    #plt.title('vw')
+    #plt.title("vw")

     #fig.tight_layout()

-if __name__ == '__main__':
+if __name__ == "__main__":
     demoCNA()
     plt.show()
diff --git a/cebl/ml/nnet/convreg.py b/cebl/ml/nnet/convreg.py
index 0659d7e..d835e71 100644
--- a/cebl/ml/nnet/convreg.py
+++ b/cebl/ml/nnet/convreg.py
@@ -89,10 +89,10 @@ def __init__(self, x, g, convs=((8,16),(16,8)), nHidden=None,

     def train(self, x, g, optimFunc, **kwargs):
         x = util.segmat(x)
-        x = np.require(x, requirements=['O', 'C'])
+        x = np.require(x, requirements=["O", "C"])

         g = util.segmat(g)
-        g = np.require(g, requirements=['O', 'C'])
+        g = np.require(g, requirements=["O", "C"])

         self.trainResult = optimFunc(self, x=x, g=g, **kwargs)

@@ -270,7 +270,7 @@ def gradient(self, x, g, returnError=True):
             cgs[l][...] = c1f.T.dot(deltaf)
             cgs[l] += self.penaltyGradient(l)

-            if l > 0: # won't propigate back to inputs
+            if l > 0: # won't propagate back to inputs
                 delta = util.segdot(delta, self.cws[l][:-1].T)

                 delta = deltaDeEmbedSum(delta, self.convWidths[l])
@@ -329,30 +329,30 @@ def demoCNR():

     fig = plt.figure(figsize=(20,6))
     axSigs = fig.add_subplot(model.nConvLayers,3, 1)
-    axSigs.plot(t, xTest[0].T.squeeze(), color='blue', linewidth=2)
-    #axSigs.plot(t, xTest.T.squeeze(), color='blue', alpha=0.1, linewidth=2)
-    axSigs.plot(t, -3.0+gTest[0].T.squeeze(), color='red', linewidth=2)
-    #axSigs.plot(t, 3.0-gTest.T.squeeze(), color='red', alpha=0.1, linewidth=2)
-    axSigs.plot(tTrim, -5.5+yTest[0].T.squeeze(), color='red', linewidth=2)
-    #axSigs.plot(tTrim, 5.5-yTest.T.squeeze(), color='red', alpha=0.1, linewidth=2)
-    axSigs.set_title('')
-    axSigs.set_xlabel('Time')
-    axSigs.set_ylabel('Signal')
+    axSigs.plot(t, xTest[0].T.squeeze(), color="blue", linewidth=2)
+    #axSigs.plot(t, xTest.T.squeeze(), color="blue", alpha=0.1, linewidth=2)
+    axSigs.plot(t, -3.0+gTest[0].T.squeeze(), color="red", linewidth=2)
+    #axSigs.plot(t, 3.0-gTest.T.squeeze(), color="red", alpha=0.1, linewidth=2)
+    axSigs.plot(tTrim, -5.5+yTest[0].T.squeeze(), color="red", linewidth=2)
+    #axSigs.plot(tTrim, 5.5-yTest.T.squeeze(), color="red", alpha=0.1, linewidth=2)
+    axSigs.set_title("")
+    axSigs.set_xlabel("Time")
+    axSigs.set_ylabel("Signal")
     axSigs.autoscale(tight=True)

     axETrace = fig.add_subplot(model.nConvLayers,3, 2)
-    eTrace = np.array(model.trainResult['eTrace'])
+    eTrace = np.array(model.trainResult["eTrace"])
     axETrace.plot(eTrace)

     axPTrace = fig.add_subplot(model.nConvLayers,3, 3)
-    pTrace = np.array(model.trainResult['pTrace'])
+    pTrace = np.array(model.trainResult["pTrace"])
     axPTrace.plot(pTrace)

     cs = model.evalConvs(xTest)
     for i in range(model.nConvLayers):
         axConvs = fig.add_subplot(model.nConvLayers,3, 4+i)
         c = cs[i][0,:,:]
-        axConvs.plot(c+util.colsep(c), color='blue', linewidth=2, alpha=0.25)
+        axConvs.plot(c+util.colsep(c), color="blue", linewidth=2, alpha=0.25)

         axConvs.autoscale(tight=True)

@@ -362,10 +362,10 @@ def demoCNR():
         #responses = np.array(responses)
         #axRespon.plot(freqs.T, np.abs(responses).T)

-    print('nParams: ', model.parameters().size)
+    print("nParams: ", model.parameters().size)

     fig.tight_layout()

-if __name__ == '__main__':
+if __name__ == "__main__":
     demoCNR()
     plt.show()
diff --git a/cebl/ml/nnet/elman.py b/cebl/ml/nnet/elman.py
index 7ecfe07..619724e 100644
--- a/cebl/ml/nnet/elman.py
+++ b/cebl/ml/nnet/elman.py
@@ -206,11 +206,11 @@ def 
xor(a, b): g = np.array([int(xor(x[i-horizon], x[i-horizon-1])) if i > horizon else 0 for i in range(len(x))], dtype=np.float32) - #options = {'maxfev': 1000} + #options = {"maxfev": 1000} #net = ERN(x[None,...], g[None,...], nHidden=10, net = ERN((x[0:250],x[250:500]), (g[0:250],g[250:500]), nHidden=20, unrollSteps=unrollSteps, transient=transient, - #optimFunc=optim.sciopt, method='Powell', + #optimFunc=optim.sciopt, method="Powell", #optimFunc=optim.scg, #optimFunc=optim.pso, nParticles=20, vInit=0.01, momentum=0.85, pAttract=0.2, gAttract=0.6, #optimFunc=optim.alopex, stepInit=0.0001, tempIter=20, @@ -265,9 +265,9 @@ def xor(a, b): for i,momentum in enumerate(momentums): for j,pAttract in enumerate(pAttracts): for k,gAttract in enumerate(gAttracts): - print('momentum: ', momentum) - print('pAttract: ', pAttract) - print('gAttract: ', gAttract) + print("momentum: ", momentum) + print("pAttract: ", pAttract) + print("gAttract: ", gAttract) net = ERN(xTrain[None,...], gTrain[None,...], nHidden=32, unrollSteps=unrollSteps, transient=transient, @@ -275,34 +275,34 @@ def xor(a, b): momentum=momentum, pAttract=pAttract, gAttract=gAttract, maxIter=500, accuracy=0.005, precision=0.0, verbose=False) - its[i,j,k] = net.trainResult['iteration'] - ers[i,j,k] = net.trainResult['error'] - print('Error: ', ers[i,j,k]) - print('=======') + its[i,j,k] = net.trainResult["iteration"] + ers[i,j,k] = net.trainResult["error"] + print("Error: ", ers[i,j,k]) + print("=======") bi, bj, bk = np.unravel_index(np.argmin(ers), ers.shape) - print('=======') - print('Best: ', ers[bi,bj,bk], momentums[bi], pAttracts[bj], gAttracts[bk]) - print('=======') + print("=======") + print("Best: ", ers[bi,bj,bk], momentums[bi], pAttracts[bj], gAttracts[bk]) + print("=======") for i,momentum in enumerate(momentums): fig = plt.figure() ax = fig.add_subplot(1,1,1) - im = ax.imshow(ers[i], origin='lowerleft')#, interpolation='none') + im = ax.imshow(ers[i], origin="lowerleft")#, interpolation="none") fig.colorbar(im) - ax.set_title('ers momentum: %.2f' % momentum) - ax.set_xlabel('pAttract') - ax.set_ylabel('gAttract') + ax.set_title("ers momentum: %.2f" % momentum) + ax.set_xlabel("pAttract") + ax.set_ylabel("gAttract") for i,momentum in enumerate(momentums): fig = plt.figure() ax = fig.add_subplot(1,1,1) - im = ax.imshow(its[i], origin='lowerleft')#, interpolation='none') + im = ax.imshow(its[i], origin="lowerleft")#, interpolation="none") fig.colorbar(im) - ax.set_title('its momentum: %.2f' % momentum) - ax.set_xlabel('pAttract') - ax.set_ylabel('gAttract') + ax.set_title("its momentum: %.2f" % momentum) + ax.set_xlabel("pAttract") + ax.set_ylabel("gAttract") -if __name__ == '__main__': +if __name__ == "__main__": demoERNTXOR() plt.show() diff --git a/cebl/ml/nnet/esn.py b/cebl/ml/nnet/esn.py index 4dfee1d..c5cb368 100644 --- a/cebl/ml/nnet/esn.py +++ b/cebl/ml/nnet/esn.py @@ -70,18 +70,18 @@ def norm2(m): iteration += 1 if np.abs(eigVal-eigValPrev) < precision: - reason = 'precision' + reason = "precision" break if iteration > maxIter: - reason = 'maxiter' + reason = "maxiter" break results = dict() - results['eigVal'] = eigVal - results['eigVec'] = eigVec - results['reason'] = reason - results['iterations'] = iteration + results["eigVal"] = eigVal + results["eigVec"] = eigVec + results["reason"] = reason + results["iterations"] = iteration return results @@ -109,7 +109,7 @@ def miniRProp(x, errFunc, step=0.1, stepUp=1.01, stepDown=0.6, if iteration >= maxIter: if verbose: - print('Maximum iterations %d reached.' 
% iteration) + print("Maximum iterations %d reached." % iteration) break if np.abs(err) < accuracy: @@ -166,16 +166,16 @@ def __init__(self, x, nRes=1024, rwScale=0.95, rwConn=0.01, ##if self.sparse: ## iw = iw.todense() ## rw = rw.todense() - ##print('iw.shape: ', iw.shape) - ##print('iw min/max: ', (np.min(iw), np.max(iw))) - ##print('iwMult: ', self.iwMult) - ##print('rw.shape: ', rw.shape) + ##print("iw.shape: ", iw.shape) + ##print("iw min/max: ", (np.min(iw), np.max(iw))) + ##print("iwMult: ", self.iwMult) + ##print("rw.shape: ", rw.shape) ##l = np.max(np.abs(np.linalg.eigvals(rw))) - ##print('rwScale: ', l) + ##print("rwScale: ", l) def initIW(self, iwScale, iwConn): if self.verbose: - print('Initializing input weights...') + print("Initializing input weights...") self.iwMult = 1.0 self.iwScale = iwScale @@ -191,7 +191,7 @@ def initIW(self, iwScale, iwConn): def initRW(self, rwScale, rwConn): if self.verbose: - print('Initializing recurrent weights...') + print("Initializing recurrent weights...") self.rwScale = rwScale self.rwConn = rwConn @@ -207,32 +207,32 @@ def initRW(self, rwScale, rwConn): rw[loneNeurons,newConns] = np.random.uniform(-1.0, 1.0, size=loneNeurons.size) if self.verbose: - print('Eliminated %d lone reservor units.' % loneNeurons.size) + print("Eliminated %d lone reservor units." % loneNeurons.size) if self.sparse: if self.verbose: - print('Using sparse linalg to find spectral radius...') + print("Using sparse linalg to find spectral radius...") try: ncv = int(np.max((10, rwConn*self.nRes))) l = np.abs(spsparse.linalg.eigs(spsparse.csr_matrix(rw, dtype=self.dtype), k=1, ncv=ncv, tol=0, return_eigenvectors=False)[0]) except spsparse.linalg.ArpackNoConvergence as e: if self.verbose: - print('ARPACK did not converge, using dense matrices.') + print("ARPACK did not converge, using dense matrices.") l = np.max(np.abs(np.linalg.eigvals(rw))) else: #if self.verbose: - # print('Using dense Rayleigh iteration to find spectral radius...') + # print("Using dense Rayleigh iteration to find spectral radius...") # need to take a closer look at rayleigh, finding all eigenvalues for now XXX - idfah # rayleigh only works for positive matrices ##rayl = rayleigh(rw, eigVal=self.nRes*0.5*self.rwConn, verbose=self.verbose) - #if rayl['reason'] != 'precision': - # print('ESN Warning: rayleigh did not converge: ' + rayl['reason']) - #l = rayl['eigVal'] + #if rayl["reason"] != "precision": + # print("ESN Warning: rayleigh did not converge: " + rayl["reason"]) + #l = rayl["eigVal"] if self.verbose: - print('Finding spectral radius using dense matrices...') + print("Finding spectral radius using dense matrices...") l = np.max(np.abs(np.linalg.eigvals(rw))) rw[:,:] *= self.rwScale / l @@ -249,10 +249,10 @@ def eval(self, x, context=None, returncontext=False): self.actCache.getMaxSize() > 0: key = util.hashArray(x) if key in self.actCache: - #print('cache hit.') + #print("cache hit.") return self.actCache[key] else: - #print('cache miss.') + #print("cache miss.") cacheAct = True nSeg = x.shape[0] @@ -308,9 +308,9 @@ def setIWMult(self, mult): self.iwMult = mult - def scaleIW(self, x, method='brentq'): + def scaleIW(self, x, method="brentq"): if self.verbose: - print('Scaling input weights...') + print("Scaling input weights...") self.actCache.disable() @@ -326,7 +326,7 @@ def stdErr(m): act = self.eval(x) err = np.abs(np.std(act)-scale) if self.verbose: - print('scale, mult: ', (np.std(act), m)) + print("scale, mult: ", (np.std(act), m)) return err def stdRoot(m): @@ -334,26 +334,26 @@ def 
stdRoot(m): act = self.eval(x) err = np.std(act) - scale if self.verbose: - print('scale, mult: ', (np.std(act), m)) + print("scale, mult: ", (np.std(act), m)) return err - if method == 'rprop': + if method == "rprop": miniRProp(0.75, errFunc=stdErr, maxIter=maxIter, accuracy=accuracy) #verbose=self.verbose) - elif method == 'brentq': + elif method == "brentq": m, r = spopt.brentq(stdRoot, 1.0e-5, 10.0, xtol=accuracy, full_output=True) if self.verbose: - print('brentq iterations: %d' % r.iterations) + print("brentq iterations: %d" % r.iterations) - elif method == 'simplex': - r = spopt.minimize(stdErr, scale, method='Nelder-Mead', tol=accuracy, - options={'maxiter': 100}) + elif method == "simplex": + r = spopt.minimize(stdErr, scale, method="Nelder-Mead", tol=accuracy, + options={"maxiter": 100}) m = r.x else: - raise RuntimeError('Invalid scaleIW method %s.' % method) + raise RuntimeError("Invalid scaleIW method %s." % method) self.actCache.enable() @@ -387,17 +387,17 @@ def plotActDensity(self, x, ax=None, **kwargs): fig = plt.figure() ax = fig.add_subplot(1, 1, 1) - ax.set_xlabel('Density') - ax.set_ylabel('Reservoir Activation') + ax.set_xlabel("Density") + ax.set_ylabel("Reservoir Activation") n, bins, patches = ax.hist(act.ravel(), normed=True, - orientation='horizontal', label='Activations') + orientation="horizontal", label="Activations") lines = ax.plot(np.linspace(0.0, np.max(n), t.size), np.tanh(t-2.0), - linewidth=2, label=r'$\phi$') # label=r'$\phi='+self.phi.__name__) - leg = ax.legend(loc='lower right') + linewidth=2, label=r"$\phi$") # label=r"$\phi="+self.phi.__name__) + leg = ax.legend(loc="lower right") - return {'ax': ax, 'n': n, 'bins': bins, 'patches': patches, 'lines': lines, 'leg': leg} + return {"ax": ax, "n": n, "bins": bins, "patches": patches, "lines": lines, "leg": leg} def plotWeightImg(self, ax=None): if self.sparse: @@ -409,9 +409,9 @@ def plotWeightImg(self, ax=None): fig = plt.figure() ax = fig.add_subplot(1, 1, 1) - img = ax.imshow(hw[self.nIn:,:], interpolation='none') + img = ax.imshow(hw[self.nIn:,:], interpolation="none") - return {'ax': ax, 'img': img} + return {"ax": ax, "img": img} class ESNReservoir(EchoStateNetworkReservoir): @@ -454,7 +454,7 @@ def train(self, x, g, readoutClass=RidgeRegression, **kwargs): gf = g.ravel() if self.verbose: - print('Training readout layer...') + print("Training readout layer...") self.readout = readoutClass(actf[self.transient:], gf[self.transient:], **kwargs) @@ -484,7 +484,7 @@ def addSideTrack(self, x, act): if self.sideTrack: if self.verbose: - print('adding side track...') + print("adding side track...") return np.concatenate((x, act), axis=2) else: return act @@ -534,12 +534,12 @@ def demoESP(): fig = plt.figure() impulseAx = fig.add_subplot(2, 1, 1) - impulseAx.plot(sig, color='grey', linewidth=3) - impulseAx.plot(sigi, color='red') + impulseAx.plot(sig, color="grey", linewidth=3) + impulseAx.plot(sigi, color="red") actAx = fig.add_subplot(2, 1, 2) - actAx.plot(act, color='black', linewidth=2) - actAx.plot(acti, color='red') + actAx.plot(act, color="black", linewidth=2) + actAx.plot(acti, color="red") def demoESNTXOR(): def xor(a, b): @@ -601,13 +601,13 @@ def demoESNSine(): fig = plt.figure() ax = fig.add_subplot(1, 1, 1) - ax.plot(time, s, color='blue') - ax.plot(time[1:], pred[0], color='red') + ax.plot(time, s, color="blue") + ax.plot(time[1:], pred[0], color="red") ax.autoscale(tight=True) -if __name__ == '__main__': +if __name__ == "__main__": #demoESP() demoESNTXOR() #demoESNSine() diff --git 
a/cebl/ml/nnet/forward.py b/cebl/ml/nnet/forward.py index 601bac6..f7820b0 100644 --- a/cebl/ml/nnet/forward.py +++ b/cebl/ml/nnet/forward.py @@ -354,39 +354,39 @@ def demoFN1d(): fig = plt.figure(figsize=(16, 12)) axFit = fig.add_subplot(3, 2, 1) - axFit.plot(x, gClean, linewidth=2, color='blue') - axFit.plot(x, g, linewidth=2, color='black') - axFit.plot(x, model.eval(x), linewidth=2, color='red') - axFit.legend(['True Target', 'Noisy Target', 'Network Output']) - axFit.set_title('Network Output') - axFit.set_xlabel('Input') - axFit.set_ylabel('Output') + axFit.plot(x, gClean, linewidth=2, color="blue") + axFit.plot(x, g, linewidth=2, color="black") + axFit.plot(x, model.eval(x), linewidth=2, color="red") + axFit.legend(["True Target", "Noisy Target", "Network Output"]) + axFit.set_title("Network Output") + axFit.set_xlabel("Input") + axFit.set_ylabel("Output") axError = fig.add_subplot(3, 2, 2) - axError.plot(results['eTrace']) - axError.set_title('Training Error') - axError.set_xlabel('Epoch') - axError.set_ylabel('Mean-Squared Error') + axError.plot(results["eTrace"]) + axError.set_title("Training Error") + axError.set_xlabel("Epoch") + axError.set_ylabel("Mean-Squared Error") axHResponse = fig.add_subplot(3, 2, 3) axHResponse.plot(x, model.evalHiddens(x)[0], linewidth=2) - axHResponse.set_title('Hidden Unit Response') - axHResponse.set_xlabel('Input') - axHResponse.set_ylabel('Hidden Unit Output') + axHResponse.set_title("Hidden Unit Response") + axHResponse.set_xlabel("Input") + axHResponse.set_ylabel("Hidden Unit Output") axHWeight = fig.add_subplot(3, 2, 4) - img = axHWeight.imshow(model.hws[0], aspect='auto', - interpolation='none', cmap=plt.cm.winter) + img = axHWeight.imshow(model.hws[0], aspect="auto", + interpolation="none", cmap=plt.cm.winter) cbar = plt.colorbar(img) - cbar.set_label('Weight') - axHWeight.set_title('Hidden Weights') - axHWeight.set_xlabel('Hidden Unit') - axHWeight.set_ylabel('Input') + cbar.set_label("Weight") + axHWeight.set_title("Hidden Weights") + axHWeight.set_xlabel("Hidden Unit") + axHWeight.set_ylabel("Input") axHWeight.set_yticks(range(model.hws[0].shape[0])) - axHWeight.set_yticklabels(list(range(1, model.hws[0].shape[0])) + ['bias']) + axHWeight.set_yticklabels(list(range(1, model.hws[0].shape[0])) + ["bias"]) - pTrace = np.array(results['pTrace']) - #sTrace = np.array(results['sTrace']) + pTrace = np.array(results["pTrace"]) + #sTrace = np.array(results["sTrace"]) hwTrace = pTrace[:,:model.hws[0].size] #hwTrace = sTrace[:,:model.hws[0].size] #hwTrace = sTrace @@ -394,15 +394,15 @@ def demoFN1d(): axHWTrace = fig.add_subplot(3, 2, 5) axHWTrace.plot(hwTrace) - axHWTrace.set_title('Hidden Weight Trace') - axHWTrace.set_xlabel('Epoch') - axHWTrace.set_ylabel('Weight') + axHWTrace.set_title("Hidden Weight Trace") + axHWTrace.set_xlabel("Epoch") + axHWTrace.set_ylabel("Weight") axVWTrace = fig.add_subplot(3, 2, 6) axVWTrace.plot(vwTrace) - axVWTrace.set_title('Visible Weight Trace') - axVWTrace.set_xlabel('Epoch') - axVWTrace.set_ylabel('Weight') + axVWTrace.set_title("Visible Weight Trace") + axVWTrace.set_xlabel("Epoch") + axVWTrace.set_ylabel("Weight") fig.tight_layout(pad=0.4) @@ -445,37 +445,37 @@ def radialSinc(x): fig = plt.figure() - axTargSurf = fig.add_subplot(2, 3, 1, projection='3d') + axTargSurf = fig.add_subplot(2, 3, 1, projection="3d") targSurf = axTargSurf.plot_surface(xx1, xx2, gg, linewidth=0.0, cmap=plt.cm.jet) - targSurf.set_edgecolor('black') + targSurf.set_edgecolor("black") axTargCont = fig.add_subplot(2, 3, 2) - 
axTargCont.contour(x1, x2, gg, 40, color='black', - marker='o', s=400, linewidth=3, cmap=plt.cm.jet) + axTargCont.contour(x1, x2, gg, 40, color="black", + marker="o", s=400, linewidth=3, cmap=plt.cm.jet) - eTrace = results['eTrace'] + eTrace = results["eTrace"] axError = fig.add_subplot(2, 3, 3) axError.plot(eTrace) - axError.set_title('Training Error') - axError.set_xlabel('Epoch') - axError.set_ylabel('Mean-Squared Error') + axError.set_title("Training Error") + axError.set_xlabel("Epoch") + axError.set_ylabel("Mean-Squared Error") - axPredSurf = fig.add_subplot(2, 3, 4, projection='3d') + axPredSurf = fig.add_subplot(2, 3, 4, projection="3d") predSurf = axPredSurf.plot_surface(xx1, xx2, yy, linewidth=0.0, cmap=plt.cm.jet) - predSurf.set_edgecolor('black') + predSurf.set_edgecolor("black") axPredCont = fig.add_subplot(2, 3, 5) - axPredCont.contour(x1, x2, yy, 40, color='black', - marker='o', s=400, linewidth=3, cmap=plt.cm.jet) + axPredCont.contour(x1, x2, yy, 40, color="black", + marker="o", s=400, linewidth=3, cmap=plt.cm.jet) - pTrace = np.array(results['pTrace']) + pTrace = np.array(results["pTrace"]) axHWTrace = fig.add_subplot(2, 3, 6) axHWTrace.plot(pTrace) - axHWTrace.set_title('Weight Trace') - axHWTrace.set_xlabel('Epoch') - axHWTrace.set_ylabel('Weight') + axHWTrace.set_title("Weight Trace") + axHWTrace.set_xlabel("Epoch") + axHWTrace.set_ylabel("Weight") -if __name__ == '__main__': +if __name__ == "__main__": demoFN1d() #demoFN2d() plt.show() diff --git a/cebl/ml/nnet/multielman.py b/cebl/ml/nnet/multielman.py index e1ebee3..92d74ee 100644 --- a/cebl/ml/nnet/multielman.py +++ b/cebl/ml/nnet/multielman.py @@ -215,7 +215,7 @@ def gradient(self, x, g, unrollSteps=10, returnError=True): deltaf = delta.reshape((-1, delta.shape[-1])) hgs[l][...] 
= r1cf.T.dot(deltaf) - #print('hg %d: %f' % (l, np.sqrt(np.mean(hgs[l]**2)))) + #print("hg %d: %f" % (l, np.sqrt(np.mean(hgs[l]**2)))) w = self.iws[l] @@ -251,7 +251,7 @@ def xor(a, b): x = std.apply(x) g = std.apply(g) - #options = {'maxfev': 1000} + #options = {"maxfev": 1000} #net = MERN(x[None,...], g[None,...], recs=(10,10,), net = MERN((x[0:250],x[250:500]), (g[0:250],g[250:500]), recs=(20,10), #iwInitFunc=lambda size: np.random.uniform(-0.2, 0.2, size=size), @@ -259,7 +259,7 @@ def xor(a, b): #vwInitFunc=lambda size: np.random.uniform(-0.2, 0.2, size=size), phi=transfer.tanhTwist, unrollSteps=unrollSteps, transient=transient, - #optimFunc=optim.sciopt, method='Powell', + #optimFunc=optim.sciopt, method="Powell", optimFunc=optim.scg, #optimFunc=optim.rprop, stepUp=1.02, stepDown=0.4, #optimFunc=optim.pso, nParticles=20, vInit=0.01, momentum=0.85, pAttract=0.2, gAttract=0.6, @@ -286,14 +286,14 @@ def xor(a, b): axTarg.bar(range(len(g)), g) axTarg.set_xlim((0, len(g))) axTarg.set_ylim((-1.5,1.5)) - axTarg.set_title('Test Targets') + axTarg.set_title("Test Targets") # network output axOut = fig.add_subplot(4,2,3) axOut.bar(range(len(g)), out) axOut.set_xlim((0, len(g))) axOut.set_ylim((-1.5,1.5)) - axOut.set_title('Test Outputs') + axOut.set_title("Test Outputs") # first layer output axH1 = fig.add_subplot(4,2,5) @@ -301,7 +301,7 @@ def xor(a, b): axH1.plot(hout[1] + util.colsep(hout[1])) #axH1.set_xlim((0, len(g))) #axH1.set_ylim((0.0,1.0)) - axH1.set_title('H2') + axH1.set_title("H2") # second layer output axH2 = fig.add_subplot(4,2,7) @@ -309,30 +309,30 @@ def xor(a, b): axH2.plot(hout[0] + util.colsep(hout[0])) #axH2.set_xlim((0, len(g))) #axH2.set_ylim((0.0,1.0)) - axH2.set_title('H1') + axH2.set_title("H1") axETrace = fig.add_subplot(4,2,2) - axETrace.plot(np.array(net.trainResult['eTrace'])) + axETrace.plot(np.array(net.trainResult["eTrace"])) axPTrace = fig.add_subplot(4,2,4) - axPTrace.plot(np.array(net.trainResult['pTrace'])) + axPTrace.plot(np.array(net.trainResult["pTrace"])) axH1Dens = fig.add_subplot(4,2,6) t = np.arange(0.0,4.0,0.01) nb, bins, patches = axH1Dens.hist(hout[0].ravel(), normed=True, - orientation='horizontal', label='Activations') + orientation="horizontal", label="Activations") axH1Dens.plot(np.linspace(0.0,np.max(nb),t.size), np.tanh(t-2.0), - linewidth=2, label=r'$\phi$') # label=r'$\phi='+self.phi.__name__) - axH1Dens.legend(loc='lower right') + linewidth=2, label=r"$\phi$") # label=r"$\phi="+self.phi.__name__) + axH1Dens.legend(loc="lower right") axH2Dens = fig.add_subplot(4,2,8) t = np.arange(0.0,4.0,0.01) nb, bins, patches = axH2Dens.hist(hout[0].ravel(), normed=True, - orientation='horizontal', label='Activations') + orientation="horizontal", label="Activations") axH2Dens.plot(np.linspace(0.0,np.max(nb),t.size), np.tanh(t-2.0), - linewidth=2, label=r'$\phi$') # label=r'$\phi='+self.phi.__name__) - axH2Dens.legend(loc='lower right') + linewidth=2, label=r"$\phi$") # label=r"$\phi="+self.phi.__name__) + axH2Dens.legend(loc="lower right") -if __name__ == '__main__': +if __name__ == "__main__": demoMERNTXOR() plt.show() diff --git a/cebl/ml/nnet/softmax.py b/cebl/ml/nnet/softmax.py index b42b8ea..15b2ed3 100644 --- a/cebl/ml/nnet/softmax.py +++ b/cebl/ml/nnet/softmax.py @@ -345,9 +345,9 @@ def demoFNS2d(): ## batchSize=15, maxRound=5, maxIter=5, ## transFunc=transfer.lecun, precision=1.0e-10, ## verbose=1) - ##print('ca:', model.ca(classData)) - ##print('bca:', model.bca(classData)) - ##print('confusion:\n', model.confusion(classData)) + ##print("ca:", 
model.ca(classData)) + ##print("bca:", model.bca(classData)) + ##print("confusion:\n", model.confusion(classData)) ##model.train(classData, optimFunc=optim.scg, ## maxIter=10, precision=1.0e-10, verbose=True) @@ -360,21 +360,21 @@ def demoFNS2d(): print(model.probs(classData[1]).dtype) print(model.probs(classData[2]).dtype) - print('red labels\n-------') + print("red labels\n-------") print(redLabel) - print('\ngreen labels\n-------') + print("\ngreen labels\n-------") print(greenLabel) - print('\nblue labels\n-------') + print("\nblue labels\n-------") print(blueLabel) - print('ca:', model.ca(classData)) - print('bca:', model.bca(classData)) - print('confusion:\n', model.confusion(classData)) + print("ca:", model.ca(classData)) + print("bca:", model.bca(classData)) + print("confusion:\n", model.confusion(classData)) # first figure shows training data and class intersections fig = plt.figure() ax = fig.add_subplot(2, 2, 1) - ax.set_title('Class Data') + ax.set_title("Class Data") # training data ax.scatter(red[:,0], red[:,1], color="red") @@ -401,13 +401,13 @@ def demoFNS2d(): diffRG = pRed - pGreen diffRB = pRed - pBlue diffGB = pGreen - pBlue - ##ax.contour(x, y, diffRG, colors='black', levels=(0,)) - ##ax.contour(x, y, diffRB, colors='black', levels=(0,)) - ##ax.contour(x, y, diffGB, colors='black', levels=(0,)) + ##ax.contour(x, y, diffRG, colors="black", levels=(0,)) + ##ax.contour(x, y, diffRB, colors="black", levels=(0,)) + ##ax.contour(x, y, diffGB, colors="black", levels=(0,)) # second figure shows 3d plots of probability densities - ax = fig.add_subplot(2, 2, 2, projection='3d') - ax.set_title('P(C = k)') + ax = fig.add_subplot(2, 2, 2, projection="3d") + ax.set_title("P(C = k)") # straight class colors for suface plots color = np.reshape([pRed, pGreen, pBlue], (3, x.shape[0], x.shape[1])) @@ -427,33 +427,33 @@ def demoFNS2d(): #surf = ax.plot_surface(x, y, pMax, cmap=matplotlib.cm.jet, linewidth=0) surf = ax.plot_surface(x, y, pMax, facecolors=colorFlip, linewidth=0.02, shade=True) - surf.set_edgecolor('black') # add edgecolor back in, bug? + surf.set_edgecolor("black") # add edgecolor back in, bug? # third figure shows contours and color image of probability densities ax = fig.add_subplot(2, 2, 3) - ax.set_title('max_K P(C = k)') + ax.set_title("max_K P(C = k)") #ax.pcolor(x, y, pMax) - ax.imshow(colorFlip, origin='lower', - extent=(mn[0], mx[0], mn[1], mx[1]), aspect='auto') + ax.imshow(colorFlip, origin="lower", + extent=(mn[0], mx[0], mn[1], mx[1]), aspect="auto") # contours nLevel = 4 - cs = ax.contour(x, y, pMax, colors='black', + cs = ax.contour(x, y, pMax, colors="black", levels=np.linspace(np.min(pMax), np.max(pMax), nLevel)) cs.clabel(fontsize=6) # fourth figure - ax = fig.add_subplot(2, 2, 4, projection='3d') - ax.set_title('argmax_K P(C = k)') + ax = fig.add_subplot(2, 2, 4, projection="3d") + ax.set_title("argmax_K P(C = k)") labels = model.label(z) lMax = np.reshape(labels, x.shape) surf = ax.plot_surface(x, y, lMax, facecolors=colorFlip, linewidth=0.02)#, antialiased=False) - surf.set_edgecolor('black') + surf.set_edgecolor("black") -if __name__ == '__main__': +if __name__ == "__main__": demoFNS2d() plt.show() diff --git a/cebl/ml/nnet/transfer.py b/cebl/ml/nnet/transfer.py index 49df93d..eff7f34 100644 --- a/cebl/ml/nnet/transfer.py +++ b/cebl/ml/nnet/transfer.py @@ -32,7 +32,7 @@ def tanh(x, prime=0): return 1.0 - util.tanh(x)**2 else: - raise NotImplementedError('%d order derivative not implemented.' 
% int(prime)) + raise NotImplementedError("%d order derivative not implemented." % int(prime)) def tanhTwist(x, prime=0, **kwargs): return _twist(x, tanh, prime, **kwargs) @@ -45,7 +45,7 @@ def lecun(x, prime=0): return 1.7159 * (2.0/3.0) * (1.0 - util.tanh((2.0/3.0) * x)**2) else: - raise NotImplementedError('%d order derivative not implemented.' % int(prime)) + raise NotImplementedError("%d order derivative not implemented." % int(prime)) def lecunTwist(x, prime=0, **kwargs): return _twist(x, lecun, prime, **kwargs) @@ -71,7 +71,7 @@ def logistic(x, prime=0): return logistic(x) * (1.0 - logistic(x)) else: - raise NotImplementedError('%d order derivative not implemented.' % int(prime)) + raise NotImplementedError("%d order derivative not implemented." % int(prime)) def logisticTwist(x, prime=0, **kwargs): return _twist(x, logistic, prime, **kwargs) @@ -84,7 +84,7 @@ def gaussian(x, prime=0): return -2.0*x*np.exp(-x**2) else: - raise NotImplementedError('%d order derivative not implemented.' % int(prime)) + raise NotImplementedError("%d order derivative not implemented." % int(prime)) def gaussianTwist(x, prime=0, **kwargs): return _twist(x, logistic, prime, **kwargs) @@ -122,7 +122,7 @@ def softplus(x, prime=0): return logistic(x) else: - raise NotImplementedError('%d order derivative not implemented.' % int(prime)) + raise NotImplementedError("%d order derivative not implemented." % int(prime)) def softplusTwist(x, prime=0, **kwargs): return _twist(x, softplus, prime, **kwargs) diff --git a/cebl/ml/optim/alopex.py b/cebl/ml/optim/alopex.py index cf99c50..08e2f00 100644 --- a/cebl/ml/optim/alopex.py +++ b/cebl/ml/optim/alopex.py @@ -95,12 +95,12 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, errorTrace = [error] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -130,7 +130,7 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, iteration += 1 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -149,22 +149,22 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # current change in error @@ -184,7 +184,7 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, corrRun = 0.0 if verbose: - print('Cooling: %f' % temp) + print("Cooling: %f" % temp) # probability of taking negative step # is drawn from the Boltzman Distribution @@ -195,14 +195,14 @@ def alopex(optable, stepSize=0.0075, tempInit=10000, tempIter=20, # save result into a dictionary result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason + result["params"] = params + result["error"] = error + result["iteration"] = iteration + result["reason"] = reason - if pTrace: result['pTrace'] = paramTrace - if tTrace: result['tTrace'] = tTrace 
- if eTrace: result['eTrace'] = errorTrace + if pTrace: result["pTrace"] = paramTrace + if tTrace: result["tTrace"] = tTrace + if eTrace: result["eTrace"] = errorTrace return result @@ -289,12 +289,12 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, errorTrace = [error] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -324,7 +324,7 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, iteration += 1 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -339,22 +339,22 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # current change in error @@ -375,14 +375,14 @@ def alopexb(optable, stepSize=0.005, forgetFactor=0.5, # save result into a dictionary result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason + result["params"] = params + result["error"] = error + result["iteration"] = iteration + result["reason"] = reason - if pTrace: result['pTrace'] = paramTrace - if tTrace: result['tTrace'] = tTrace - if eTrace: result['eTrace'] = errorTrace + if pTrace: result["pTrace"] = paramTrace + if tTrace: result["tTrace"] = tTrace + if eTrace: result["eTrace"] = errorTrace return result @@ -392,6 +392,6 @@ def demoALOPEX(): rosen.plot() -if __name__ == '__main__': +if __name__ == "__main__": demoALOPEX() plt.show() diff --git a/cebl/ml/optim/alopexas.py b/cebl/ml/optim/alopexas.py index f46a4c4..0189b07 100644 --- a/cebl/ml/optim/alopexas.py +++ b/cebl/ml/optim/alopexas.py @@ -118,12 +118,12 @@ def alopex(optable, errorTrace = [error] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -167,7 +167,7 @@ def alopex(optable, iteration += 1 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -190,22 +190,22 @@ def alopex(optable, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # current change in error @@ -225,7 +225,7 @@ def alopex(optable, corrRun = 0.0 if verbose: - print('Cooling: %f' % temp) + print("Cooling: %f" % temp) # probability of taking negative step # is drawn from the Boltzman Distribution @@ -236,15 +236,15 @@ def 
alopex(optable, # save result into a dictionary result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason + result["params"] = params + result["error"] = error + result["iteration"] = iteration + result["reason"] = reason - if pTrace: result['pTrace'] = paramTrace - if sTrace: result['sTrace'] = stepTrace - if tTrace: result['tTrace'] = tTrace - if eTrace: result['eTrace'] = errorTrace + if pTrace: result["pTrace"] = paramTrace + if sTrace: result["sTrace"] = stepTrace + if tTrace: result["tTrace"] = tTrace + if eTrace: result["eTrace"] = errorTrace return result @@ -364,12 +364,12 @@ def alopexas(optable, errorTrace = [error] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -398,7 +398,7 @@ def alopexas(optable, iteration += 1 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -421,22 +421,22 @@ def alopexas(optable, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # current change in error @@ -469,7 +469,7 @@ def alopexas(optable, corrRun = 0.0 if verbose: - print('Cooling: %f' % temp) + print("Cooling: %f" % temp) # probability of taking negative step # is drawn from the Boltzman Distribution @@ -480,15 +480,15 @@ def alopexas(optable, # save result into a dictionary result = {} - result['params'] = params - result['error'] = error - result['iteration'] = iteration - result['reason'] = reason + result["params"] = params + result["error"] = error + result["iteration"] = iteration + result["reason"] = reason - if pTrace: result['pTrace'] = paramTrace - if sTrace: result['sTrace'] = stepTrace - if tTrace: result['tTrace'] = tTrace - if eTrace: result['eTrace'] = errorTrace + if pTrace: result["pTrace"] = paramTrace + if sTrace: result["sTrace"] = stepTrace + if tTrace: result["tTrace"] = tTrace + if eTrace: result["eTrace"] = errorTrace return result @@ -600,12 +600,12 @@ def alopexb(optable, errorTrace = [error] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -648,7 +648,7 @@ def alopexb(optable, iteration += 1 if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -667,22 +667,22 @@ def alopexb(optable, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if 
error > divergeThresh:
-            reason = 'diverge'
+            reason = "diverge"
             break
 
         # current change in error
@@ -703,15 +703,15 @@ def alopexb(optable,
 
     # save result into a dictionary
     result = {}
-    result['params'] = params
-    result['error'] = error
-    result['iteration'] = iteration
-    result['reason'] = reason
+    result["params"] = params
+    result["error"] = error
+    result["iteration"] = iteration
+    result["reason"] = reason
 
-    if pTrace: result['pTrace'] = paramTrace
-    if sTrace: result['sTrace'] = stepTrace
-    if tTrace: result['tTrace'] = tTrace
-    if eTrace: result['eTrace'] = errorTrace
+    if pTrace: result["pTrace"] = paramTrace
+    if sTrace: result["sTrace"] = stepTrace
+    if tTrace: result["tTrace"] = tTrace
+    if eTrace: result["eTrace"] = errorTrace
 
     return result
 
@@ -719,7 +719,7 @@ def demoALOPEX():
     rosen = tests.Rosen(optimFunc=alopex, accuracy=0.01, maxIter=np.inf,
                         tempIter=20, stepInit=0.005, sTrace=True, verbose=True)
 
-    #plt.plot(rosen.trainResult['sTrace'])
+    #plt.plot(rosen.trainResult["sTrace"])
     rosen.plot()
 
 def arcticFox(optable,
@@ -751,12 +751,12 @@ def arcticFox(optable,
     errorTrace = []
 
     # termination reason
-    reason = ''
+    reason = ""
 
     iteration = 0
 
     if verbose:
-        print('%d %6f %6f' % (iteration, stepSize, error))
+        print("%d %6f %6f" % (iteration, stepSize, error))
 
     if callback is not None:
         callback(optable, iteration, paramTrace, errorTrace)
@@ -798,29 +798,29 @@ def arcticFox(optable,
         iteration += 1
 
         if verbose:
-            print('%d %6f %6f' % (iteration, curStepSize, error))
+            print("%d %6f %6f" % (iteration, curStepSize, error))
 
         if callback is not None:
             callback(optable, iteration, paramTrace, errorTrace)
 
         # terminate if maximum iterations reached
         if iteration >= maxIter:
-            reason = 'maxiter'
+            reason = "maxiter"
             break
 
         # terminate if desired accuracy reached
         if error < accuracy:
-            reason = 'accuracy'
+            reason = "accuracy"
             break
 
         # terminate if desired precision reached
         if np.abs(error - errorPrev) < precision:
-            reason = 'precision'
+            reason = "precision"
            break
 
         # terminate if the error function diverges
         if error > divergeThresh:
-            reason = 'diverge'
+            reason = "diverge"
             break
 
         # sign of current change in error
@@ -835,14 +835,14 @@ def arcticFox(optable,
 
     # save result into a dictionary
     result = {}
-    result['params'] = params
-    result['error'] = error
-    result['iteration'] = iteration
-    result['reason'] = reason
+    result["params"] = params
+    result["error"] = error
+    result["iteration"] = iteration
+    result["reason"] = reason
 
-    if pTrace: result['pTrace'] = paramTrace
-    if sTrace: result['sTrace'] = stepTrace
-    if eTrace: result['eTrace'] = errorTrace
+    if pTrace: result["pTrace"] = paramTrace
+    if sTrace: result["sTrace"] = stepTrace
+    if eTrace: result["eTrace"] = errorTrace
 
     return result
 
@@ -851,7 +851,7 @@ def demoArcticFox():
                         stepSize=0.01, stepSizeFinal=0.005, forgetFactor=0.5, verbose=True)
     rosen.plot()
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     demoALOPEX()
     #demoArcticFox()
     plt.show()
diff --git a/cebl/ml/optim/minibatch.py b/cebl/ml/optim/minibatch.py
index d63ac71..c43a00a 100644
--- a/cebl/ml/optim/minibatch.py
+++ b/cebl/ml/optim/minibatch.py
@@ -6,11 +6,11 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
               maxTotalIter=np.inf, pTrace=False, eTrace=False,
               miniOptimFunc=scg, verbose=0,
               *args, **kwargs):
-    '''
+    """
     only works for supervised problems, must have x and g
 
-    args and kwargs are passed to miniOptimFunc, doesn't pass extra args to optable.error
-    '''
+    args and kwargs are passed to miniOptimFunc, doesn't pass extra args to optable.error
+    """
 
     # make sure x 
and g are numpy arrays
     x = np.asarray(x)
     g = np.asarray(g)
@@ -49,11 +49,11 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
     done = False
     while not done:
         if verbose > 0:
-            print('=======')
-            print('iterations: %d' % totalIter)
-            print('error: %.5f' % optable.error(x=x, g=g))
-            print('round: %d' % curRound)
-            print('=======')
+            print("=======")
+            print("iterations: %d" % totalIter)
+            print("error: %.5f" % optable.error(x=x, g=g))
+            print("round: %d" % curRound)
+            print("=======")
 
         # start index into current minibatch
         start = 0
@@ -67,12 +67,12 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
         curBatch = 0
         while True:
             if verbose > 0:
-                print('minibatch: %d' % curBatch)
+                print("minibatch: %d" % curBatch)
 
             # end index into current minibatch
             end = start + batchSize
 
-            # don't process last minibatch
+            # don't process last minibatch
             # if smaller than batchSize
             if end > nObs:
                 break
@@ -90,16 +90,16 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
 
             # keep parameter history if requested
             if pTrace:
-                #paramTrace += miniResult['pTrace']
+                #paramTrace += miniResult["pTrace"]
                 paramTrace.append(optable.parameters().copy())
 
             # keep error function history if requested
             if eTrace:
-                #errorTrace += miniResult['eTrace']
+                #errorTrace += miniResult["eTrace"]
                 errorTrace.append(optable.error(x=x, g=g))
 
             # increment total iterations
-            totalIter += miniResult['iteration']
+            totalIter += miniResult["iteration"]
 
             # increment batch counters
             curBatch += 1
@@ -107,7 +107,7 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
 
             # terminate if maximum total iterations reached
             if totalIter >= maxTotalIter:
-                reason = 'maxiter'
+                reason = "maxiter"
                 done = True
                 break
 
@@ -119,13 +119,13 @@ def minibatch(optable, x, g, batchSize=10, maxRound=10,
 
         # terminate if maximum total rounds is reached
         if curRound >= maxRound:
-            reason = 'maxround'
+            reason = "maxround"
            done = True
 
     if verbose > 0:
-        print('reason: %s' % reason)
-        print('round: %d' % curRound)
-        print('iterations: %d' % totalIter)
+        print("reason: %s" % reason)
+        print("round: %d" % curRound)
+        print("iterations: %d" % totalIter)
 
     # save result into a dictionary
     result = {
diff --git a/cebl/ml/optim/optable.py b/cebl/ml/optim/optable.py
index d170689..7c26dcb 100644
--- a/cebl/ml/optim/optable.py
+++ b/cebl/ml/optim/optable.py
@@ -11,25 +11,25 @@ def parameters(self):
         view will be modified in place. This method MUST be overridden
         by ALL implementations of Optable.
         """
-        raise NotImplementedError('parameters not implemented.')
+        raise NotImplementedError("parameters not implemented.")
 
     def error(self):
         """Return a scalar error metric for the current state of the Optable
         implementation. This method MUST be overridden by ALL
         implementations of Optable.
         """
-        raise NotImplementedError('error not implemented.')
+        raise NotImplementedError("error not implemented.")
 
     def gradient(self, returnError=True):
         """Return a 1d numpy array holding the gradient of the parameters to
         optimize. This method must be overridden in order to use
         optimization routines that require a first-order gradient.
         """
-        raise NotImplementedError('gradient not implemented.')
+        raise NotImplementedError("gradient not implemented.")
 
     def gradient2(self, returnError=True):
         """Return a 1d numpy array holding the 2nd order gradient of the
         parameters to optimize. This method must be overridden in order to
         use optimization routines that require a second-order gradient. 
""" - raise NotImplementedError('gradient2 not implemented.') + raise NotImplementedError("gradient2 not implemented.") diff --git a/cebl/ml/optim/pso.py b/cebl/ml/optim/pso.py index 5673e1a..1de3eae 100644 --- a/cebl/ml/optim/pso.py +++ b/cebl/ml/optim/pso.py @@ -45,12 +45,12 @@ def pso(optable, *args, paramTrace = [np.vstack(pParams)] # termination reason - reason = '' + reason = "" iteration = 0 if verbose: - print('%d %6f' % (iteration, gError)) + print("%d %6f" % (iteration, gError)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -80,7 +80,7 @@ def pso(optable, *args, iteration += 1 if verbose: - print('%d %3f %6f' % (iteration, np.max(pVeloc), gError)) + print("%d %3f %6f" % (iteration, np.max(pVeloc), gError)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -95,22 +95,22 @@ def pso(optable, *args, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if gError < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(gError - gError) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if gError > divergeThresh: - reason = 'diverge' + reason = "diverge" break params[...] = gBest @@ -150,23 +150,23 @@ def demoPSO(): zz = values.reshape((xx.shape[0], yy.shape[1])) fig = plt.figure(figsize=(12, 6)) - axSurf = fig.add_subplot(1, 2, 1, projection='3d') + axSurf = fig.add_subplot(1, 2, 1, projection="3d") surf = axSurf.plot_surface(xx, yy, zz, linewidth=1.0, cmap=pltcm.jet) - surf.set_edgecolor('black') + surf.set_edgecolor("black") axCont = fig.add_subplot(1, 2, 2) - axCont.contour(x, y, zz, 40, color='black') - axCont.scatter(rosen.a, rosen.a**2, color='black', marker='o', s=400, linewidth=3) - axCont.scatter(*rosen.solution, color='red', marker='x', s=400, linewidth=3) + axCont.contour(x, y, zz, 40, color="black") + axCont.scatter(rosen.a, rosen.a**2, color="black", marker="o", s=400, linewidth=3) + axCont.scatter(*rosen.solution, color="red", marker="x", s=400, linewidth=3) - paramTrace = np.array(rosen.trainResult['pTrace']) + paramTrace = np.array(rosen.trainResult["pTrace"]) for i in range(paramTrace.shape[1]): axCont.plot(paramTrace[:,i:,0], paramTrace[:,i:,1], color=plt.cm.jet(i/float(paramTrace.shape[1])), linewidth=1) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoPSO() plt.show() diff --git a/cebl/ml/optim/rprop.py b/cebl/ml/optim/rprop.py index 84135a3..6ab66df 100644 --- a/cebl/ml/optim/rprop.py +++ b/cebl/ml/optim/rprop.py @@ -112,7 +112,7 @@ def rprop(optable, *args, errorTrace = [] # termination reason - reason = '' + reason = "" iteration = 0 while True: @@ -122,7 +122,7 @@ def rprop(optable, *args, error, grad = optable.gradient(*args, returnError=True, **kwargs) if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -141,22 +141,22 @@ def rprop(optable, *args, # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function 
diverges
         if error > divergeThresh:
-            reason = 'diverge'
+            reason = "diverge"
             break
 
         flips = grad * gradPrev
@@ -318,7 +318,7 @@ def irprop(optable, *args,
     errorTrace = []
 
     # termination reason
-    reason = ''
+    reason = ""
 
     iteration = 0
 
     while True:
@@ -328,7 +328,7 @@ def irprop(optable, *args,
         error, grad = optable.gradient(*args, returnError=True, **kwargs)
 
         if verbose:
-            print('%d %6f' % (iteration, error))
+            print("%d %6f" % (iteration, error))
 
         if callback is not None:
             callback(optable, iteration, paramTrace, errorTrace)
@@ -347,22 +347,22 @@ def irprop(optable, *args,
 
         # terminate if maximum iterations reached
         if iteration >= maxIter:
-            reason = 'maxiter'
+            reason = "maxiter"
             break
 
         # terminate if desired accuracy reached
         if error < accuracy:
-            reason = 'accuracy'
+            reason = "accuracy"
             break
 
         # terminate if desired precision reached
         if np.abs(error - errorPrev) < precision:
-            reason = 'precision'
+            reason = "precision"
             break
 
         # terminate if the error function diverges
         if error > divergeThresh:
-            reason = 'diverge'
+            reason = "diverge"
             break
 
         flips = grad * gradPrev
@@ -371,7 +371,7 @@ def irprop(optable, *args,
 
         if error > errorPrev:
             if verbose:
-                print('No success.')
+                print("No success.")
 
             # backtrack flipped steps
             params[flipsNeg] += steps[flipsNeg] * np.sign(grad[flipsNeg])
@@ -422,6 +422,6 @@ def demoIRProp():
     rosen.plot()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     demoRProp()
     plt.show()
diff --git a/cebl/ml/optim/sciopt.py b/cebl/ml/optim/sciopt.py
index ca0f5aa..198b656 100644
--- a/cebl/ml/optim/sciopt.py
+++ b/cebl/ml/optim/sciopt.py
@@ -7,7 +7,7 @@ def sciopt(optable, *args,
-           method='CG', options=None,
+           method="CG", options=None,
            maxIter=1000, precision=1.0e-10,
            pTrace=False, eTrace=False, callback=None,
            verbose=False,
@@ -24,14 +24,14 @@ def errFunc(p):
         params.flat[...] = p
         return optable.error(*args, **kwargs)
 
-    if method in ('CG', 'BFGS', 'Newton-CG', 'dogleg', 'trust-ncg'):
+    if method in ("CG", "BFGS", "Newton-CG", "dogleg", "trust-ncg"):
         def gradFunc(p):
             params.flat[...] = p
             return optable.gradient(*args, returnError=False, **kwargs)
     else:
         gradFunc = None
 
-    if method == 'Newton-CG':
+    if method == "Newton-CG":
         def grad2Func(p):
             params.flat[...] = p
             return optable.gradient2(*args, returnError=False, **kwargs)
@@ -45,7 +45,7 @@ def cb(p):
         error = optable.error(*args, **kwargs)
 
         if verbose:
-            print('%d %6f' % (cb.iteration, error))
+            print("%d %6f" % (cb.iteration, error))
 
         # keep parameter history if requested
         if pTrace:
@@ -62,7 +62,7 @@ def cb(p):
 
     if options is None:
         options = {}
-    options['maxiter'] = maxIter
+    options["maxiter"] = maxIter
 
     optres = spopt.minimize(fun=errFunc, method=method,
                             x0=params, tol=precision,
@@ -71,9 +71,9 @@ def cb(p):
 
     if verbose:
         print(optres)
-        print('\n')
+        print("\n")
 
-    params.flat[...] = optres['x']
+    params.flat[...] = optres["x"]
 
     result = {
         "error": optres["fun"],
@@ -89,25 +89,25 @@ def cb(p):
 
     return result
 
 def demoScioptPowell():
-    """Demonstration of Powell's method for gradient-free optimization.
+    """Demonstration of Powell's method for gradient-free optimization.
     """
-    rosen = tests.Rosen(optimFunc=sciopt, method='Powell', verbose=True, options={'maxfev': 1000})
+    rosen = tests.Rosen(optimFunc=sciopt, method="Powell", verbose=True, options={"maxfev": 1000})
     rosen.plot()
 
 def demoScioptBFGS():
     """Demonstration of BFGS optimization. 
""" - rosen = tests.Rosen(optimFunc=sciopt, method='BFGS', verbose=True) + rosen = tests.Rosen(optimFunc=sciopt, method="BFGS", verbose=True) rosen.plot() def demoScioptCG(): """Demonstration of conjugate gradient optimization with line search. """ - rosen = tests.Rosen(optimFunc=sciopt, method='CG', verbose=True) + rosen = tests.Rosen(optimFunc=sciopt, method="CG", verbose=True) rosen.plot() -if __name__ == '__main__': +if __name__ == "__main__": #demoScioptPowell() #demoScioptBFGS() demoScioptCG() diff --git a/cebl/ml/optim/sgd.py b/cebl/ml/optim/sgd.py index 27273d4..7d07f28 100644 --- a/cebl/ml/optim/sgd.py +++ b/cebl/ml/optim/sgd.py @@ -42,7 +42,7 @@ def sgd(optable, x, g, batchSize=30, start = 0 np.random.shuffle(batchInd) if verbose: - print('batch: %d' % batch) + print("batch: %d" % batch) xShuff = x[batchInd] gShuff = g[batchInd] @@ -66,7 +66,7 @@ def sgd(optable, x, g, batchSize=30, velocity[...] = momentum * velocity + curLearningRate * grad if verbose: - print('%d %3f %6f' % (iteration, curLearningRate, error)) + print("%d %3f %6f" % (iteration, curLearningRate, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -81,22 +81,22 @@ def sgd(optable, x, g, batchSize=30, # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # move in direction of negative gradient diff --git a/cebl/ml/optim/steepest.py b/cebl/ml/optim/steepest.py index 0407a3a..f01bafc 100644 --- a/cebl/ml/optim/steepest.py +++ b/cebl/ml/optim/steepest.py @@ -90,7 +90,7 @@ def steepest(optable, errorTrace = [] # termination reason - reason = '' + reason = "" iteration = 0 while True: @@ -102,7 +102,7 @@ def steepest(optable, #curLearningRate = learningRate + learningRateDecay * iteration if verbose: - print('%d %3f %6f' % (iteration, curLearningRate, error)) + print("%d %3f %6f" % (iteration, curLearningRate, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -117,22 +117,22 @@ def steepest(optable, # terminate if desired accuracy reached if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # move in direction of negative gradient @@ -239,7 +239,7 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4, errorTrace = [] # termination reason - reason = '' + reason = "" iteration = 0 while True: @@ -248,7 +248,7 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4, error, grad = optable.gradient(*args, returnError=True, **kwargs) if verbose: - print('%d %6f' % (iteration, error)) + print("%d %6f" % (iteration, error)) if callback is not None: callback(optable, iteration, paramTrace, errorTrace) @@ -263,22 +263,22 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4, # terminate if desired accuracy reached 
if error < accuracy: - reason = 'accuracy' + reason = "accuracy" break # terminate if desired precision reached if np.abs(error - errorPrev) < precision: - reason = 'precision' + reason = "precision" break # terminate if the error function diverges if error > divergeThresh: - reason = 'diverge' + reason = "diverge" break # terminate if maximum iterations reached if iteration >= maxIter: - reason = 'maxiter' + reason = "maxiter" break # initialize line search parameters @@ -300,7 +300,7 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4, break if verbose: - print('ls: %3f %6f' % (stepSize, lsError)) + print("ls: %3f %6f" % (stepSize, lsError)) # if error got worse if lsError > lsErrorPrev: @@ -332,8 +332,8 @@ def steepestl(optable, stepInitial=0.1, lineSearchPrecision=1.0e-4, } # pylint: disable=multiple-statements - if pTrace: result['pTrace'] = paramTrace - if eTrace: result['eTrace'] = errorTrace + if pTrace: result["pTrace"] = paramTrace + if eTrace: result["eTrace"] = errorTrace return result @@ -347,6 +347,6 @@ def demoSteepestl(): quad.plot() -if __name__ == '__main__': +if __name__ == "__main__": demoSteepestl() plt.show() diff --git a/cebl/ml/optim/tests.py b/cebl/ml/optim/tests.py index 2867f94..bd76008 100644 --- a/cebl/ml/optim/tests.py +++ b/cebl/ml/optim/tests.py @@ -55,14 +55,14 @@ def plot(self, n=200, rng=(-1.0,1.0, -1.0,1.0)): fig = plt.figure() axCont = fig.add_subplot(1,1,1) - axCont.contour(x, y, zz, 20, color='black') - #axCont.scatter(0.0, 0.0, color='black', marker='o', s=300, linewidth=3) + axCont.contour(x, y, zz, 20, color="black") + #axCont.scatter(0.0, 0.0, color="black", marker="o", s=300, linewidth=3) - paramTrace = np.array(self.trainResult['pTrace']) - axCont.plot(paramTrace[:,0], paramTrace[:,1], color='red', marker='o', linewidth=2) + paramTrace = np.array(self.trainResult["pTrace"]) + axCont.plot(paramTrace[:,0], paramTrace[:,1], color="red", marker="o", linewidth=2) - axCont.set_xlabel(r'$w_1$') - axCont.set_ylabel(r'$w_2$') + axCont.set_xlabel(r"$w_1$") + axCont.set_ylabel(r"$w_2$") fig.tight_layout() @@ -122,18 +122,18 @@ def plot(self, n=200, rng=(-3.0,3.0, -4.0,8.0)): zz = values.reshape((xx.shape[0], yy.shape[1])) fig = plt.figure(figsize=(12,6)) - axSurf = fig.add_subplot(1,2,1, projection='3d') + axSurf = fig.add_subplot(1,2,1, projection="3d") surf = axSurf.plot_surface(xx, yy, zz, linewidth=1.0, cmap=pltcm.jet) - surf.set_edgecolor('black') + surf.set_edgecolor("black") axCont = fig.add_subplot(1,2,2) - axCont.contour(x, y, zz, 40, color='black') - axCont.scatter(self.a, self.a**2, color='black', marker='o', s=400, linewidth=3) - axCont.scatter(*self.solution, color='red', marker='x', s=400, linewidth=3) + axCont.contour(x, y, zz, 40, color="black") + axCont.scatter(self.a, self.a**2, color="black", marker="o", s=400, linewidth=3) + axCont.scatter(*self.solution, color="red", marker="x", s=400, linewidth=3) - paramTrace = np.array(self.trainResult['pTrace']) - axCont.plot(paramTrace[:,0], paramTrace[:,1], color='red', linewidth=2) + paramTrace = np.array(self.trainResult["pTrace"]) + axCont.plot(paramTrace[:,0], paramTrace[:,1], color="red", linewidth=2) fig.tight_layout() @@ -182,20 +182,20 @@ def plot(self, n=500, rng=(-5.0,5.0, -5.0,5.0)): zz = values.reshape((xx.shape[0], yy.shape[1])) fig = plt.figure() - axSurf = fig.add_subplot(1,2,1, projection='3d') + axSurf = fig.add_subplot(1,2,1, projection="3d") surf = axSurf.plot_surface(xx, yy, zz, linewidth=1.0, cmap=pltcm.jet) - surf.set_edgecolor('black') + 
surf.set_edgecolor("black") axCont = fig.add_subplot(1,2,2) - axCont.contour(x, y, zz, 40, color='black') - axCont.scatter(0.0, 0.0, color='black', marker='o', s=400, linewidth=3) - axCont.scatter(*self.solution, color='red', marker='x', s=400, linewidth=3) + axCont.contour(x, y, zz, 40, color="black") + axCont.scatter(0.0, 0.0, color="black", marker="o", s=400, linewidth=3) + axCont.scatter(*self.solution, color="red", marker="x", s=400, linewidth=3) - paramTrace = np.array(self.trainResult['pTrace']) - axCont.plot(paramTrace[:,0], paramTrace[:,1], color='red', linewidth=2) + paramTrace = np.array(self.trainResult["pTrace"]) + axCont.plot(paramTrace[:,0], paramTrace[:,1], color="red", linewidth=2) -if __name__ == '__main__': - test = Ackley(method='Powell', verbose=True) +if __name__ == "__main__": + test = Ackley(method="Powell", verbose=True) test.plot() plt.show() diff --git a/cebl/ml/paraminit.py b/cebl/ml/paraminit.py index 359211c..ab16200 100644 --- a/cebl/ml/paraminit.py +++ b/cebl/ml/paraminit.py @@ -29,7 +29,7 @@ def nguyen(size, scale=(-1.0, 1.0), overlap=0.3): return weights # XXX - idfah """ - raise NotImplementedError('nguyen not yet implemented') + raise NotImplementedError("nguyen not yet implemented") def runif(size, low=-0.01, high=0.01): """Random uniform. diff --git a/cebl/ml/part.py b/cebl/ml/part.py index 348f77b..7e73633 100644 --- a/cebl/ml/part.py +++ b/cebl/ml/part.py @@ -92,7 +92,7 @@ def stratified(x, g, nFold): ng = len(g) // nFold if nx != ng: - raise RuntimeError('size of x and g do not match.') + raise RuntimeError("size of x and g do not match.") for fold in range(nFold): start = fold * nx diff --git a/cebl/ml/regression.py b/cebl/ml/regression.py index b302cd2..1ecd03b 100644 --- a/cebl/ml/regression.py +++ b/cebl/ml/regression.py @@ -10,10 +10,10 @@ def __init__(self, nIn, nOut): self.nOut = nOut def train(self, x, g): - raise NotImplementedError('train not implemented.') + raise NotImplementedError("train not implemented.") def eval(self, x): - raise NotImplementedError('eval not implemented.') + raise NotImplementedError("eval not implemented.") def evals(self, xs, *args, **kwargs): xs = np.asarray(xs) diff --git a/cebl/ml/som.py b/cebl/ml/som.py index 18895cc..1d18ee8 100644 --- a/cebl/ml/som.py +++ b/cebl/ml/som.py @@ -13,7 +13,7 @@ class SelfOrganizingMap: """Self-Organizing Map (SOM). """ def __init__(self, x, latticeSize=(64, 64), maxIter=5000, - distMetric='euclidean', learningRate=0.02, learningRateFinal=None, + distMetric="euclidean", learningRate=0.02, learningRateFinal=None, radius=None, radiusFinal=None, weightRange=(0.0, 1.0), callback=None, verbose=False): """Construct a new Self-Organizing Map (SOM). 
@@ -120,7 +120,7 @@ def train(self, x): neighborHood[...,None] * (curObs[None,None,:] - self.weights) if self.verbose: - print('%d %.3f %.3f' % (iteration, curLearningRate, curRadius)) + print("%d %.3f %.3f" % (iteration, curLearningRate, curRadius)) if self.callback is not None: self.callback(iteration, self.weights, curLearningRate, curRadius) @@ -159,7 +159,7 @@ def animFunc(iteration, weights, learningRate, radius): animFunc.fig = plt.figure(figsize=(10, 10)) animFunc.ax = animFunc.fig.add_subplot(1, 1, 1) animFunc.wimg = animFunc.ax.imshow(weights, - interpolation='none', origin='lower', animated=True) + interpolation="none", origin="lower", animated=True) animFunc.fig.tight_layout() animFunc.fig.show() @@ -177,7 +177,7 @@ def animFunc(iteration, weights, learningRate, radius): animFunc.ax.draw_artist(animFunc.wimg) animFunc.fig.canvas.blit(animFunc.ax.bbox) - plt.savefig('frame-%04d.png' % animFunc.frame, dpi=200) + plt.savefig("frame-%04d.png" % animFunc.frame, dpi=200) animFunc.frame += 1 som = SOM(data, latticeSize=(32, 32), maxIter=20000, @@ -193,12 +193,12 @@ def animFunc(iteration, weights, learningRate, radius): xlim = animFunc.ax.get_xlim() ylim = animFunc.ax.get_ylim() - #plt.plot(bindex, linestyle='', marker='p', color=('red', 'green', 'blue')) + #plt.plot(bindex, linestyle="", marker="p", color=("red", "green", "blue")) animFunc.ax.scatter(bindex[:,1], bindex[:,0], c=data, s=100) animFunc.ax.set_xlim(xlim) animFunc.ax.set_ylim(ylim) -if __name__ == '__main__': +if __name__ == "__main__": demoSOM() plt.show() diff --git a/cebl/ml/stand.py b/cebl/ml/stand.py index 076c1c5..2c002dc 100644 --- a/cebl/ml/stand.py +++ b/cebl/ml/stand.py @@ -4,7 +4,7 @@ class Standardizer: - def __init__(self, x, method='zmus'): + def __init__(self, x, method="zmus"): """ Args: @@ -14,12 +14,12 @@ def __init__(self, x, method='zmus'): range: Range of [-1,1] """ method = method.lower() - if method == 'zmus': + if method == "zmus": self.initZmus(x) - elif method == 'range': + elif method == "range": self.initRange(x) else: - raise RuntimeError('Unknown method: %s.' % method) + raise RuntimeError("Unknown method: %s." % method) def initZmus(self, x): x = np.asarray(x) @@ -30,7 +30,7 @@ def initZmus(self, x): # best way to handle this? 
XXX - idfah if np.any(np.isclose(self.scale, 0.0)): - print('Standardizer Warning: Some dimensions are constant, capping zeros.') + print("Standardizer Warning: Some dimensions are constant, capping zeros.") self.scale = util.capZero(self.scale) def initRange(self, x): diff --git a/cebl/ml/strans/csp.py b/cebl/ml/strans/csp.py index c45de67..5361b4e 100644 --- a/cebl/ml/strans/csp.py +++ b/cebl/ml/strans/csp.py @@ -93,11 +93,11 @@ def demoCSP(): axS1 = fig.add_subplot(4,1, 1) axS1.plot(s1 + util.colsep(s1)) - axS1.set_title('Class 1 Signal') + axS1.set_title("Class 1 Signal") axS2 = fig.add_subplot(4,1, 2) axS2.plot(s2 + util.colsep(s2)) - axS2.set_title('Class 2 Signal') + axS2.set_title("Class 2 Signal") cspFilt = CSP(s1, s2, lags=0) @@ -109,12 +109,12 @@ def demoCSP(): axCSP1 = fig.add_subplot(4,1, 3) axCSP1.plot(csp1 + util.colsep(csp1)) - axCSP1.set_title('Class 1 Signal CSP') + axCSP1.set_title("Class 1 Signal CSP") axCSP2 = fig.add_subplot(4,1, 4) axCSP2.plot(csp2 + util.colsep(csp2)) - axCSP2.set_title('Class 2 Signal CSP') + axCSP2.set_title("Class 2 Signal CSP") -if __name__ == '__main__': +if __name__ == "__main__": demoCSP() plt.show() diff --git a/cebl/ml/strans/ica.py b/cebl/ml/strans/ica.py index b2914c4..d02a2e7 100644 --- a/cebl/ml/strans/ica.py +++ b/cebl/ml/strans/ica.py @@ -51,7 +51,7 @@ class IndependentComponentsAnalysis(STrans): publisher={MIT Press} } """ - def __init__(self, s, lags=0, kurtosis='adapt', + def __init__(self, s, lags=0, kurtosis="adapt", learningRate=1.5, tolerance=1.0e-6, maxIter=10000, callback=None, verbose=False, *args, **kwargs): STrans.__init__(self, s, lags=lags, *args, **kwargs) @@ -74,11 +74,11 @@ def train(self, s, kurtosis, learningRate, tolerance, maxIter, callback, verbose while True: y = s.dot(self.w) - if kurtosis == 'sub': + if kurtosis == "sub": k = -1 - elif kurtosis == 'super': + elif kurtosis == "super": k = 1 - elif kurtosis == 'adapt': + elif kurtosis == "adapt": #k = np.sign(np.mean(1.0-util.tanh(y)**2, axis=0) * # np.mean(y**2, axis=0) - # np.mean(y*util.tanh(y), axis=0)) @@ -94,27 +94,27 @@ def train(self, s, kurtosis, learningRate, tolerance, maxIter, callback, verbose wtol = np.max(np.abs(wPrev-self.w)) if verbose: - print('%d %6f' % (iteration, wtol)) + print("%d %6f" % (iteration, wtol)) if callback is not None: callback(iteration, wtol) if wtol < tolerance: - self.reason = 'tolerance' + self.reason = "tolerance" break elif np.max(np.abs(self.w)) > 1.0e100: - self.reason = 'diverge' + self.reason = "diverge" break if iteration >= maxIter: - self.reason = 'maxiter' + self.reason = "maxiter" break iteration += 1 if verbose: - print('Reason: ' + self.reason) + print("Reason: " + self.reason) self.w /= np.sqrt(np.sum(self.w**2, axis=0)) self.wInv[...] 
= np.linalg.pinv(self.w) @@ -135,33 +135,33 @@ def demoICA(): sMixed = s.dot(m) - icaFilt = ICA(sMixed, kurtosis='sub', verbose=True) + icaFilt = ICA(sMixed, kurtosis="sub", verbose=True) fig = plt.figure() axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) - axOrig.set_title('Unmixed Signal') + axOrig.set_title("Unmixed Signal") axOrig.autoscale(tight=True) axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) - axMixed.set_title('Mixed Signal (random transform)') + axMixed.set_title("Mixed Signal (random transform)") axMixed.autoscale(tight=True) axUnmixed = fig.add_subplot(4, 1, 3) icaFilt.plotTransform(sMixed, ax=axUnmixed) - axUnmixed.set_title('ICA Components') + axUnmixed.set_title("ICA Components") axUnmixed.autoscale(tight=True) axCleaned = fig.add_subplot(4, 1, 4) icaFilt.plotFilter(sMixed, comp=(0, 1,), ax=axCleaned) - axCleaned.set_title('Cleaned Signal (First two components kept)') + axCleaned.set_title("Cleaned Signal (First two components kept)") axCleaned.autoscale(tight=True) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoICA() plt.show() diff --git a/cebl/ml/strans/msf.py b/cebl/ml/strans/msf.py index 193f35e..c7ff667 100644 --- a/cebl/ml/strans/msf.py +++ b/cebl/ml/strans/msf.py @@ -90,27 +90,27 @@ def demoMSF(): axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) - axOrig.set_title('Unmixed Signal') + axOrig.set_title("Unmixed Signal") axOrig.autoscale(tight=True) axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) - axMixed.set_title('Mixed Signal (random transform)') + axMixed.set_title("Mixed Signal (random transform)") axMixed.autoscale(tight=True) axUnmixed = fig.add_subplot(4, 1, 3) msfFilt.plotTransform(sMixed, ax=axUnmixed) - axUnmixed.set_title('MSF Components') + axUnmixed.set_title("MSF Components") axUnmixed.autoscale(tight=True) axCleaned = fig.add_subplot(4, 1, 4) msfFilt.plotFilter(sMixed, comp=(2,), remove=True, ax=axCleaned) - axCleaned.set_title('Cleaned Signal (Last Component Removed)') + axCleaned.set_title("Cleaned Signal (Last Component Removed)") axCleaned.autoscale(tight=True) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoMSF() plt.show() diff --git a/cebl/ml/strans/pca.py b/cebl/ml/strans/pca.py index 6bf1f00..1cee47e 100644 --- a/cebl/ml/strans/pca.py +++ b/cebl/ml/strans/pca.py @@ -56,15 +56,15 @@ def plotMags(self, standardize=True, ax=None, **kwargs): result = {} if ax is None: fig = plt.figure() - result['fig'] = fig + result["fig"] = fig ax = fig.add_subplot(1, 1, 1) - result['ax'] = ax + result["ax"] = ax mags = self.mags / np.sum(self.mags) if standardize else self.mags sep = np.arange(len(mags)) bars = plt.bar(sep, mags, **kwargs) - result['bars'] = bars + result["bars"] = bars return result @@ -111,22 +111,22 @@ def demoPCA(): axOrig = fig.add_subplot(4, 1, 1) axOrig.plot(s+util.colsep(s)) - axOrig.set_title('Unmixed Signal') + axOrig.set_title("Unmixed Signal") axOrig.autoscale(tight=True) axMixed = fig.add_subplot(4, 1, 2) axMixed.plot(sMixed+util.colsep(sMixed)) - axMixed.set_title('Mixed Signal (3d rotation)') + axMixed.set_title("Mixed Signal (3d rotation)") axMixed.autoscale(tight=True) axUnmixed = fig.add_subplot(4, 1, 3) pcaFilt.plotTransform(sMixed, ax=axUnmixed) - axUnmixed.set_title('PCA Components') + axUnmixed.set_title("PCA Components") axUnmixed.autoscale(tight=True) axCleaned = fig.add_subplot(4, 1, 4) pcaFilt.plotFilter(sMixed, comp=(1, 2,), ax=axCleaned) - axCleaned.set_title('Cleaned 
Signal (First Component Removed)') + axCleaned.set_title("Cleaned Signal (First Component Removed)") axCleaned.autoscale(tight=True) fig.tight_layout() @@ -151,12 +151,12 @@ def demoPCA2d(): ax.scatter(s[:,0], s[:,1]) ax.arrow(0.0, 0.0, pca.wInv[0,0]/pca.mags[0], pca.wInv[0,1]/pca.mags[0], - head_width=0.05, head_length=0.1, color='red') + head_width=0.05, head_length=0.1, color="red") ax.arrow(0.0, 0.0, pca.wInv[1,0]/pca.mags[1], pca.wInv[1,1]/pca.mags[1], - head_width=0.05, head_length=0.1, color='red') + head_width=0.05, head_length=0.1, color="red") ax.grid() -if __name__ == '__main__': +if __name__ == "__main__": demoPCA() #demoPCA2d() plt.show() diff --git a/cebl/ml/strans/strans.py b/cebl/ml/strans/strans.py index 72e478c..999160d 100644 --- a/cebl/ml/strans/strans.py +++ b/cebl/ml/strans/strans.py @@ -74,7 +74,7 @@ def plotTransform(self, s, comp=None, remove=False, ax=None, **kwargs): y = self.transform(s, comp=comp, remove=remove) lines = ax.plot(y+util.colsep(y), **kwargs) - return {'ax': ax, 'lines': lines} + return {"ax": ax, "lines": lines} def plotFilter(self, s, comp, remove=False, ax=None, **kwargs): if ax is None: @@ -84,4 +84,4 @@ def plotFilter(self, s, comp, remove=False, ax=None, **kwargs): filt = self.filter(s, comp=comp, remove=remove) lines = ax.plot(filt+util.colsep(filt), **kwargs) - return {'ax': ax, 'lines': lines} + return {"ax": ax, "lines": lines} diff --git a/cebl/sig/__init__.py b/cebl/sig/__init__.py index 163bd54..4c32b21 100644 --- a/cebl/sig/__init__.py +++ b/cebl/sig/__init__.py @@ -1,6 +1,5 @@ """Signal processing. """ - from .bandpass import * from .cwt import * from .psd import * diff --git a/cebl/sig/bandpass.py b/cebl/sig/bandpass.py index c625f78..fb3d2e6 100644 --- a/cebl/sig/bandpass.py +++ b/cebl/sig/bandpass.py @@ -29,7 +29,7 @@ def __init__(self, lowFreq, highFreq, sampRate=1.0, dtype=None): self.high: High corner as a fraction of the nyquist rate. self.bandType: String containing the "band type" of the filter. - One of: 'highpass', 'lowpass' or 'bandpass'. + One of: "highpass", "lowpass" or "bandpass". """ self.dtype = dtype @@ -41,42 +41,42 @@ def __init__(self, lowFreq, highFreq, sampRate=1.0, dtype=None): self.low = lowFreq / self.nyquist if self.low > 1.0: - raise RuntimeError('Invalid lowFreq: ' + str(lowFreq) + '. Above nyquist rate.') + raise RuntimeError("Invalid lowFreq: " + str(lowFreq) + ". Above nyquist rate.") if self.low < 0.0: - raise RuntimeError('Invalid lowFreq: ' + str(lowFreq) + '. Not positive.') + raise RuntimeError("Invalid lowFreq: " + str(lowFreq) + ". Not positive.") self.high = highFreq / self.nyquist if self.high != np.Inf: if self.high > 1.0: - raise RuntimeError('Invalid highFreq: ' + str(highFreq) + '. Above nyquist rate.') + raise RuntimeError("Invalid highFreq: " + str(highFreq) + ". Above nyquist rate.") if self.high < 0.0: - raise RuntimeError('Invalid highFreq: ' + str(highFreq) + '. Not positive.') + raise RuntimeError("Invalid highFreq: " + str(highFreq) + ". 
Not positive.") if np.isclose(self.low, 0.0) and self.high == np.inf: - self.bandType = 'allpass' + self.bandType = "allpass" elif np.isclose(self.low, self.high): - self.bandType = 'allstop' + self.bandType = "allstop" elif np.isclose(self.low, 0.0) and self.high != np.Inf: - self.bandType = 'lowpass' + self.bandType = "lowpass" elif self.low > 0.0 and self.high == np.Inf: - self.bandType = 'highpass' + self.bandType = "highpass" elif self.low > 0.0 and self.high != np.Inf and self.low < self.high: - self.bandType = 'bandpass' + self.bandType = "bandpass" elif self.low > 0.0 and self.high != np.Inf and self.high < self.low: - self.bandType = 'bandstop' + self.bandType = "bandstop" else: - raise RuntimeError('Invalid filter corners: ' + - str(lowFreq) + ', ' + str(highFreq) + '.') + raise RuntimeError("Invalid filter corners: " + + str(lowFreq) + ", " + str(highFreq) + ".") def frequencyResponse(self, freqs=None): - raise NotImplementedError('frequencyResponse not implemented.') + raise NotImplementedError("frequencyResponse not implemented.") def filter(self, s, axis=0): - raise NotImplementedError('filter not implemented.') + raise NotImplementedError("filter not implemented.") - def plotFreqResponse(self, freqs=None, scale='linear', + def plotFreqResponse(self, freqs=None, scale="linear", showCorners=True, - label='Frequency Response', + label="Frequency Response", ax=None, **kwargs): """Plot the frequency response of the filter. """ @@ -89,24 +89,24 @@ def plotFreqResponse(self, freqs=None, scale='linear', responseMags = np.abs(responses) scale = scale.lower() - if scale == 'linear': - ax.set_ylabel('Gain') - elif scale == 'log': - ax.set_ylabel('Gain') - ax.set_yscale('symlog') - elif scale == 'db': + if scale == "linear": + ax.set_ylabel("Gain") + elif scale == "log": + ax.set_ylabel("Gain") + ax.set_yscale("symlog") + elif scale == "db": responseMags = 10.0*np.log10(util.capZero(responseMags**2)) - ax.set_ylabel('Gain (dB)') + ax.set_ylabel("Gain (dB)") else: - raise RuntimeError('Invalid scale: ' + str(scale) + '.') + raise RuntimeError("Invalid scale: " + str(scale) + ".") lines = ax.plot(freqs, responseMags, label=label, **kwargs) - result = {'ax': ax, 'lines': lines} + result = {"ax": ax, "lines": lines} if showCorners: - if scale == 'db': + if scale == "db": halfPow = 10.0*np.log10(0.5) halfAmp = 10.0*np.log10(0.5**2) mn = np.min(responseMags) @@ -120,25 +120,25 @@ def plotFreqResponse(self, freqs=None, scale='linear', mx = np.max((mx, 1.0)) halfPowerLines = ax.hlines(halfPow, 0.0, 0.5*self.sampRate, - color='red', linestyle='-.', label='Half Power') - result['halfPowerLines'] = halfPowerLines + color="red", linestyle="-.", label="Half Power") + result["halfPowerLines"] = halfPowerLines halfAmpLines = ax.hlines(halfAmp, 0.0, 0.5*self.sampRate, - color='orange', linestyle=':', label='Half Amplitude') - result['halfAmpLines'] = halfAmpLines + color="orange", linestyle=":", label="Half Amplitude") + result["halfAmpLines"] = halfAmpLines cornerLines = ax.vlines((self.lowFreq, self.highFreq), - mn, mx, color='violet', linestyle='--', label='Corners') - result['cornerLines'] = cornerLines + mn, mx, color="violet", linestyle="--", label="Corners") + result["cornerLines"] = cornerLines - ax.set_xlabel('Frequency (Hz)') + ax.set_xlabel("Frequency (Hz)") ax.set_ylim((0.0, 1.0)) return result - def plotPhaseResponse(self, freqs=None, scale='radians', + def plotPhaseResponse(self, freqs=None, scale="radians", showCorners=True, - label='Frequency Response', + label="Frequency Response", 
ax=None, **kwargs):
         """Plot the frequency response of the filter.
         """
@@ -151,29 +151,29 @@ def plotPhaseResponse(self, freqs=None, scale='radians',
 
         responseAngles = np.unwrap(np.angle(responses))
 
         scale = scale.lower()
-        if scale == 'radians':
-            ax.set_ylabel('Phase (Radians)')
-        elif scale == 'cycles':
-            ax.set_ylabel('Phase (Cycles)')
+        if scale == "radians":
+            ax.set_ylabel("Phase (Radians)")
+        elif scale == "cycles":
+            ax.set_ylabel("Phase (Cycles)")
             responseAngles /= 2.0*np.pi
-        elif scale == 'degrees':
-            ax.set_ylabel('Phase (Degrees)')
+        elif scale == "degrees":
+            ax.set_ylabel("Phase (Degrees)")
             responseAngles = responseAngles*180.0 / np.pi
         else:
-            raise RuntimeError('Invalid scale: ' + str(scale) + '.')
+            raise RuntimeError("Invalid scale: " + str(scale) + ".")
 
         lines = ax.plot(freqs, responseAngles, label=label, **kwargs)
 
-        result = {'ax': ax, 'lines': lines}
+        result = {"ax": ax, "lines": lines}
 
         if showCorners:
             cornerLines = ax.vlines((self.lowFreq, self.highFreq),
                 np.min(responseAngles), np.max(responseAngles),
-                color='violet', linestyle='--', label='Corners')
-            result['cornerLines'] = cornerLines
+                color="violet", linestyle="--", label="Corners")
+            result["cornerLines"] = cornerLines
 
-        ax.set_xlabel('Frequency (Hz)')
+        ax.set_xlabel("Frequency (Hz)")
 
         return result
 
@@ -182,7 +182,7 @@ class BandpassFilterIIR(BandpassFilterBase):
     """Infinite Impulse Response (IIR) bandpass filter.
     """
     def __init__(self, lowFreq, highFreq, sampRate=1.0,
-                 order=3, filtType='butter', zeroPhase=True,
+                 order=3, filtType="butter", zeroPhase=True,
                  dtype=None, **kwargs):
         """Construct a new IIR bandpass filter.
         """
@@ -192,17 +192,17 @@ def __init__(self, lowFreq, highFreq, sampRate=1.0,
         self.filtType = filtType.lower()
         self.zeroPhase = zeroPhase
 
-        if self.bandType not in ('allpass', 'allstop'):
-            if self.bandType == 'lowpass':
+        if self.bandType not in ("allpass", "allstop"):
+            if self.bandType == "lowpass":
                 self.Wn = self.high
-            elif self.bandType == 'highpass':
+            elif self.bandType == "highpass":
                 self.Wn = self.low
-            elif self.bandType == 'bandpass':
+            elif self.bandType == "bandpass":
                 self.Wn = (self.low, self.high)
-            elif self.bandType == 'bandstop':
+            elif self.bandType == "bandstop":
                 self.Wn = (self.high, self.low)
             else:
-                raise RuntimeError('Invalid bandType: ' + str(self.bandType))
+                raise RuntimeError("Invalid bandType: " + str(self.bandType))
 
             self.numCoef, self.denomCoef = spsig.iirfilter(order, self.Wn,
                 ftype=filtType, btype=self.bandType, **kwargs)
@@ -215,7 +215,7 @@ def __init__(self, lowFreq, highFreq, sampRate=1.0,
     def initZi(self):
         # lfilter_zi does not preserve dtype of arguments,
         # bug that should be reported XXX - idfah
-        # if above was fixed, use don't need astype below
+        # if above was fixed, use don't need astype below
         self.zi = spsig.lfilter_zi(self.numCoef, self.denomCoef).astype(self.dtype, copy=False)
 
     def scaleZi(self, s, axis):
@@ -233,9 +233,9 @@ def scaleZi(self, s, axis):
 
         return zi * s0
 
     def frequencyResponse(self, freqs=None):
-        if self.bandType == 'allpass':
+        if self.bandType == "allpass":
             return spsig.freqz(1, worN=freqs)
-        if self.bandType == 'allstop':
+        if self.bandType == "allstop":
             return spsig.freqz(0, worN=freqs)
 
         numCoef = self.numCoef
@@ -252,9 +252,9 @@ def frequencyResponse(self, freqs=None):
 
     def filter(self, s, axis=0):
         """Filter new data.
         """
-        if self.bandType == 'allpass':
+        if self.bandType == "allpass":
             return s
-        if self.bandType == 'allstop':
+        if self.bandType == "allstop":
             return np.zeros_like(s)
 
         ## #Should be very close to filtfilt, padding? 
XXX - idfah @@ -273,7 +273,7 @@ def filter(self, s, axis=0): # need astype below since filtfilt calls lfilter_zi, which does not preserve dtype XXX - idfah return spsig.filtfilt(self.numCoef, self.denomCoef, - s, axis=axis, padtype='even').astype(self.dtype, copy=False) + s, axis=axis, padtype="even").astype(self.dtype, copy=False) else: ziScaled = self.scaleZi(s, axis) @@ -281,7 +281,7 @@ def filter(self, s, axis=0): # even padding to help reduce edge effects nPad = 3*max(len(self.numCoef), len(self.denomCoef)) # XXX could use edge for constant padding? - sPad = np.apply_along_axis(np.pad, axis, s, pad_width=nPad, mode='reflect') + sPad = np.apply_along_axis(np.pad, axis, s, pad_width=nPad, mode="reflect") slc = [slice(nPad, -nPad) if i == axis else slice(None) for i in range(s.ndim)] y, newZi = spsig.lfilter(self.numCoef, self.denomCoef, sPad, axis=axis, zi=ziScaled) @@ -290,16 +290,16 @@ def filter(self, s, axis=0): class BandpassFilterIIRStateful(BandpassFilterIIR): def __init__(self, *args, **kwargs): - if ('zeroPhase' in kwargs) and (kwargs['zeroPhase'] is True): - raise RuntimeError('Stateful IIR filter cannot have linear phase.') + if ("zeroPhase" in kwargs) and (kwargs["zeroPhase"] is True): + raise RuntimeError("Stateful IIR filter cannot have linear phase.") BandpassFilterIIR.__init__(self, *args, zeroPhase=False, **kwargs) self.ziSaved = False def filter(self, s, axis=0): - if self.bandType == 'allpass': + if self.bandType == "allpass": return s - if self.bandType == 'allstop': + if self.bandType == "allstop": return np.zeros_like(s) if not self.ziSaved: @@ -320,46 +320,46 @@ def demoBandpassFilterIIR(): zeroPhase = True butter = BandpassFilter(lowFreq, highFreq, sampRate, order, - filtType='butter', zeroPhase=zeroPhase) + filtType="butter", zeroPhase=zeroPhase) #cheby1 = BandpassFilter(0.0, highFreq, sampRate, order, - # filtType='cheby1', rp=1.0, zeroPhase=zeroPhase) + # filtType="cheby1", rp=1.0, zeroPhase=zeroPhase) cheby2 = BandpassFilter(lowFreq, highFreq, sampRate, order, - filtType='cheby2', rs=20.0, zeroPhase=zeroPhase) + filtType="cheby2", rs=20.0, zeroPhase=zeroPhase) ellip = BandpassFilter(lowFreq, highFreq, sampRate, order, - filtType='ellip', rp=1.0, rs=20.0, zeroPhase=zeroPhase) + filtType="ellip", rp=1.0, rs=20.0, zeroPhase=zeroPhase) bessel = BandpassFilter(lowFreq, highFreq, sampRate, order, - filtType='bessel', zeroPhase=zeroPhase) + filtType="bessel", zeroPhase=zeroPhase) fig = plt.figure(figsize=(18, 10)) - fig.canvas.set_window_title('IIR Bandpass Filter Demo') + fig.canvas.set_window_title("IIR Bandpass Filter Demo") axLn = fig.add_subplot(2, 2, 1) axDb = fig.add_subplot(2, 2, 2) - for ax, scale in zip((axLn, axDb), ('linear', 'db')): - butter.plotFreqResponse(showCorners=False, scale=scale, label='Butterworth', ax=ax, linewidth=2) - #cheby1.plotFreqResponse(showCorners=False, scale=scale, label='Cbebyshev-I', ax=ax, linewidth=2) - cheby2.plotFreqResponse(showCorners=False, scale=scale, label='Cbebyshev-II', ax=ax, linewidth=2) - ellip.plotFreqResponse(showCorners=False, scale=scale, label='Elliptical', ax=ax, linewidth=2) - bessel.plotFreqResponse(showCorners=True, scale=scale, label='Bessel', ax=ax, linewidth=2) + for ax, scale in zip((axLn, axDb), ("linear", "db")): + butter.plotFreqResponse(showCorners=False, scale=scale, label="Butterworth", ax=ax, linewidth=2) + #cheby1.plotFreqResponse(showCorners=False, scale=scale, label="Cbebyshev-I", ax=ax, linewidth=2) + cheby2.plotFreqResponse(showCorners=False, scale=scale, label="Cbebyshev-II", ax=ax, 
linewidth=2) + ellip.plotFreqResponse(showCorners=False, scale=scale, label="Elliptical", ax=ax, linewidth=2) + bessel.plotFreqResponse(showCorners=True, scale=scale, label="Bessel", ax=ax, linewidth=2) #ax.grid() axLn.autoscale(tight=True) - axLn.set_title('Amplitude Response') - axLn.legend(loc='upper right') + axLn.set_title("Amplitude Response") + axLn.legend(loc="upper right") axDb.set_xlim((0.0, nyquist)) axDb.set_ylim((-100.0, 0.0)) - axDb.set_title('Power Response') + axDb.set_title("Power Response") axPh = fig.add_subplot(2, 2, 3) - scale = 'radians' - butter.plotPhaseResponse(showCorners=False, scale=scale, label='Butterworth', ax=axPh, linewidth=2) - #cheby1.plotPhaseResponse(showCorners=False, scale=scale, label='Chebyshev-I', ax=axPh, linewidth=2) - cheby2.plotPhaseResponse(showCorners=False, scale=scale, label='Chebyshev-II', ax=axPh, linewidth=2) - ellip.plotPhaseResponse(showCorners=False, scale=scale, label='Elliptical', ax=axPh, linewidth=2) - bessel.plotPhaseResponse(showCorners=True, scale=scale, label='Bessel', ax=axPh, linewidth=2) + scale = "radians" + butter.plotPhaseResponse(showCorners=False, scale=scale, label="Butterworth", ax=axPh, linewidth=2) + #cheby1.plotPhaseResponse(showCorners=False, scale=scale, label="Chebyshev-I", ax=axPh, linewidth=2) + cheby2.plotPhaseResponse(showCorners=False, scale=scale, label="Chebyshev-II", ax=axPh, linewidth=2) + ellip.plotPhaseResponse(showCorners=False, scale=scale, label="Elliptical", ax=axPh, linewidth=2) + bessel.plotPhaseResponse(showCorners=True, scale=scale, label="Bessel", ax=axPh, linewidth=2) axPh.autoscale(tight=True) - axPh.set_title('Phase Response') + axPh.set_title("Phase Response") t = np.linspace(0.0, 2.0, 2.0*sampRate, endpoint=False) f = np.linspace(0.0, nyquist, 2.0*sampRate, endpoint=False) @@ -376,15 +376,15 @@ def demoBandpassFilterIIR(): axCh = fig.add_subplot(2, 2, 4) axCh.plot(f, chirpAll) - axCh.vlines(lowFreq, 1, -9, color='violet', linestyle='--') - axCh.vlines(highFreq, 1, -9, color='violet', linestyle='--') + axCh.vlines(lowFreq, 1, -9, color="violet", linestyle="--") + axCh.vlines(highFreq, 1, -9, color="violet", linestyle="--") axCh.set_yticks([]) - axCh.set_xlabel('Frequency (Hz)') - axCh.set_ylabel('Chirp') + axCh.set_xlabel("Frequency (Hz)") + axCh.set_ylabel("Chirp") axCh.autoscale(tight=True) axChTwiny = axCh.twiny() - axChTwiny.hlines(sep, 0.0, t[-1], linestyle='--', color='black') - axChTwiny.set_xlabel('Time (s)') + axChTwiny.hlines(sep, 0.0, t[-1], linestyle="--", color="black") + axChTwiny.set_xlabel("Time (s)") fig.tight_layout() @@ -393,54 +393,54 @@ class BandpassFilterFIR(BandpassFilterBase): """Finite Impulse Response (FIR) bandpass filter. """ def __init__(self, lowFreq, highFreq, sampRate=1.0, - order=20, filtType='sinc-blackman', dtype=None): + order=20, filtType="sinc-blackman", dtype=None): """Construct a new FIR bandpass filter. 
""" BandpassFilterBase.__init__(self, lowFreq, highFreq, sampRate, dtype) if order % 2 != 0: - raise RuntimeError('Invalid order: ' + str(order) + - ' Must be an even integer.') + raise RuntimeError("Invalid order: " + str(order) + + " Must be an even integer.") self.order = order self.radius = order//2 self.taps = np.linspace(-self.radius, self.radius, self.order+1) self.filtType = filtType.lower() - if self.filtType == 'lanczos': + if self.filtType == "lanczos": self.initImpulseResponse(windows.lanczos(self.order+1, radius=self.radius)) - elif self.filtType == 'sinc-blackman': + elif self.filtType == "sinc-blackman": self.initImpulseResponse(windows.blackman(self.order+1)) - elif self.filtType == 'sinc-hamming': + elif self.filtType == "sinc-hamming": self.initImpulseResponse(windows.hamming(self.order+1)) - elif self.filtType == 'sinc-hann': + elif self.filtType == "sinc-hann": self.initImpulseResponse(windows.hann(self.order+1)) else: - raise RuntimeError('Invalid filtType: ' + str(filtType)) + raise RuntimeError("Invalid filtType: " + str(filtType)) def initImpulseResponse(self, window): - if self.bandType == 'allpass': + if self.bandType == "allpass": self.impulseResponse = windows.kroneckerDelta(self.order+1) - elif self.bandType == 'allstop': + elif self.bandType == "allstop": self.impulseResponse = np.zeros_like(window) - elif self.bandType == 'lowpass': + elif self.bandType == "lowpass": hightaps = self.high*self.taps self.impulseResponse = self.high*np.sinc(hightaps) * window - elif self.bandType == 'highpass': + elif self.bandType == "highpass": lowtaps = self.low*self.taps self.impulseResponse = (-self.low*np.sinc(lowtaps) * window + windows.kroneckerDelta(self.order+1)) - elif self.bandType == 'bandpass': + elif self.bandType == "bandpass": lowtaps = self.low*self.taps hightaps = self.high*self.taps self.impulseResponse = (self.high*np.sinc(hightaps) - self.low*np.sinc(lowtaps)) * window - elif self.bandType == 'bandstop': + elif self.bandType == "bandstop": lowtaps = self.low*self.taps hightaps = self.high*self.taps self.impulseResponse = ((self.high*np.sinc(hightaps) - @@ -448,14 +448,14 @@ def initImpulseResponse(self, window): windows.kroneckerDelta(self.order+1)) else: - raise RuntimeError('Invalid bandType: ' + str(self.bandType)) + raise RuntimeError("Invalid bandType: " + str(self.bandType)) self.impulseResponse = self.impulseResponse.astype(self.dtype, copy=False) def frequencyResponse(self, freqs=None): return spsig.freqz(self.impulseResponse, worN=freqs) - def filter(self, s, axis=0, mode='same'): + def filter(self, s, axis=0, mode="same"): return np.apply_along_axis(lambda v: np.convolve(v, self.impulseResponse, mode=mode), axis=axis, arr=s) @@ -466,39 +466,39 @@ def demoBandpassFilterFIR(): lowFreq = 1.5 highFreq = 40.0 - sincBla = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType='sinc-blackman') - sincHan = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType='sinc-hann') - sincHam = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType='sinc-hamming') - lanczos = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType='lanczos') + sincBla = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType="sinc-blackman") + sincHan = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType="sinc-hann") + sincHam = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType="sinc-hamming") + lanczos = BandpassFilter(lowFreq, highFreq, sampRate, order, filtType="lanczos") fig = plt.figure(figsize=(18, 10)) - 
fig.canvas.set_window_title('FIR Bandpass Filter Demo') + fig.canvas.set_window_title("FIR Bandpass Filter Demo") axLn = fig.add_subplot(2, 2, 1) axDb = fig.add_subplot(2, 2, 2) - for ax, scale in zip((axLn, axDb), ('linear', 'db')): - sincBla.plotFreqResponse(showCorners=True, scale=scale, label='Sinc-Blackman', ax=ax, linewidth=2) - sincHan.plotFreqResponse(showCorners=False, scale=scale, label='Sinc-Hann', ax=ax, linewidth=2) - sincHam.plotFreqResponse(showCorners=False, scale=scale, label='Sinc-Hamming', ax=ax, linewidth=2) - lanczos.plotFreqResponse(showCorners=False, scale=scale, label='Lanczos', ax=ax, linewidth=2) + for ax, scale in zip((axLn, axDb), ("linear", "db")): + sincBla.plotFreqResponse(showCorners=True, scale=scale, label="Sinc-Blackman", ax=ax, linewidth=2) + sincHan.plotFreqResponse(showCorners=False, scale=scale, label="Sinc-Hann", ax=ax, linewidth=2) + sincHam.plotFreqResponse(showCorners=False, scale=scale, label="Sinc-Hamming", ax=ax, linewidth=2) + lanczos.plotFreqResponse(showCorners=False, scale=scale, label="Lanczos", ax=ax, linewidth=2) #ax.grid() axLn.autoscale(tight=True) - axLn.set_title('Amplitude Response') - axLn.legend(loc='upper right') + axLn.set_title("Amplitude Response") + axLn.legend(loc="upper right") axDb.set_xlim((0.0, nyquist)) axDb.set_ylim((-100.0, 0.0)) - axDb.set_title('Power Response') + axDb.set_title("Power Response") axPh = fig.add_subplot(2, 2, 3) - scale = 'radians' - sincBla.plotPhaseResponse(showCorners=False, scale=scale, label='Sinc-Blackman', ax=axPh, linewidth=2) - sincHan.plotPhaseResponse(showCorners=False, scale=scale, label='Sinc-Hann', ax=axPh, linewidth=2) - sincHam.plotPhaseResponse(showCorners=False, scale=scale, label='Sinc-Hamming', ax=axPh, linewidth=2) - lanczos.plotPhaseResponse(showCorners=True, scale=scale, label='Lanczos', ax=axPh, linewidth=2) + scale = "radians" + sincBla.plotPhaseResponse(showCorners=False, scale=scale, label="Sinc-Blackman", ax=axPh, linewidth=2) + sincHan.plotPhaseResponse(showCorners=False, scale=scale, label="Sinc-Hann", ax=axPh, linewidth=2) + sincHam.plotPhaseResponse(showCorners=False, scale=scale, label="Sinc-Hamming", ax=axPh, linewidth=2) + lanczos.plotPhaseResponse(showCorners=True, scale=scale, label="Lanczos", ax=axPh, linewidth=2) axPh.autoscale(tight=True) - axPh.set_title('Phase Response') + axPh.set_title("Phase Response") t = np.linspace(0.0, 2.0, 2.0*sampRate, endpoint=False) f = np.linspace(0.0, nyquist, 2.0*sampRate, endpoint=False) @@ -514,32 +514,32 @@ def demoBandpassFilterFIR(): axCh = fig.add_subplot(2, 2, 4) axCh.plot(f, chirpAll) - axCh.vlines(lowFreq, 1, -9, color='violet', linestyle='--') - axCh.vlines(highFreq, 1, -9, color='violet', linestyle='--') + axCh.vlines(lowFreq, 1, -9, color="violet", linestyle="--") + axCh.vlines(highFreq, 1, -9, color="violet", linestyle="--") axCh.set_yticks([]) - axCh.set_xlabel('Frequency (Hz)') - axCh.set_ylabel('Chirp') + axCh.set_xlabel("Frequency (Hz)") + axCh.set_ylabel("Chirp") axCh.autoscale(tight=True) axChTwiny = axCh.twiny() - axChTwiny.hlines(sep, 0.0, t[-1], linestyle='--', color='black') - axChTwiny.set_xlabel('Time (s)') + axChTwiny.hlines(sep, 0.0, t[-1], linestyle="--", color="black") + axChTwiny.set_xlabel("Time (s)") fig.tight_layout() # wrapper around class constructors # pylint: disable=invalid-name -def BandpassFilter(lowFreq, highFreq, sampRate=1.0, order=None, filtType='butter', **kwargs): +def BandpassFilter(lowFreq, highFreq, sampRate=1.0, order=None, filtType="butter", **kwargs): filtType = 
filtType.lower()

-    if filtType in ('butter', 'cheby1', 'cheby2', 'ellip', 'bessel'):
+    if filtType in ("butter", "cheby1", "cheby2", "ellip", "bessel"):
         if order is None:
             order = 3

         return BandpassFilterIIR(lowFreq=lowFreq, highFreq=highFreq,
                     sampRate=sampRate, order=order, filtType=filtType, **kwargs)

-    elif filtType in ('lanczos', 'sinc-blackman', 'sinc-hamming', 'sinc-hann'):
+    elif filtType in ("lanczos", "sinc-blackman", "sinc-hamming", "sinc-hann"):
         if order is None:
             order = 20

@@ -547,13 +547,13 @@ def BandpassFilter(lowFreq, highFreq, sampRate=1.0, order=None, filtType='butter
             sampRate=sampRate, order=order, filtType=filtType, **kwargs)

     else:
-        raise RuntimeError('Invalid filter type: ' + str(filtType) + '.')
+        raise RuntimeError("Invalid filter type: " + str(filtType) + ".")

 def BP(*args, **kwargs):
     return BandpassFilter(*args, **kwargs)

-if __name__ == '__main__':
+if __name__ == "__main__":
     demoBandpassFilterIIR()
     demoBandpassFilterFIR()
     plt.show()
diff --git a/cebl/sig/psd.py b/cebl/sig/psd.py
index 9ae1734..264934b 100644
--- a/cebl/sig/psd.py
+++ b/cebl/sig/psd.py
@@ -33,7 +33,7 @@ def getPowers(self):
     def getFreqsPowers(self):
         return self.freqs, self.powers

-    def plotPower(self, scale='log', ax=None, **kwargs):
+    def plotPower(self, scale="log", ax=None, **kwargs):
         """Plot a PSD estimate on a log scale.

         Returns
@@ -47,40 +47,40 @@ def plotPower(self, scale='log', ax=None, **kwargs):
         scale = scale.lower()
         if ax is None:
             fig = plt.figure()
-            result['fig'] = fig
+            result["fig"] = fig
             ax = fig.add_subplot(1, 1, 1)
-            result['ax'] = ax
+            result["ax"] = ax
             ax.grid()
-            ax.set_title('Power Spectral Density')
-            ax.set_xlabel(r'Freqency ($Hz$)')
+            ax.set_title("Power Spectral Density")
+            ax.set_xlabel(r"Frequency ($Hz$)")
             ax.set_xlim((np.min(self.freqs), np.max(self.freqs)))

-        if scale in ('linear', 'log'):
-            ax.set_ylabel(r'Power Density ($\mu V^2 / Hz$)')
-        elif scale in ('db', 'decibels'):
-            ax.set_ylabel(r'Power Density (dB)')
-        if scale == 'log':
-            ax.set_yscale('log')
-
-        if scale in ('linear', 'log'):
+        if scale in ("linear", "log"):
+            ax.set_ylabel(r"Power Density ($\mu V^2 / Hz$)")
+        elif scale in ("db", "decibels"):
+            ax.set_ylabel(r"Power Density (dB)")
+        if scale == "log":
+            ax.set_yscale("log")
+
+        if scale in ("linear", "log"):
             scaledPowers = self.powers
-        elif scale in ('db', 'decibels'):
+        elif scale in ("db", "decibels"):
             scaledPowers = 10.0*np.log10(self.powers/np.max(self.powers))
         else:
-            raise RuntimeError('Invalid scale %s.' % str(scale))
+            raise RuntimeError("Invalid scale %s." % str(scale))

         lines = ax.plot(self.freqs, scaledPowers, **kwargs)
-        result['lines'] = lines
+        result["lines"] = lines

         return result

 class WelchPSD(PSDBase):
     """PSD smoothed using Welch's method.
     """
     def __init__(self, s, sampRate=1.0, span=3.0, overlap=0.5, windowFunc=windows.hann, pad=False):
         """Construct a new PSD using Welch's method.

         Args:
             s: Numpy array with shape (observations[,dimensions])
@@ -126,10 +126,10 @@ def __init__(self, s, sampRate=1.0, span=3.0, overlap=0.5, windowFunc=windows.ha

         # check span parameter
         if wObs > nObs:
-            raise RuntimeError('Span of %.2f exceedes length of input %.2f.' %
+            raise RuntimeError("Span of %.2f exceeds length of input %.2f." %
                     (span, nObs/float(sampRate)))
         if wObs < 7:
-            raise RuntimeError('Span of %.2f is too small.' % span)
+            raise RuntimeError("Span of %.2f is too small." % span)

         if pad:
             # find next largest power of two
@@ -161,7 +161,7 @@ def __init__(self, s, sampRate=1.0, span=3.0, overlap=0.5, windowFunc=windows.ha
         dft = dft[:,:int(np.ceil(nPad/2.0)),:]

         # scale to power/Hz
         # numpy fft doesn't support complex64 so can't preserve float32 dtype XXX - idfah
         dftmag = np.abs(dft).astype(s.dtype, copy=False)
         powers = 2.0*(dftmag**2)/scaleDenom
@@ -362,16 +362,16 @@ def __init__(self, s, sampRate=1.0, order=20, freqs=None, **kwargs):

 # wrapper around class constructors
 # pylint: disable=invalid-name
-def PowerSpectralDensity(s, method='welch', **kwargs):
+def PowerSpectralDensity(s, method="welch", **kwargs):
     method = method.lower()
-    if method == 'welch':
+    if method == "welch":
         return WelchPSD(s, **kwargs)
-    elif method in ('raw', 'fft'):
+    elif method in ("raw", "fft"):
         return RawPSD(s, **kwargs)
-    elif method in ('ar', 'autoreg'):
+    elif method in ("ar", "autoreg"):
         return AutoRegPSD(s, **kwargs)
     else:
-        raise RuntimeError('Unknown PSD estimation method: ' + str(method))
+        raise RuntimeError("Unknown PSD estimation method: " + str(method))

 def PSD(*args, **kwargs):
     return PowerSpectralDensity(*args, **kwargs)
@@ -389,26 +389,26 @@ def demoPSD():
     noise2 = np.random.normal(loc=0.0, scale=0.2, size=s.shape)
     y = np.vstack((np.sin(2*f1*np.pi*s)+noise1, 10.0*np.sin(2*f2*np.pi*s)+noise2)).T

-    print('True max power: ', np.mean(y**2, axis=0))
+    print("True max power: ", np.mean(y**2, axis=0))

-    scale = 'log'
+    scale = "log"

     raw = RawPSD(y, sampRate)
-    ax = raw.plotPower(scale=scale, label='raw')['ax']
-    print('Raw max power: ', np.max(raw.getPowers()*sampRate, axis=0))
+    ax = raw.plotPower(scale=scale, label="raw")["ax"]
+    print("Raw max power: ", np.max(raw.getPowers()*sampRate, axis=0))

     welch = WelchPSD(y, sampRate, span=4)
-    welch.plotPower(scale=scale, ax=ax, label='welch')
-    print('Welch max power: ', np.max(welch.getPowers()*sampRate, axis=0))
+    welch.plotPower(scale=scale, ax=ax, label="welch")
+    print("Welch max power: ", np.max(welch.getPowers()*sampRate, axis=0))

     autoreg = AutoRegPSD(y, sampRate, order=10)
-    autoreg.plotPower(scale=scale, ax=ax, label='autoreg')
-    print('AR max power: ', np.max(autoreg.getPowers()*sampRate, axis=0))
+    autoreg.plotPower(scale=scale, ax=ax, label="autoreg")
+    print("AR max power: ", np.max(autoreg.getPowers()*sampRate, axis=0))

     ax.legend()
     ax.autoscale(tight=True)

-if __name__ == '__main__':
+if __name__ == "__main__":
     demoPSD()
     plt.show()
diff --git a/cebl/sig/resamp.py b/cebl/sig/resamp.py
index 348fdb1..f89b50b 100644
--- a/cebl/sig/resamp.py
+++ b/cebl/sig/resamp.py
@@ -47,7 +47,7 @@ def upsample(s, factor):

     s = util.colmat(s)

-    sup = s.repeat(factor).reshape((-1, s.shape[1]), order='F')
+    sup = s.repeat(factor).reshape((-1, s.shape[1]), order="F")

     if flattenOut:
         sup = sup.ravel()
@@ -65,7 +65,7 @@ def decimate(s, factor, lowpassFrac=0.625, **kwargs):

     return downsample(sFiltered, factor)

-def interpolate(s, factor, order=8, filtType='lanczos'):
+def interpolate(s, factor, order=8, filtType="lanczos"):
     """Interpolate (upsample) a discrete signal by a given factor
     using a Finite Impulse Response (FIR) filter.
@@ -107,8 +107,8 @@ def interpolate(s, factor, order=8, filtType='lanczos'): New observations (length 4*2=8): |**|**|**|** """ if order % 2 != 0: - raise RuntimeError('Invalid order: ' + str(order) + - ' Must be an even integer.') + raise RuntimeError("Invalid order: " + str(order) + + " Must be an even integer.") # ensure we have a numpy array s = np.asarray(s) @@ -136,19 +136,19 @@ def interpolate(s, factor, order=8, filtType='lanczos'): taps = np.linspace(-radius, radius, newOrder+1).astype(s.dtype, copy=False) # generate FIR filter - if filtType == 'lanczos': + if filtType == "lanczos": impulseResponse = np.sinc(taps) * windows.lanczos(newOrder+1).astype(s.dtype, copy=False) - elif filtType == 'sinc-blackman': + elif filtType == "sinc-blackman": impulseResponse = np.sinc(taps) * windows.blackman(newOrder+1).astype(s.dtype, copy=False) else: - raise RuntimeError('Invalid filtType: ' + str(filtType)) + raise RuntimeError("Invalid filtType: " + str(filtType)) # convolve with FIR filter to smooth across zero padding # NOTE: there is potential for performance improvement here since # zeros could be excluded from the computation XXX - idfah # spsig.fftconvolve might also be faster for long signals XXX - idfah return np.apply_along_axis(lambda v: - np.convolve(v, impulseResponse, mode='same'), + np.convolve(v, impulseResponse, mode="same"), axis=0, arr=sl) def resample(s, factorDown, factorUp=1, interpKwargs=None, **decimKwargs): @@ -207,11 +207,11 @@ def demoInterpolate(): sLanczos8 = interpolate(s, factor, order=8) sLanczos16 = interpolate(s, factor, order=16) - plt.plot(t, s, marker='o', color='lightgrey', linewidth=3, label='Original') - plt.plot(tInterp, sLanczos4, color='blue', label='Lanczos4') - plt.plot(tInterp, sLanczos8, color='red', label='Lanczos8') - plt.plot(tInterp, sLanczos16, color='green', label='Lanczos16') - plt.title('Interpolation of a Random Walk') + plt.plot(t, s, marker="o", color="lightgrey", linewidth=3, label="Original") + plt.plot(tInterp, sLanczos4, color="blue", label="Lanczos4") + plt.plot(tInterp, sLanczos8, color="red", label="Lanczos8") + plt.plot(tInterp, sLanczos16, color="green", label="Lanczos16") + plt.title("Interpolation of a Random Walk") plt.legend() plt.tight_layout() @@ -229,69 +229,69 @@ def demoResample(): fig = plt.figure() ##axChirp = fig.add_subplot(4, 1, 1) - ##axChirp.plot(f, chirp, color='black') - ##axChirp.set_title('Chirp') - ##axChirp.set_xlabel('Frequency (Hz)') - ##axChirp.set_ylabel('Signal') + ##axChirp.plot(f, chirp, color="black") + ##axChirp.set_title("Chirp") + ##axChirp.set_xlabel("Frequency (Hz)") + ##axChirp.set_ylabel("Signal") ##axChirp.autoscale(tight=True) #axChirpTwiny = axChirp.twiny() #axChirpTwiny.plot(t, chirp, alpha=0.0) - #axChirpTwiny.set_xlabel('Time (s)') + #axChirpTwiny.set_xlabel("Time (s)") fDown = downsample(f, factor) chirpDown = downsample(chirp, factor) axDown = fig.add_subplot(2, 2, 1) - axDown.plot(f, chirp, color='lightgrey', linewidth=2) - axDown.plot(fDown, chirpDown, color='red') + axDown.plot(f, chirp, color="lightgrey", linewidth=2) + axDown.plot(fDown, chirpDown, color="red") axDown.vlines(nyquist/factor, -1.0, 1.0, linewidth=2, - linestyle='--', color='green', label='New Nyquist') - axDown.set_title('Downsample factor %d' % factor) - axDown.set_xlabel('Frequency (Hz)') - axDown.set_ylabel('Signal') + linestyle="--", color="green", label="New Nyquist") + axDown.set_title("Downsample factor %d" % factor) + axDown.set_xlabel("Frequency (Hz)") + axDown.set_ylabel("Signal") 
axDown.autoscale(tight=True) chirpDeci = decimate(chirp, factor) axDeci = fig.add_subplot(2, 2, 3) - axDeci.plot(f, chirp, color='lightgrey', linewidth=2) - axDeci.plot(fDown, chirpDeci, color='red') + axDeci.plot(f, chirp, color="lightgrey", linewidth=2) + axDeci.plot(fDown, chirpDeci, color="red") axDeci.vlines(nyquist/factor, -1.0, 1.0, linewidth=2, - linestyle='--', color='green', label='New Nyquist') - axDeci.set_title('Decimation factor %d' % factor) - axDeci.set_xlabel('Frequency (Hz)') - axDeci.set_ylabel('Signal') + linestyle="--", color="green", label="New Nyquist") + axDeci.set_title("Decimation factor %d" % factor) + axDeci.set_xlabel("Frequency (Hz)") + axDeci.set_ylabel("Signal") axDeci.autoscale(tight=True) fInterp = np.linspace(0.0, nyquist, 2.0*sampRate*factor, endpoint=False) chirpInterp = interpolate(chirp, factor) axInterp = fig.add_subplot(2, 2, 2) - axInterp.plot(f, chirp, color='lightgrey', linewidth=2) - axInterp.plot(fInterp, chirpInterp, color='red') + axInterp.plot(f, chirp, color="lightgrey", linewidth=2) + axInterp.plot(fInterp, chirpInterp, color="red") #axInterp.vlines(nyquist*factor, -1.0, 1.0, linewidth=2, - # linestyle='--', color='green', label='New Nyquist') - axInterp.set_title('Interpolation factor %d' % factor) - axInterp.set_xlabel('Frequency (Hz)') - axInterp.set_ylabel('Signal') + # linestyle="--", color="green", label="New Nyquist") + axInterp.set_title("Interpolation factor %d" % factor) + axInterp.set_xlabel("Frequency (Hz)") + axInterp.set_ylabel("Signal") axInterp.autoscale(tight=True) fResamp = np.linspace(0.0, nyquist, 2.0*sampRate*(2.0/factor), endpoint=False) chirpResamp = resample(chirp, factorUp=2, factorDown=factor) axResamp = fig.add_subplot(2, 2, 4) - axResamp.plot(f, chirp, color='lightgrey', linewidth=2) - axResamp.plot(fResamp, chirpResamp, color='red') + axResamp.plot(f, chirp, color="lightgrey", linewidth=2) + axResamp.plot(fResamp, chirpResamp, color="red") axResamp.vlines((2.0/factor)*nyquist, -1.0, 1.0, linewidth=2, - linestyle='--', color='green', label='New Nyquist') - axResamp.set_title('Resample factor 2/%d' % factor) - axResamp.set_xlabel('Frequency (Hz)') - axResamp.set_ylabel('Signal') + linestyle="--", color="green", label="New Nyquist") + axResamp.set_title("Resample factor 2/%d" % factor) + axResamp.set_xlabel("Frequency (Hz)") + axResamp.set_ylabel("Signal") axResamp.autoscale(tight=True) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoInterpolate() demoResample() plt.show() diff --git a/cebl/sig/smooth.py b/cebl/sig/smooth.py index aa6e36e..0dbf7d0 100644 --- a/cebl/sig/smooth.py +++ b/cebl/sig/smooth.py @@ -37,7 +37,7 @@ def movingAverage(s, width=2, kernelFunc=windows.boxcar, **kwargs): return np.apply_along_axis( np.convolve, axis=0, arr=s, - v=kernel, mode='same') + v=kernel, mode="same") def savitzkyGolay(s, *args, **kwargs): """Savitzky Golay filter. 
@@ -113,33 +113,33 @@ def demoSmooth(): fig = plt.figure() axMA = fig.add_subplot(2, 2, 1) - axMA.plot(x, y+sep, color='grey', linewidth=3) + axMA.plot(x, y+sep, color="grey", linewidth=3) axMA.plot(x, yMA+sep) - axMA.set_title('Moving Average %d' % maWidth) + axMA.set_title("Moving Average %d" % maWidth) axMA.autoscale(tight=True) axGA = fig.add_subplot(2, 2, 2) - axGA.plot(x, y+sep, color='grey', linewidth=3) + axGA.plot(x, y+sep, color="grey", linewidth=3) axGA.plot(x, yGA+sep) - axGA.set_title('Gaussian Moving Average %d' % gaWidth) + axGA.set_title("Gaussian Moving Average %d" % gaWidth) axGA.autoscale(tight=True) axSG = fig.add_subplot(2, 2, 3) - axSG.plot(x, y+sep, color='grey', linewidth=3) + axSG.plot(x, y+sep, color="grey", linewidth=3) axSG.plot(x, ySG+sep) - axSG.set_title('Savitzky-Golay %d %d' % (sgWidth, sgOrder)) + axSG.set_title("Savitzky-Golay %d %d" % (sgWidth, sgOrder)) axSG.autoscale(tight=True) axWN = fig.add_subplot(2, 2, 4) - axWN.plot(x, y+sep, color='grey', linewidth=3) + axWN.plot(x, y+sep, color="grey", linewidth=3) axWN.plot(x, yWN+sep) - axWN.set_title('Wiener %d %3.2f' % (wnSize, wnNoise)) - #axWN.set_title('Wiener') + axWN.set_title("Wiener %d %3.2f" % (wnSize, wnNoise)) + #axWN.set_title("Wiener") axWN.autoscale(tight=True) fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoSmooth() plt.show() diff --git a/cebl/sig/spatial.py b/cebl/sig/spatial.py index ff659dc..b7653d8 100644 --- a/cebl/sig/spatial.py +++ b/cebl/sig/spatial.py @@ -88,7 +88,7 @@ def demoSharpen(): ss = np.sin(0.2*t) #s = (np.vstack((s3, s1+ss, s1+ss, s1+ss, s2+ss, s2+ss, s2, s2, s2, s3, s3, s3))).T s = (np.vstack((s1+ss, s1+ss, s1+ss, s2+ss, s2+ss, s2, s2, s2, s3, s3, s3))).T - colors = ['blue',]*3 + ['orange',]*2 + ['green',]*3 + ['red',]*3 + colors = ["blue",]*3 + ["orange",]*2 + ["green",]*3 + ["red",]*3 sepDist = 3.0 sep = -np.arange(s.shape[1])*sepDist @@ -103,14 +103,14 @@ def demoSharpen(): axSig.plot(t, s+sep) axSig.set_xlim(xlim) axSig.set_ylim(ylim) - axSig.set_title('Orignial') + axSig.set_title("Orignial") axCar = fig.add_subplot(1, 3, 2) axCar.set_color_cycle(colors) axCar.plot(t, commonAverageReference(s)+sep) axCar.set_xlim(xlim) axCar.set_ylim(ylim) - axCar.set_title('Common Average Reference') + axCar.set_title("Common Average Reference") axGaus = fig.add_subplot(1, 3, 3) axGaus.set_color_cycle(colors) @@ -118,17 +118,17 @@ def demoSharpen(): axGaus.plot(t, sharpen(s, radius=1.0)+sep) axGaus.set_xlim(xlim) axGaus.set_ylim(ylim) - axGaus.set_title('Gaussian Sharpen') + axGaus.set_title("Gaussian Sharpen") #axLanc = fig.add_subplot(1, 3, 3) #axLanc.plot(t, sharpen(s, kernelFunc=util.lanczos, freq=0.99, radius=3.0, normalize=False)+sep) #axLanc.set_xlim(xlim) #axLanc.set_ylim(ylim) - #axLanc.set_title('Lanczos Sharpen') + #axLanc.set_title("Lanczos Sharpen") fig.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoSharpen() plt.show() diff --git a/cebl/sig/specgram.py b/cebl/sig/specgram.py index 647007b..fa8c1da 100644 --- a/cebl/sig/specgram.py +++ b/cebl/sig/specgram.py @@ -49,17 +49,17 @@ def getTimes(self): def getPowers(self): return self.powers - def getPhases(self, scale='radians'): + def getPhases(self, scale="radians"): scale = scale.lower() - if scale == 'radians': + if scale == "radians": return self.phases - elif scale == 'cycles': + elif scale == "cycles": return self.phases / (2.0*np.pi) - elif scale == 'degrees': + elif scale == "degrees": return self.phases * 180.0 / np.pi else: - raise RuntimeError('Invalid 
phase scale: %s.' % str(scale)) + raise RuntimeError("Invalid phase scale: %s." % str(scale)) def getFreqsPowers(self): return self.freqs, self.powers @@ -67,28 +67,28 @@ def getFreqsPowers(self): def getFreqsPowersPhases(self): return self.freqs, self.powers, self.phases - def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None): + def plotPower(self, scale="log", chanNames=None, colorbar=True, axs=None): if chanNames is None: chanNames = [str(i) for i in range(self.nChan)] - if scale == 'linear': + if scale == "linear": powers = self.powers norm = plt.Normalize(np.min(powers), np.max(powers)) - zlabel = (r'Power Density ($\mu V^2 / Hz$)') + zlabel = (r"Power Density ($\mu V^2 / Hz$)") - elif scale == 'log': + elif scale == "log": powers = self.powers norm = pltLogNorm(np.min(powers), np.max(powers)) - zlabel = (r'Power Density ($\mu V^2 / Hz$)') + zlabel = (r"Power Density ($\mu V^2 / Hz$)") - elif scale == 'db': + elif scale == "db": me = np.max((np.min(self.powers), np.finfo(self.powers.dtype).tiny)) powers = 10.0*np.log10(self.powers/me) norm = plt.Normalize(np.min(powers), np.max(powers)) - zlabel = 'Power (db)' + zlabel = "Power (db)" else: - raise RuntimeError('Invalid scale %s.' % str(scale)) + raise RuntimeError("Invalid scale %s." % str(scale)) nRows = int(np.sqrt(self.nChan)) if nRows*nRows < self.nChan: @@ -110,20 +110,20 @@ def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None): else: ax = axs[i] - img = ax.imshow(powers[:,:,i].T, interpolation='bicubic', origin='lower', - cmap=plt.cm.get_cmap('jet'), aspect='auto', norm=norm, + img = ax.imshow(powers[:,:,i].T, interpolation="bicubic", origin="lower", + cmap=plt.cm.get_cmap("jet"), aspect="auto", norm=norm, extent=(self.times[0], self.times[-1], self.freqs[0], self.freqs[-1])) imgs.append(img) - ax.set_xlabel('Time (s)') - ax.set_ylabel('Frequency (Hz)') + ax.set_xlabel("Time (s)") + ax.set_ylabel("Frequency (Hz)") if len(chanNames) > 1: ax.set_title(chanName) else: - ax.set_title('Spectrogram') + ax.set_title("Spectrogram") - result = {'axs': axs, 'imgs': imgs} + result = {"axs": axs, "imgs": imgs} if colorbar: if newAxs: @@ -133,7 +133,7 @@ def plotPower(self, scale='log', chanNames=None, colorbar=True, axs=None): else: cbar = axs[-1].colorbar(imgs[-1], norm=norm) cbar.set_label(zlabel) - result['cbar'] = cbar + result["cbar"] = cbar return result @@ -158,10 +158,10 @@ def __init__(self, s, sampRate=1.0, span=0.1, overlap=0.5, # check span parameter if wObs > nObs: - raise RuntimeError('Span of %.2f exceedes length of input %.2f.' % + raise RuntimeError("Span of %.2f exceedes length of input %.2f." % (span, nObs/float(sampRate))) if wObs < 7: - raise RuntimeError('Span of %.2f is too small.' % span) + raise RuntimeError("Span of %.2f is too small." 
% span) if pad: # find next largest power of two @@ -209,14 +209,14 @@ def __init__(self, s, sampRate=1.0, span=0.1, overlap=0.5, # wrapper around class constructors # pylint: disable=invalid-name -def Spectrogram(s, *args, method='cwt', **kwargs): +def Spectrogram(s, *args, method="cwt", **kwargs): method = method.lower() - if method == 'cwt': + if method == "cwt": return CWTSpectrogram(s, *args, **kwargs) - elif method in ('fft', 'stft', 'stfft'): + elif method in ("fft", "stft", "stfft"): return FFTSpectrogram(s, *args, **kwargs) else: - raise RuntimeError('Unknown Spectrogram estimation method: ' + str(method)) + raise RuntimeError("Unknown Spectrogram estimation method: " + str(method)) def demoCWT(): @@ -242,10 +242,10 @@ def demoCWT(): #s = s.astype(np.float32) #freqs, powers, phases = transform.apply(s) - #transform.plotPower(s, chanNames=('20Hz Sinusoid', 'Noisy 60Hz Sinusoid', 'Random Walk')) + #transform.plotPower(s, chanNames=("20Hz Sinusoid", "Noisy 60Hz Sinusoid", "Random Walk")) #plt.tight_layout() -if __name__ == '__main__': +if __name__ == "__main__": demoCWT() plt.show() diff --git a/cebl/sig/stat.py b/cebl/sig/stat.py index 097ea84..3e846df 100644 --- a/cebl/sig/stat.py +++ b/cebl/sig/stat.py @@ -19,7 +19,7 @@ def autoCorrelation(s): def ac1d(x): var = x.var() x = x - x.mean() - r = np.correlate(x[-x.size:], x, mode='full') + r = np.correlate(x[-x.size:], x, mode="full") return r[r.size//2:] / (var * np.arange(x.shape[0], 0, -1)) return np.apply_along_axis(ac1d, 0, s) diff --git a/cebl/util/__init__.py b/cebl/util/__init__.py index b52738d..bd3e39a 100644 --- a/cebl/util/__init__.py +++ b/cebl/util/__init__.py @@ -3,7 +3,7 @@ import sys -#if sys.platform.startswith('linux'): +#if sys.platform.startswith("linux"): # from .fasttanh import tanh #else: # from numpy import tanh diff --git a/cebl/util/arr.py b/cebl/util/arr.py index 4ae44c4..b4c85ab 100644 --- a/cebl/util/arr.py +++ b/cebl/util/arr.py @@ -67,7 +67,7 @@ def accum(x, n, accumf=np.sum, truncate=True, axis=None): [[3, 3, 3]]]) """ if n < 1: - raise ValueError('n must be >= 1') + raise ValueError("n must be >= 1") # make sure we have a numpy array x = np.asarray(x) diff --git a/cebl/util/cache.py b/cebl/util/cache.py index b702318..e269a26 100644 --- a/cebl/util/cache.py +++ b/cebl/util/cache.py @@ -60,36 +60,36 @@ def __str__(self): def __len__(self): return len(self.store) -if __name__ == '__main__': +if __name__ == "__main__": cache = Cache(3) - print('adding a: 1') - cache['a'] = 1 + print("adding a: 1") + cache["a"] = 1 - print('adding b: 2') - cache['b'] = 2 + print("adding b: 2") + cache["b"] = 2 - print('adding c: 3') - cache['c'] = 3 + print("adding c: 3") + cache["c"] = 3 print(cache) - print('getting b') - print(cache['b']) + print("getting b") + print(cache["b"]) - print('adding a') - cache['a'] = 10 + print("adding a") + cache["a"] = 10 print(cache) - print('adding d') - cache['d'] = 4 + print("adding d") + cache["d"] = 4 - print('getting x') - print(cache['x']) + print("getting x") + print(cache["x"]) print(cache) print(len(cache)) - print('d' in cache) - print('c' in cache) + print("d" in cache) + print("c" in cache) diff --git a/cebl/util/clsm.py b/cebl/util/clsm.py index 1eeb72e..e86f148 100644 --- a/cebl/util/clsm.py +++ b/cebl/util/clsm.py @@ -10,13 +10,13 @@ def roc(classProbs): """Receiver operating characteristic. 
""" if len(classProbs) > 2: - raise RuntimeError('roc is only valid for two-class problems.') + raise RuntimeError("roc is only valid for two-class problems.") probs = np.concatenate([cls[:,1] for cls in classProbs]) labels = np.ones(probs.size, dtype=np.bool) labels[:classProbs[0].shape[0]] = False - idx = np.argsort(probs, kind='mergesort')[::-1] + idx = np.argsort(probs, kind="mergesort")[::-1] labels = labels[idx] # pylint: disable=singleton-comparison @@ -32,7 +32,7 @@ def roc(classProbs): #def auc(classProbs): # if len(classProbs) > 2: -# raise RuntimeError('auc is only valid for two-class problems.') +# raise RuntimeError("auc is only valid for two-class problems.") # # fpr, tpr = roc(classProbs) # return np.sum((fpr[1:] - fpr[:-1]) * tpr[1:]) @@ -41,7 +41,7 @@ def auc(classProbs): """Area under the roc curve. """ if len(classProbs) > 2: - raise RuntimeError('auc is only implemented for two-class problems.') + raise RuntimeError("auc is only implemented for two-class problems.") denom = classProbs[0].shape[0]*classProbs[1].shape[0] if denom == 0: diff --git a/cebl/util/comp.py b/cebl/util/comp.py index c636ce5..341dbd0 100644 --- a/cebl/util/comp.py +++ b/cebl/util/comp.py @@ -5,9 +5,9 @@ import lzma -compressedExtensions = ('xz', 'bz2', 'gz') +compressedExtensions = ("xz", "bz2", "gz") -def openCompressedFile(fileName, mode='rb', **kwargs): +def openCompressedFile(fileName, mode="rb", **kwargs): """Open a compressed file using an algorithm derived from its file extension. @@ -36,11 +36,11 @@ def openCompressedFile(fileName, mode='rb', **kwargs): """ fileNameLower = fileName.lower() - if fileNameLower.endswith('.xz'): + if fileNameLower.endswith(".xz"): return lzma.open(fileName, mode, **kwargs) - elif fileNameLower.endswith('.bz2'): + elif fileNameLower.endswith(".bz2"): return bz2.open(fileName, mode, **kwargs) - elif fileNameLower.endswith('.gz'): + elif fileNameLower.endswith(".gz"): return gzip.open(fileName, mode, **kwargs) else: return open(fileName, mode, **kwargs) diff --git a/cebl/util/embed.py b/cebl/util/embed.py index 176817d..d74f788 100644 --- a/cebl/util/embed.py +++ b/cebl/util/embed.py @@ -9,7 +9,7 @@ def slidingWindow(s, span, stride=None, axis=0): """Sliding window. """ #s = np.ascontiguousarray(s) - s = np.require(s, requirements=['C', 'O']) + s = np.require(s, requirements=["C", "O"]) if stride is None: stride = span @@ -17,13 +17,13 @@ def slidingWindow(s, span, stride=None, axis=0): # catch some bad values since this is a common place for # bugs to crop up in other routines if span > s.shape[axis]: - raise ValueError('Span of %d exceeds input length of %d.' % (span, s.shape[axis])) + raise ValueError("Span of %d exceeds input length of %d." % (span, s.shape[axis])) if span < 0: - raise ValueError('Negative span of %d is invalid.' % span) + raise ValueError("Negative span of %d is invalid." % span) if stride < 1: - raise ValueError('Stride of %d is not positive.' % stride) + raise ValueError("Stride of %d is not positive." % stride) nWin = int(np.ceil((s.shape[axis]-span+1) / float(stride))) @@ -43,7 +43,7 @@ def timeEmbed(s, lags=1, stride=1, axis=0): Only copies s if necessary. """ #s = np.ascontiguousarray(s) - s = np.require(s, requirements=['C', 'O']) + s = np.require(s, requirements=["C", "O"]) if lags == 0: return s diff --git a/cebl/util/errm.py b/cebl/util/errm.py index b7a321a..8787bff 100644 --- a/cebl/util/errm.py +++ b/cebl/util/errm.py @@ -57,7 +57,7 @@ def gini(y, g, normalize=True): """Gini coefficient. 
""" if y.ndim > 1 or g.ndim > 1: - raise RuntimeError('Gini does not currently support more than one axis.') + raise RuntimeError("Gini does not currently support more than one axis.") if normalize: return gini(y, g, normalize=False) / gini(g, g, normalize=False) diff --git a/cebl/util/parallel.py b/cebl/util/parallel.py index b415158..7fadf93 100644 --- a/cebl/util/parallel.py +++ b/cebl/util/parallel.py @@ -43,7 +43,7 @@ def getWriteLock(self): class _CountLock: def __init__(self, lock): self.lock = lock - self.counter = mp.Value('i', 0) + self.counter = mp.Value("i", 0) self.counterLock = mp.Lock() def acquire(self): diff --git a/cebl/util/shuffle.py b/cebl/util/shuffle.py index 1df62c2..81a36ac 100644 --- a/cebl/util/shuffle.py +++ b/cebl/util/shuffle.py @@ -25,7 +25,7 @@ def blockShuffle(x, n, axis=None): if axis is None: if (x.size % n) != 0: - raise RuntimeError('x.size = %d is not a multiple of n = %d.' % (x.size, n)) + raise RuntimeError("x.size = %d is not a multiple of n = %d." % (x.size, n)) shp = x.shape @@ -37,7 +37,7 @@ def blockShuffle(x, n, axis=None): else: if (x.shape[axis] % n) != 0: - raise RuntimeError('x.shape[%d] = %d is not a multiple of n = %d.' % \ + raise RuntimeError("x.shape[%d] = %d is not a multiple of n = %d." % \ (axis, x.shape[axis], n)) lax = x.shape[axis] From ac94d0ca31ef3f59d347003828eaa6d15d171958 Mon Sep 17 00:00:00 2001 From: Elliott Forney Date: Wed, 13 Mar 2019 18:21:57 -0600 Subject: [PATCH 9/9] style: single quotes to double around strings --- cebl/rt/__init__.py | 2 +- cebl/rt/filters/__init__.py | 1 - cebl/rt/filters/bandpass.py | 92 ++++++++++++++++++------------------ cebl/rt/filters/bsmtcna.py | 12 ++--- cebl/rt/filters/bsmtpsd.py | 12 ++--- cebl/rt/filters/bsp3.py | 12 ++--- cebl/rt/filters/demean.py | 4 +- cebl/rt/filters/filt.py | 2 +- cebl/rt/filters/gnmtpsd.py | 6 +-- cebl/rt/filters/gnmttde.py | 10 ++-- cebl/rt/filters/gnp3.py | 6 +-- cebl/rt/filters/ica.py | 20 ++++---- cebl/rt/filters/moving.py | 16 +++---- cebl/rt/filters/msf.py | 2 +- cebl/rt/filters/pca.py | 2 +- cebl/rt/filters/reference.py | 4 +- cebl/rt/filters/strans.py | 36 +++++++------- cebl/rt/filters/test.py | 6 +-- cebl/rt/filters/wiener.py | 4 +- cebl/rt/logging.py | 8 ++-- cebl/rt/main.py | 20 ++++---- cebl/rt/manager.py | 4 +- 22 files changed, 140 insertions(+), 141 deletions(-) diff --git a/cebl/rt/__init__.py b/cebl/rt/__init__.py index 184b0fc..75a9f42 100644 --- a/cebl/rt/__init__.py +++ b/cebl/rt/__init__.py @@ -3,7 +3,7 @@ # note, the cebl startup script also sets the backend. This is just in case # cebl is started from the console. import matplotlib.pyplot as plt -plt.switch_backend('WXAgg') +plt.switch_backend("WXAgg") from . import events from . import logging diff --git a/cebl/rt/filters/__init__.py b/cebl/rt/filters/__init__.py index 5795288..0bc3491 100644 --- a/cebl/rt/filters/__init__.py +++ b/cebl/rt/filters/__init__.py @@ -1,6 +1,5 @@ """Real-time filters. 
""" - filterChoices = {} from .filt import * diff --git a/cebl/rt/filters/bandpass.py b/cebl/rt/filters/bandpass.py index 830867f..27db91d 100644 --- a/cebl/rt/filters/bandpass.py +++ b/cebl/rt/filters/bandpass.py @@ -10,8 +10,8 @@ from .filt import Filter, FilterConfigPanel -IIRBandpassName = 'IIR Bandpass Filter' -FIRBandpassName = 'FIR Bandpass Filter' +IIRBandpassName = "IIR Bandpass Filter" +FIRBandpassName = "FIR Bandpass Filter" class IIRBandpassConfigPanel(FilterConfigPanel): @@ -38,7 +38,7 @@ def initOptions(self): optionsSizer.Add(self.filtTypeComboBox, proportion=1, flag=wx.LEFT | wx.TOP | wx.RIGHT | wx.ALIGN_CENTER, border=20) - self.zeroPhaseCheckBox = wx.CheckBox(self, label='Zero Phase') + self.zeroPhaseCheckBox = wx.CheckBox(self, label="Zero Phase") self.zeroPhaseCheckBox.SetValue(self.flt.zeroPhase) self.Bind(wx.EVT_CHECKBOX, self.setZeroPhase, self.zeroPhaseCheckBox) optionsSizer.Add(self.zeroPhaseCheckBox, proportion=1, @@ -50,7 +50,7 @@ def setFiltType(self, event): filtType = self.filtTypeComboBox.GetValue() if filtType not in self.flt.filtMap.keys(): - raise RuntimeError('Invalid filter type: %s.' % str(filtType)) + raise RuntimeError("Invalid filter type: %s." % str(filtType)) self.flt.filtType = filtType self.updateResponse() @@ -62,8 +62,8 @@ def setZeroPhase(self, event): def initSliders(self): sliderSizer = wx.BoxSizer(wx.HORIZONTAL) - lowFreqControlBox = widgets.ControlBox(self, label='lowFreq', orient=wx.VERTICAL) - self.lowFreqText = wx.StaticText(self, label='%6.2f(Hz)' % self.flt.lowFreq) + lowFreqControlBox = widgets.ControlBox(self, label="lowFreq", orient=wx.VERTICAL) + self.lowFreqText = wx.StaticText(self, label="%6.2f(Hz)" % self.flt.lowFreq) lowFreqTextSizer = wx.BoxSizer(orient=wx.VERTICAL) lowFreqTextSizer.Add(self.lowFreqText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.lowFreqSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -77,8 +77,8 @@ def initSliders(self): sliderSizer.Add(lowFreqControlBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=10) - highFreqControlBox = widgets.ControlBox(self, label='highFreq', orient=wx.VERTICAL) - self.highFreqText = wx.StaticText(self, label='%6.2f(Hz)' % self.flt.highFreq) + highFreqControlBox = widgets.ControlBox(self, label="highFreq", orient=wx.VERTICAL) + self.highFreqText = wx.StaticText(self, label="%6.2f(Hz)" % self.flt.highFreq) highFreqTextSizer = wx.BoxSizer(orient=wx.VERTICAL) highFreqTextSizer.Add(self.highFreqText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.highFreqSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -92,8 +92,8 @@ def initSliders(self): sliderSizer.Add(highFreqControlBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=10) - orderControlBox = widgets.ControlBox(self, label='Order', orient=wx.VERTICAL) - self.orderText = wx.StaticText(self, label='%2d' % self.flt.order) + orderControlBox = widgets.ControlBox(self, label="Order", orient=wx.VERTICAL) + self.orderText = wx.StaticText(self, label="%2d" % self.flt.order) orderTextSizer = wx.BoxSizer(orient=wx.VERTICAL) orderTextSizer.Add(self.orderText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.orderSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -111,17 +111,17 @@ def initSliders(self): def setLowFreq(self, event): self.flt.lowFreq = self.lowFreqSlider.GetValue() / 4.0 - self.lowFreqText.SetLabel('%6.2f(Hz)' % self.flt.lowFreq) + self.lowFreqText.SetLabel("%6.2f(Hz)" % self.flt.lowFreq) self.updateResponse() def setHighFreq(self, event): self.flt.highFreq = 
self.highFreqSlider.GetValue() / 4.0 - self.highFreqText.SetLabel('%6.2f(Hz)' % self.flt.highFreq) + self.highFreqText.SetLabel("%6.2f(Hz)" % self.flt.highFreq) self.updateResponse() def setOrder(self, event): self.flt.order = self.orderSlider.GetValue() - self.orderText.SetLabel('%2d' % self.flt.order) + self.orderText.SetLabel("%2d" % self.flt.order) self.updateResponse() def initResponse(self): @@ -140,14 +140,14 @@ def initResponse(self): responseSizer = wx.BoxSizer(wx.VERTICAL) freqResponseControlBox = widgets.ControlBox(self, - label='Freqency Response', orient=wx.VERTICAL) + label="Freqency Response", orient=wx.VERTICAL) freqResponseControlBox.Add(self.freqResponseCanvas, proportion=1, flag=wx.ALL | wx.EXPAND, border=8) responseSizer.Add(freqResponseControlBox, proportion=1, flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10) phaseResponseControlBox = widgets.ControlBox(self, - label='Phase Response', orient=wx.VERTICAL) + label="Phase Response", orient=wx.VERTICAL) phaseResponseControlBox.Add(self.phaseResponseCanvas, proportion=1, flag=wx.ALL | wx.EXPAND, border=8) responseSizer.Add(phaseResponseControlBox, proportion=1, @@ -190,12 +190,12 @@ def updateResponse(self): self.freqResponseAx.cla() self.flt.bp.plotFreqResponse(ax=self.freqResponseAx, linewidth=2) self.freqResponseAx.autoscale(tight=True) - self.freqResponseAx.legend(prop={'size': 12}) + self.freqResponseAx.legend(prop={"size": 12}) self.freqResponseCanvas.draw() self.phaseResponseAx.cla() self.flt.bp.plotPhaseResponse(ax=self.phaseResponseAx, linewidth=2) - self.phaseResponseAx.legend(prop={'size': 12}) + self.phaseResponseAx.legend(prop={"size": 12}) self.phaseResponseAx.autoscale(tight=True) #if self.flt.zeroPhase: @@ -213,13 +213,13 @@ def __init__(self, *args, **kwargs): configPanelClass=IIRBandpassConfigPanel, **kwargs) self.filtMap = odict() - self.filtMap['Butterworth'] = 'butter' - self.filtMap['Chebyshev I'] = 'cheby1' - self.filtMap['Chebyshev II'] = 'cheby2' - self.filtMap['Elliptic'] = 'ellip' - self.filtMap['Bessel'] = 'bessel' + self.filtMap["Butterworth"] = "butter" + self.filtMap["Chebyshev I"] = "cheby1" + self.filtMap["Chebyshev II"] = "cheby2" + self.filtMap["Elliptic"] = "ellip" + self.filtMap["Bessel"] = "bessel" - self.filtType = 'Butterworth' + self.filtType = "Butterworth" self.nyquist = self.inSampRate/2.0 self.lowFreq = 0.0 self.highFreq = self.nyquist @@ -248,10 +248,10 @@ def updateFilter(self): # should be configurable XXX - idfah kwargs = {} - if filtType in ('ellip', 'cheby1'): - kwargs['rp'] = self.rp - if filtType in ('ellip', 'cheby2'): - kwargs['rs'] = self.rs + if filtType in ("ellip", "cheby1"): + kwargs["rp"] = self.rp + if filtType in ("ellip", "cheby2"): + kwargs["rs"] = self.rs # need dtype argument XXX - idfah self.bp = sig.BandpassFilterIIR(lowFreq=lowFreq, highFreq=highFreq, order=self.order, @@ -295,7 +295,7 @@ def setFiltType(self, event): filtType = self.filtTypeComboBox.GetValue() if filtType not in self.flt.filtMap.keys(): - raise RuntimeError('Invalid filter type: %s.' % str(filtType)) + raise RuntimeError("Invalid filter type: %s." 
% str(filtType)) self.flt.filtType = filtType self.updateResponse() @@ -303,8 +303,8 @@ def setFiltType(self, event): def initSliders(self): sliderSizer = wx.BoxSizer(wx.HORIZONTAL) - lowFreqControlBox = widgets.ControlBox(self, label='lowFreq', orient=wx.VERTICAL) - self.lowFreqText = wx.StaticText(self, label='%6.2f(Hz)' % self.flt.lowFreq) + lowFreqControlBox = widgets.ControlBox(self, label="lowFreq", orient=wx.VERTICAL) + self.lowFreqText = wx.StaticText(self, label="%6.2f(Hz)" % self.flt.lowFreq) lowFreqTextSizer = wx.BoxSizer(orient=wx.VERTICAL) lowFreqTextSizer.Add(self.lowFreqText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.lowFreqSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -318,8 +318,8 @@ def initSliders(self): sliderSizer.Add(lowFreqControlBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=10) - highFreqControlBox = widgets.ControlBox(self, label='highFreq', orient=wx.VERTICAL) - self.highFreqText = wx.StaticText(self, label='%6.2f(Hz)' % self.flt.highFreq) + highFreqControlBox = widgets.ControlBox(self, label="highFreq", orient=wx.VERTICAL) + self.highFreqText = wx.StaticText(self, label="%6.2f(Hz)" % self.flt.highFreq) highFreqTextSizer = wx.BoxSizer(orient=wx.VERTICAL) highFreqTextSizer.Add(self.highFreqText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.highFreqSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -333,8 +333,8 @@ def initSliders(self): sliderSizer.Add(highFreqControlBox, proportion=1, flag=wx.ALL | wx.EXPAND, border=10) - orderControlBox = widgets.ControlBox(self, label='Order', orient=wx.VERTICAL) - self.orderText = wx.StaticText(self, label='%2d' % self.flt.order) + orderControlBox = widgets.ControlBox(self, label="Order", orient=wx.VERTICAL) + self.orderText = wx.StaticText(self, label="%2d" % self.flt.order) orderTextSizer = wx.BoxSizer(orient=wx.VERTICAL) orderTextSizer.Add(self.orderText, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL) self.orderSlider = wx.Slider(self, style=wx.SL_VERTICAL | wx.SL_INVERSE, @@ -352,17 +352,17 @@ def initSliders(self): def setLowFreq(self, event): self.flt.lowFreq = self.lowFreqSlider.GetValue() / 4.0 - self.lowFreqText.SetLabel('%6.2f(Hz)' % self.flt.lowFreq) + self.lowFreqText.SetLabel("%6.2f(Hz)" % self.flt.lowFreq) self.updateResponse() def setHighFreq(self, event): self.flt.highFreq = self.highFreqSlider.GetValue() / 4.0 - self.highFreqText.SetLabel('%6.2f(Hz)' % self.flt.highFreq) + self.highFreqText.SetLabel("%6.2f(Hz)" % self.flt.highFreq) self.updateResponse() def setOrder(self, event): self.flt.order = self.orderSlider.GetValue() * 2 - self.orderText.SetLabel('%2d' % self.flt.order) + self.orderText.SetLabel("%2d" % self.flt.order) self.updateResponse() def initResponse(self): @@ -381,14 +381,14 @@ def initResponse(self): responseSizer = wx.BoxSizer(wx.VERTICAL) freqResponseControlBox = widgets.ControlBox(self, - label='Freqency Response', orient=wx.VERTICAL) + label="Freqency Response", orient=wx.VERTICAL) freqResponseControlBox.Add(self.freqResponseCanvas, proportion=1, flag=wx.ALL | wx.EXPAND, border=8) responseSizer.Add(freqResponseControlBox, proportion=1, flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10) phaseResponseControlBox = widgets.ControlBox(self, - label='Phase Response', orient=wx.VERTICAL) + label="Phase Response", orient=wx.VERTICAL) phaseResponseControlBox.Add(self.phaseResponseCanvas, proportion=1, flag=wx.ALL | wx.EXPAND, border=8) responseSizer.Add(phaseResponseControlBox, proportion=1, @@ -431,12 +431,12 @@ def 
updateResponse(self): self.freqResponseAx.cla() self.flt.bp.plotFreqResponse(ax=self.freqResponseAx, linewidth=2) self.freqResponseAx.autoscale(tight=True) - self.freqResponseAx.legend(prop={'size': 12}) + self.freqResponseAx.legend(prop={"size": 12}) self.freqResponseCanvas.draw() self.phaseResponseAx.cla() self.flt.bp.plotPhaseResponse(ax=self.phaseResponseAx, linewidth=2) - self.phaseResponseAx.legend(prop={'size': 12}) + self.phaseResponseAx.legend(prop={"size": 12}) self.phaseResponseAx.autoscale(tight=True) self.phaseResponseCanvas.draw() @@ -447,12 +447,12 @@ def __init__(self, *args, **kwargs): configPanelClass=FIRBandpassConfigPanel, **kwargs) self.filtMap = odict() - self.filtMap['Lanczos'] = 'lanczos' - self.filtMap['Sinc Blackman'] = 'sinc-blackman' - self.filtMap['Sinc Hamming'] = 'sinc-hamming' - self.filtMap['Sinc Hann'] = 'sinc-hann' + self.filtMap["Lanczos"] = "lanczos" + self.filtMap["Sinc Blackman"] = "sinc-blackman" + self.filtMap["Sinc Hamming"] = "sinc-hamming" + self.filtMap["Sinc Hann"] = "sinc-hann" - self.filtType = 'Sinc Blackman' + self.filtType = "Sinc Blackman" self.nyquist = self.inSampRate/2.0 self.lowFreq = 0.0 self.highFreq = self.nyquist diff --git a/cebl/rt/filters/bsmtcna.py b/cebl/rt/filters/bsmtcna.py index ce0bbc6..ecd938c 100644 --- a/cebl/rt/filters/bsmtcna.py +++ b/cebl/rt/filters/bsmtcna.py @@ -5,14 +5,14 @@ from .filt import Filter, FilterConfigPanel -BiosemiMTCNAName = 'BioSemiMTCNA' +BiosemiMTCNAName = "BioSemiMTCNA" class BiosemiMTCNAConfigPanel(FilterConfigPanel): def __init__(self, *args, **kwargs): FilterConfigPanel.__init__(self, *args, **kwargs) - self.hello = wx.StaticText(self, label='Hello World!') + self.hello = wx.StaticText(self, label="Hello World!") self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10) self.initLayout() @@ -23,10 +23,10 @@ def __init__(self, *args, **kwargs): configPanelClass=BiosemiMTCNAConfigPanel, **kwargs) def apply(self, cap): - cap.demean().reference(('EXG5','EXG6')).demean() - cap.keepChans(('F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2')) - #cap.keepChans(('C3', 'C4', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FP1', 'FP2', - # 'FZ', 'O1', 'O2', 'P3', 'P4', 'P7', 'P8', 'PZ', 'T7', 'T8')) + cap.demean().reference(("EXG5","EXG6")).demean() + cap.keepChans(("F3", "F4", "C3", "C4", "P3", "P4", "O1", "O2")) + #cap.keepChans(("C3", "C4", "CZ", "F3", "F4", "F7", "F8", "FP1", "FP2", + # "FZ", "O1", "O2", "P3", "P4", "P7", "P8", "PZ", "T7", "T8")) cap.bandpass(0.0, 80.0, order=3).downsample(4) cap.bandpass(1.0, np.inf, order=3) diff --git a/cebl/rt/filters/bsmtpsd.py b/cebl/rt/filters/bsmtpsd.py index 00429e7..53b6887 100644 --- a/cebl/rt/filters/bsmtpsd.py +++ b/cebl/rt/filters/bsmtpsd.py @@ -5,14 +5,14 @@ from .filt import Filter, FilterConfigPanel -BiosemiMTPSDName = 'BioSemiMTPSD' +BiosemiMTPSDName = "BioSemiMTPSD" class BiosemiMTPSDConfigPanel(FilterConfigPanel): def __init__(self, *args, **kwargs): FilterConfigPanel.__init__(self, *args, **kwargs) - self.hello = wx.StaticText(self, label='Hello World!') + self.hello = wx.StaticText(self, label="Hello World!") self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10) self.initLayout() @@ -23,10 +23,10 @@ def __init__(self, *args, **kwargs): configPanelClass=BiosemiMTPSDConfigPanel, **kwargs) def apply(self, cap): - cap.demean().reference(('EXG5','EXG6')).demean() - cap.keepChans(('F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2')) - #cap.keepChans(('C3', 'C4', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FP1', 'FP2', - # 'FZ', 'O1', 'O2', 'P3', 'P4', 'P7', 'P8', 
'PZ', 'T7', 'T8'))
+        cap.demean().reference(("EXG5","EXG6")).demean()
+        cap.keepChans(("F3", "F4", "C3", "C4", "P3", "P4", "O1", "O2"))
+        #cap.keepChans(("C3", "C4", "CZ", "F3", "F4", "F7", "F8", "FP1", "FP2",
+        #               "FZ", "O1", "O2", "P3", "P4", "P7", "P8", "PZ", "T7", "T8"))

         cap.bandpass(0.0, 80.0, order=3).downsample(4)
         cap.bandpass(1.0, np.inf, order=3).car()
diff --git a/cebl/rt/filters/bsp3.py b/cebl/rt/filters/bsp3.py
index 4003f3c..8cd4150 100644
--- a/cebl/rt/filters/bsp3.py
+++ b/cebl/rt/filters/bsp3.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-BiosemiP3Name = 'BioSemiP3'
+BiosemiP3Name = "BioSemiP3"


 class BiosemiP3ConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='BioSemi P300 Filter.')
+        self.hello = wx.StaticText(self, label="BioSemi P300 Filter.")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
@@ -23,10 +23,10 @@ def __init__(self, *args, **kwargs):
                 configPanelClass=BiosemiP3ConfigPanel, **kwargs)

     def apply(self, cap):
-        cap.demean().reference(('EXG5','EXG6')).demean()
-        #cap.keepChans(('C3', 'C4', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FP1', 'FP2',
-        #               'FZ', 'O1', 'O2', 'P3', 'P4', 'P7', 'P8', 'PZ', 'T7', 'T8'))
-        cap.keepChans(('Fz', 'Cz', 'P3', 'Pz', 'P4', 'P7', 'Oz', 'P8'))
+        cap.demean().reference(("EXG5","EXG6")).demean()
+        #cap.keepChans(("C3", "C4", "CZ", "F3", "F4", "F7", "F8", "FP1", "FP2",
+        #               "FZ", "O1", "O2", "P3", "P4", "P7", "P8", "PZ", "T7", "T8"))
+        cap.keepChans(("Fz", "Cz", "P3", "Pz", "P4", "P7", "Oz", "P8"))

         # used in rt sub1 day1
         #cap.bandpass(0.5, 10.0, order=2).downsample(32)
diff --git a/cebl/rt/filters/demean.py b/cebl/rt/filters/demean.py
index dca786c..e948e87 100644
--- a/cebl/rt/filters/demean.py
+++ b/cebl/rt/filters/demean.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-DemeanName = 'Demean'
+DemeanName = "Demean"


 class DemeanConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='Hello World!')
+        self.hello = wx.StaticText(self, label="Hello World!")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
diff --git a/cebl/rt/filters/filt.py b/cebl/rt/filters/filt.py
index ed42011..79eafdd 100644
--- a/cebl/rt/filters/filt.py
+++ b/cebl/rt/filters/filt.py
@@ -67,7 +67,7 @@ def getName(self):
         return self.name

     def apply(self, s):
-        raise NotImplementedError('apply not implemented.')
+        raise NotImplementedError("apply not implemented.")

     def genConfigPanel(self, parent, pg, *args, **kwargs):
         """Generate an instance of the configPanelClass, given as an
diff --git a/cebl/rt/filters/gnmtpsd.py b/cebl/rt/filters/gnmtpsd.py
index b89fb8d..1b2cba1 100644
--- a/cebl/rt/filters/gnmtpsd.py
+++ b/cebl/rt/filters/gnmtpsd.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-GNautilusMTPSDName = 'GNautilusMTPSD'
+GNautilusMTPSDName = "GNautilusMTPSD"


 class GNautilusMTPSDConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='Hello World!')
+        self.hello = wx.StaticText(self, label="Hello World!")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
@@ -23,7 +23,7 @@ def __init__(self, *args, **kwargs):
                 configPanelClass=GNautilusMTPSDConfigPanel, **kwargs)

     def apply(self, cap):
-        cap.keepChans(('F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'PO3', 'PO4'))
+        cap.keepChans(("F3", "F4", "C3", "C4", "P3", "P4", "PO3", "PO4"))
         cap.demean()

         cap.bandpass(0.0, 20.0, order=3)
diff --git a/cebl/rt/filters/gnmttde.py b/cebl/rt/filters/gnmttde.py
index 373e626..8cf14d9 100644
--- a/cebl/rt/filters/gnmttde.py
+++ b/cebl/rt/filters/gnmttde.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-GNautilusMTTDEName = 'GNautilusMTTDE'
+GNautilusMTTDEName = "GNautilusMTTDE"


 class GNautilusMTTDEConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='Hello World!')
+        self.hello = wx.StaticText(self, label="Hello World!")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
@@ -23,9 +23,9 @@ def __init__(self, *args, **kwargs):
                 configPanelClass=GNautilusMTTDEConfigPanel, **kwargs)

     def apply(self, cap):
-        #cap.keepChans(('F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'PO3', 'PO4'))
-        cap.keepChans(('C3', 'C4', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FP1', 'FP2',
-                       'FZ', 'Oz', 'P3', 'P4', 'P7', 'P8', 'PZ', 'T7', 'T8'))
+        #cap.keepChans(("F3", "F4", "C3", "C4", "P3", "P4", "PO3", "PO4"))
+        cap.keepChans(("C3", "C4", "CZ", "F3", "F4", "F7", "F8", "FP1", "FP2",
+                       "FZ", "Oz", "P3", "P4", "P7", "P8", "PZ", "T7", "T8"))
         cap.demean()

         #cap.bandpass(0.0, 28.0, order=3).downsample(3)
diff --git a/cebl/rt/filters/gnp3.py b/cebl/rt/filters/gnp3.py
index 7698628..77b2b35 100644
--- a/cebl/rt/filters/gnp3.py
+++ b/cebl/rt/filters/gnp3.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-GNautilusP3Name = 'GNautilusP3'
+GNautilusP3Name = "GNautilusP3"


 class GNautilusP3ConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='GNautilus P300 Filter.')
+        self.hello = wx.StaticText(self, label="GNautilus P300 Filter.")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
@@ -24,7 +24,7 @@ def __init__(self, *args, **kwargs):

     def apply(self, cap):
         cap.demean()
-        cap.keepChans(('Fz', 'Cz', 'P3', 'Pz', 'P4', 'P7', 'Oz', 'P8'))
+        cap.keepChans(("Fz", "Cz", "P3", "Pz", "P4", "P7", "Oz", "P8"))

         #cap.bandpass(0.0, 80.0, order=3)

diff --git a/cebl/rt/filters/ica.py b/cebl/rt/filters/ica.py
index 45eb270..31a7090 100644
--- a/cebl/rt/filters/ica.py
+++ b/cebl/rt/filters/ica.py
@@ -5,7 +5,7 @@
 from .strans import STrans, STransConfigPanel


-IndependentComponentsName = 'Independent Components'
+IndependentComponentsName = "Independent Components"


 class ICAConfigPanel(STransConfigPanel):
@@ -17,15 +17,15 @@ def initOptions(self):
         optionsSizer = wx.BoxSizer(wx.HORIZONTAL)

-        kurtosisControlBox = widgets.ControlBox(self, label='Kurtosis', orient=wx.VERTICAL)
-        self.kurtosisComboBox = wx.ComboBox(self, choices=('Adapt', 'Sub', 'Super'),
+        kurtosisControlBox = widgets.ControlBox(self, label="Kurtosis", orient=wx.VERTICAL)
+        self.kurtosisComboBox = wx.ComboBox(self, choices=("Adapt", "Sub", "Super"),
                 value=self.flt.kurtosis, style=wx.CB_DROPDOWN)
         self.Bind(wx.EVT_COMBOBOX, self.setKurtosis, self.kurtosisComboBox)
         kurtosisControlBox.Add(self.kurtosisComboBox, proportion=1, flag=wx.ALL, border=8)

         optionsSizer.Add(kurtosisControlBox, proportion=1, flag=wx.LEFT | wx.RIGHT, border=8)

-        maxIterControlBox = widgets.ControlBox(self, label='Max Iter.', orient=wx.HORIZONTAL)
+        maxIterControlBox = widgets.ControlBox(self, label="Max Iter.", orient=wx.HORIZONTAL)
         self.maxIterSpinCtrl = wx.SpinCtrl(self, value=str(self.flt.maxIter), min=50, max=3500)
         self.Bind(wx.EVT_SPINCTRL, self.setMaxIter, self.maxIterSpinCtrl)
         maxIterControlBox.Add(self.maxIterSpinCtrl, proportion=1,
@@ -33,7 +33,7 @@ def initOptions(self):
         optionsSizer.Add(maxIterControlBox, proportion=0, flag=wx.RIGHT | wx.EXPAND, border=8)

-        #lrControlBox = widgets.ControlBox(self, label='Learning Rate.', orient=wx.HORIZONTAL)
+        #lrControlBox = widgets.ControlBox(self, label="Learning Rate.", orient=wx.HORIZONTAL)

         self.sizer.Add(optionsSizer, proportion=0)

@@ -52,7 +52,7 @@ def __init__(self, *args, **kwargs):
                 configPanelClass=ICAConfigPanel, **kwargs)

     def initICAConfig(self):
-        self.kurtosis = 'Adapt'
+        self.kurtosis = "Adapt"
         self.learningRate = 0.01
         self.maxIter = 1500

@@ -63,17 +63,17 @@ def ICAWrapper(self, *args, **kwargs):

     def ICACallback(self, iteration, wtol):
         if (iteration % 50) == 0:
             percent = int(100*iteration/float(self.maxIter))
-            self.dialog.Update(percent, 'Complete: %d%%\nwtol: %f' % (percent,wtol))
+            self.dialog.Update(percent, "Complete: %d%%\nwtol: %f" % (percent,wtol))

     def updateFilter(self):
         if self.trainCap is not None:
-            self.dialog = wx.ProgressDialog('Training ICA',
-                    'Training', maximum=101,
+            self.dialog = wx.ProgressDialog("Training ICA",
+                    "Training", maximum=101,
                     style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

             STrans.updateFilter(self)
-            #self.dialog.Update(101, 'Reason: %s' % self.stransFilter.reason)
+            #self.dialog.Update(101, "Reason: %s" % self.stransFilter.reason)
             self.dialog.Destroy()

         else:
diff --git a/cebl/rt/filters/moving.py b/cebl/rt/filters/moving.py
index c49a26e..8b16ea7 100644
--- a/cebl/rt/filters/moving.py
+++ b/cebl/rt/filters/moving.py
@@ -8,7 +8,7 @@
 from .filt import Filter, FilterConfigPanel


-MovingAverageName = 'Moving Average'
+MovingAverageName = "Moving Average"


 class MovingAverageConfigPanel(FilterConfigPanel):
@@ -23,7 +23,7 @@ def __init__(self, *args, **kwargs):
     def initOptions(self):
         optionsSizer = wx.BoxSizer(wx.HORIZONTAL)

-        kernTypeControlBox = widgets.ControlBox(self, label='Kernel Type', orient=wx.HORIZONTAL)
+        kernTypeControlBox = widgets.ControlBox(self, label="Kernel Type", orient=wx.HORIZONTAL)
         self.kernTypeComboBox = wx.ComboBox(self, choices=list(self.flt.kernMap.keys()),
                 value=self.flt.kernType, style=wx.CB_DROPDOWN)
         self.Bind(wx.EVT_COMBOBOX, self.setKernType, self.kernTypeComboBox)
@@ -31,7 +31,7 @@ def initOptions(self):
         optionsSizer.Add(kernTypeControlBox, proportion=1,
                 flag=wx.ALL | wx.ALIGN_CENTER, border=8)

-        widthControlBox = widgets.ControlBox(self, label='Width', orient=wx.HORIZONTAL)
+        widthControlBox = widgets.ControlBox(self, label="Width", orient=wx.HORIZONTAL)
         self.widthSpinCtrl = wx.SpinCtrl(self, value=str(self.flt.width), min=2, max=100)
         self.Bind(wx.EVT_SPINCTRL, self.setWidth, self.widthSpinCtrl)
         widthControlBox.Add(self.widthSpinCtrl, proportion=1, flag=wx.ALL, border=8)
@@ -44,7 +44,7 @@ def setKernType(self, event):
         kernType = self.kernTypeComboBox.GetValue()

         if kernType not in self.flt.kernMap.keys():
-            raise RuntimeError('Invalid kernel type: %s.' % str(kernType))
+            raise RuntimeError("Invalid kernel type: %s." % str(kernType))

         self.flt.kernType = kernType

@@ -65,11 +65,11 @@ def initConfig(self):
         self.width = 10

         self.kernMap = odict()
-        self.kernMap['Boxcar'] = sig.windows.boxcar
-        self.kernMap['Gaussian'] = lambda w: sig.windows.gaussian(w, std=0.12*w) # configurable std XXX - idfah
-        self.kernMap['Triangular'] = sig.windows.triang
+        self.kernMap["Boxcar"] = sig.windows.boxcar
+        self.kernMap["Gaussian"] = lambda w: sig.windows.gaussian(w, std=0.12*w) # configurable std XXX - idfah
+        self.kernMap["Triangular"] = sig.windows.triang

-        self.kernType = 'Boxcar'
+        self.kernType = "Boxcar"

     def apply(self, cap):
         return cap.ma(width=self.width, kernelFunc=self.kernMap[self.kernType])
diff --git a/cebl/rt/filters/msf.py b/cebl/rt/filters/msf.py
index ccff8d7..4f103b4 100644
--- a/cebl/rt/filters/msf.py
+++ b/cebl/rt/filters/msf.py
@@ -2,7 +2,7 @@
 from .strans import STrans, STransConfigPanel


-MaxSignalFractionName = 'Max Signal Fraction'
+MaxSignalFractionName = "Max Signal Fraction"


 class MaxSignalFraction(STrans):
diff --git a/cebl/rt/filters/pca.py b/cebl/rt/filters/pca.py
index 44fa278..3b9d28a 100644
--- a/cebl/rt/filters/pca.py
+++ b/cebl/rt/filters/pca.py
@@ -2,7 +2,7 @@
 from .strans import STrans, STransConfigPanel


-PrincipalComponentsName = 'Principal Components'
+PrincipalComponentsName = "Principal Components"


 class PrincipalComponents(STrans):
diff --git a/cebl/rt/filters/reference.py b/cebl/rt/filters/reference.py
index c77ccdb..b62011a 100644
--- a/cebl/rt/filters/reference.py
+++ b/cebl/rt/filters/reference.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-ReferenceName = 'Reference'
+ReferenceName = "Reference"


 class ReferenceConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='Hello World!')
+        self.hello = wx.StaticText(self, label="Hello World!")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
diff --git a/cebl/rt/filters/strans.py b/cebl/rt/filters/strans.py
index 8ed1228..9af899c 100644
--- a/cebl/rt/filters/strans.py
+++ b/cebl/rt/filters/strans.py
@@ -77,24 +77,24 @@ def updateFiltTrain(self, event=None):

     def initOptions(self):
         optionsSizer = wx.BoxSizer(wx.HORIZONTAL)

-        collectControlBox = widgets.ControlBox(self, label='Collect Data', orient=wx.HORIZONTAL)
-        self.collectButton = wx.Button(self, label='Start')
+        collectControlBox = widgets.ControlBox(self, label="Collect Data", orient=wx.HORIZONTAL)
+        self.collectButton = wx.Button(self, label="Start")
         self.Bind(wx.EVT_BUTTON, self.toggleCollect, self.collectButton)
         collectControlBox.Add(self.collectButton, proportion=1, flag=wx.ALL, border=8)
         optionsSizer.Add(collectControlBox, proportion=0, flag=wx.ALL | wx.EXPAND, border=8)

-        trainControlBox = widgets.ControlBox(self, label='Retrain', orient=wx.HORIZONTAL)
-        self.trainButton = wx.Button(self, label='Update')
+        trainControlBox = widgets.ControlBox(self, label="Retrain", orient=wx.HORIZONTAL)
+        self.trainButton = wx.Button(self, label="Update")
         self.Bind(wx.EVT_BUTTON, self.updateFilt, self.trainButton)
         trainControlBox.Add(self.trainButton, proportion=1, flag=wx.ALL, border=8)
         optionsSizer.Add(trainControlBox, proportion=0,
                 flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=8)

-        viewControlBox = widgets.ControlBox(self, label='View', orient=wx.HORIZONTAL)
-        self.rawViewRbtn = wx.RadioButton(self, label='Raw', style=wx.RB_GROUP)
+        viewControlBox = widgets.ControlBox(self, label="View", orient=wx.HORIZONTAL)
+        self.rawViewRbtn = wx.RadioButton(self, label="Raw", style=wx.RB_GROUP)
         self.rawViewRbtn.SetValue(True)
         self.Bind(wx.EVT_RADIOBUTTON, self.setRawView, self.rawViewRbtn)
-        self.filteredViewRbtn = wx.RadioButton(self, label='Filtered')
+        self.filteredViewRbtn = wx.RadioButton(self, label="Filtered")
         self.Bind(wx.EVT_RADIOBUTTON, self.setFilteredView, self.filteredViewRbtn)
         viewControlBox.Add(self.rawViewRbtn, proportion=1,
@@ -110,25 +110,25 @@ def initOptions(self):
         self.rawViewRbtn.Disable()
         self.filteredViewRbtn.Disable()

-        compControlBox = widgets.ControlBox(self, label='Components', orient=wx.HORIZONTAL)
+        compControlBox = widgets.ControlBox(self, label="Components", orient=wx.HORIZONTAL)
         self.compTextCtrl = wx.TextCtrl(parent=self, style=wx.TE_PROCESS_ENTER)
         self.Bind(wx.EVT_TEXT_ENTER, self.setComp, self.compTextCtrl)
         self.compTextCtrl.Bind(wx.EVT_KILL_FOCUS, self.setComp, self.compTextCtrl)
         compControlBox.Add(self.compTextCtrl, proportion=1, flag=wx.ALL | wx.EXPAND, border=10)

-        self.removeCheckBox = wx.CheckBox(self, label='Remove')
+        self.removeCheckBox = wx.CheckBox(self, label="Remove")
         self.removeCheckBox.SetValue(self.flt.remove)
         self.Bind(wx.EVT_CHECKBOX, self.setRemove, self.removeCheckBox)
         compControlBox.Add(self.removeCheckBox, proportion=1,
                 flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=8)

-        self.transformCheckBox = wx.CheckBox(self, label='Transform')
+        self.transformCheckBox = wx.CheckBox(self, label="Transform")
         self.transformCheckBox.SetValue(self.flt.transform)
         self.Bind(wx.EVT_CHECKBOX, self.setTransform, self.transformCheckBox)
         compControlBox.Add(self.transformCheckBox, proportion=1,
                 flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=8)

-        lagsControlBox = widgets.ControlBox(self, label='Lags', orient=wx.HORIZONTAL)
+        lagsControlBox = widgets.ControlBox(self, label="Lags", orient=wx.HORIZONTAL)
         self.lagsSpinCtrl = wx.SpinCtrl(self, value=str(self.flt.lags), min=0, max=20)
         self.Bind(wx.EVT_SPINCTRL, self.setLags, self.lagsSpinCtrl)
         lagsControlBox.Add(self.lagsSpinCtrl, proportion=1,
@@ -156,7 +156,7 @@ def toggleCollect(self, event):
             self.updateFilt()

             # set button label to start
-            self.collectButton.SetLabel('Start')
+            self.collectButton.SetLabel("Start")

             # enable view buttons
             self.trainButton.Enable()
@@ -169,7 +169,7 @@ def toggleCollect(self, event):
         else:
             # set button label to stop
-            self.collectButton.SetLabel('Stop')
+            self.collectButton.SetLabel("Stop")

             # start collecting
             self.pg.start()

@@ -195,11 +195,11 @@ def setComp(self, event):

         if len(compStr) != 0:
             try:
-                toks = compStr.replace(' ', '').split(',')
+                toks = compStr.replace(" ", "").split(",")

                 for tok in toks:
-                    if '-' in tok:
-                        low, high = (int(c) for c in tok.split('-'))
+                    if "-" in tok:
+                        low, high = (int(c) for c in tok.split("-"))
                         comp = range(low, high+1)
                     else:
                         comp = [int(tok,)]
@@ -208,7 +208,7 @@ def setComp(self, event):
             except Exception as e:
                 self.flt.comp = []
-                wx.LogWarning('Invalid component config: %s.' % str(compStr))
+                wx.LogWarning("Invalid component config: %s." % str(compStr))

         self.flt.updateFilteredTrain()
         self.updateTrace()

@@ -285,6 +285,6 @@ def apply(self, cap):
         filteredCap = eeg.EEG(filteredData, sampRate=self.getOutSampRate(),
                 chanNames=self.getOutChans(), markers=markers,
-                deviceName='Max Signal Fraction')
+                deviceName="Max Signal Fraction")

         return filteredCap
diff --git a/cebl/rt/filters/test.py b/cebl/rt/filters/test.py
index d50305b..cc37a8f 100644
--- a/cebl/rt/filters/test.py
+++ b/cebl/rt/filters/test.py
@@ -5,14 +5,14 @@
 from .filt import Filter, FilterConfigPanel


-TestName = 'Test'
+TestName = "Test"


 class TestConfigPanel(FilterConfigPanel):
     def __init__(self, *args, **kwargs):
         FilterConfigPanel.__init__(self, *args, **kwargs)

-        self.hello = wx.StaticText(self, label='Test Filter.')
+        self.hello = wx.StaticText(self, label="Test Filter.")
         self.sizer.Add(self.hello, proportion=1, flag=wx.EXPAND, border=10)

         self.initLayout()
@@ -23,5 +23,5 @@ def __init__(self, *args, **kwargs):
                 configPanelClass=TestConfigPanel, **kwargs)

     def apply(self, cap):
-        cap.keepChans(('C1', 'C2'))
+        cap.keepChans(("C1", "C2"))
         return cap
diff --git a/cebl/rt/filters/wiener.py b/cebl/rt/filters/wiener.py
index d0d0685..1de80b3 100644
--- a/cebl/rt/filters/wiener.py
+++ b/cebl/rt/filters/wiener.py
@@ -8,7 +8,7 @@
 from .filt import Filter, FilterConfigPanel


-WienerName = 'Wiener'
+WienerName = "Wiener"


 class WienerConfigPanel(FilterConfigPanel):
@@ -24,7 +24,7 @@ def initOptions(self):

         # sizes should only be odd, easiest way is to implement this as slider with odd values XXX - idfah
-        sizeControlBox = widgets.ControlBox(self, label='Size', orient=wx.HORIZONTAL)
+        sizeControlBox = widgets.ControlBox(self, label="Size", orient=wx.HORIZONTAL)
         self.sizeSpinCtrl = wx.SpinCtrl(self, value=str(self.flt.size), min=3, max=100)
         self.Bind(wx.EVT_SPINCTRL, self.setSize, self.sizeSpinCtrl)
         sizeControlBox.Add(self.sizeSpinCtrl, proportion=1, flag=wx.ALL, border=8)

diff --git a/cebl/rt/logging.py b/cebl/rt/logging.py
index 664cf99..c943056 100644
--- a/cebl/rt/logging.py
+++ b/cebl/rt/logging.py
@@ -9,13 +9,13 @@ def __init__(self, *args, **kwargs):

     def DoLogTextAtLevel(self, level, msg):
         if level == wx.LOG_Warning:
-            caption = 'Warning'
+            caption = "Warning"
         elif level == wx.LOG_Error:
-            caption = 'Error'
+            caption = "Error"
         else:
-            caption = 'Message'
+            caption = "Message"

-        fullMessage = caption + ': ' + msg + '\n'
+        fullMessage = caption + ": " + msg + "\n"
         if level == wx.LOG_Error:
             sys.stderr.write(fullMessage)
             sys.stderr.flush()
diff --git a/cebl/rt/main.py b/cebl/rt/main.py
index f4b9644..65e7e9b 100644
--- a/cebl/rt/main.py
+++ b/cebl/rt/main.py
@@ -16,7 +16,7 @@ class CEBLApp(wx.App):
     def OnInit(self):
         """Create a new CEBLMain frame.
         """
-        self.SetAppName('CEBL')
+        self.SetAppName("CEBL")
         self.main = CEBLMain()

         return True
@@ -24,7 +24,7 @@ def OnExit(self):
         """Send gracefull exit notice.
            This should go in final release XXX - idfah
         """
-        print('Gracefull exit.')
+        print("Gracefull exit.")
         return True

 class CEBLMain(wx.Frame):
@@ -42,7 +42,7 @@ def __init__(self):
         displaySize = 1.2*displaySize[1], 0.75*displaySize[1]

         # call base class constructor
-        wx.Frame.__init__(self, parent=None, title='CEBL3', size=displaySize)
+        wx.Frame.__init__(self, parent=None, title="CEBL3", size=displaySize)

         # initialize the main notebook
         self.initNotebook()
@@ -124,15 +124,15 @@ def updateStatusBar(self, event=None):
         curRollCount, curBuffFill = src.getBufferStats()
         sampRate = src.getEffectiveSampRate()

-        self.statusBar.SetStatusText('Source: %s' % str(src), 0)
-        self.statusBar.SetStatusText('Running Pages: %d' % self.mgr.getNRunningPages(), 1)
-        self.statusBar.SetStatusText('Buffer: %d/%d%%' % (curRollCount, int(curBuffFill*100)), 2)
-        self.statusBar.SetStatusText('Sampling Rate: %.2fHz' % sampRate, 3)
-        self.statusBar.SetStatusText('Version: 3.0.0a', 4)
+        self.statusBar.SetStatusText("Source: %s" % str(src), 0)
+        self.statusBar.SetStatusText("Running Pages: %d" % self.mgr.getNRunningPages(), 1)
+        self.statusBar.SetStatusText("Buffer: %d/%d%%" % (curRollCount, int(curBuffFill*100)), 2)
+        self.statusBar.SetStatusText("Sampling Rate: %.2fHz" % sampRate, 3)
+        self.statusBar.SetStatusText("Version: 3.0.0a", 4)

 class Splash(wx.adv.SplashScreen):
     def __init__(self, parent):
-        logo = wx.Image(os.path.dirname(__file__) + '/images/CEBL3_splash.png',
+        logo = wx.Image(os.path.dirname(__file__) + "/images/CEBL3_splash.png",
                 wx.BITMAP_TYPE_PNG).ConvertToBitmap()

         wx.adv.SplashScreen.__init__(self,
@@ -143,5 +143,5 @@ def run():
     bci = CEBLApp()
     bci.MainLoop()

-if __name__ == '__main__':
+if __name__ == "__main__":
     run()
diff --git a/cebl/rt/manager.py b/cebl/rt/manager.py
index 0a4581d..6db01b5 100644
--- a/cebl/rt/manager.py
+++ b/cebl/rt/manager.py
@@ -3,7 +3,7 @@
 import wx

 #import warnings
-#warnings.simplefilter('always')
+#warnings.simplefilter("always")

 from . import events
 from . import logging
@@ -12,7 +12,7 @@

 def logExceptionHook(etype, e, trace):
-    wx.LogError(''.join(traceback.format_exception(etype, e, trace)) + 'Uncaught.\n')
+    wx.LogError("".join(traceback.format_exception(etype, e, trace)) + "Uncaught.\n")


 class Manager:
     def __init__(self, pageParent, statusPanel=None, sourceList=None,