text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def reset(self):
    """
    Reset the state of the temporal pooler
    """
    # Pooling state: per-column activation level plus the list of columns
    # currently in pooling mode.
    self._poolingActivation = numpy.zeros((self._numColumns), dtype="int32")
    self._poolingColumns = []
    # Spatial-pooler bookkeeping (duty cycles and boost factors) back to
    # their pristine post-construction values.  realDType is a module-level
    # dtype alias defined elsewhere in this file.
    self._overlapDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
    self._activeDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
    self._minOverlapDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
    self._minActiveDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
    self._boostFactors = numpy.ones(self._numColumns, dtype=realDType)
def compute(self, inputVector, learn, activeArray, burstingColumns,
            predictedCells):
    """
    This is the primary public method of the class. This function takes an
    input vector and outputs the indices of the active columns.

    New parameters defined here:
    @param inputVector: The active cells from a Temporal Memory
    @param learn: A Boolean specifying whether learning will be performed
    @param activeArray: An array representing the active columns produced by
                        this method
    @param burstingColumns: A numpy array with numColumns elements having
                        binary values with 1 representing a currently bursting
                        column in Temporal Memory.
    @param predictedCells: A numpy array with numInputs elements. A 1 indicates
                        that this cell switching from predicted state in the
                        previous time step to active state in the current
                        timestep
    """
    assert (numpy.size(inputVector) == self._numInputs)
    assert (numpy.size(predictedCells) == self._numInputs)

    self._updateBookeepingVars(learn)
    inputVector = numpy.array(inputVector, dtype=realDType)
    predictedCells = numpy.array(predictedCells, dtype=realDType)
    # NOTE(review): reshape() is not in-place; this return value is discarded,
    # which suggests `inputVector = inputVector.reshape(-1)` was intended --
    # confirm before changing.
    inputVector.reshape(-1)

    if self._spVerbosity > 3:
      print " Input bits: ", inputVector.nonzero()[0]
      print " predictedCells: ", predictedCells.nonzero()[0]

    # Phase 1: Calculate overlap scores
    # The overlap score has 4 components:
    # (1) Overlap between correctly predicted input cells and pooling TP cells
    # (2) Overlap between active input cells and all TP cells
    #     (like standard SP calculation)
    # (3) Overlap between correctly predicted input cells and all TP cells
    # (4) Overlap from bursting columns in TM and all TP cells

    # 1) Calculate pooling overlap
    if self.usePoolingRule:
      overlapsPooling = self._calculatePoolingActivity(predictedCells, learn)
      if self._spVerbosity > 4:
        print "usePoolingRule: Overlaps after step 1:"
        print " ", overlapsPooling
    else:
      overlapsPooling = 0

    # 2) Calculate overlap between active input cells and connected synapses
    overlapsAllInput = self._calculateOverlap(inputVector)

    # 3) overlap with predicted inputs
    # NEW: Isn't this redundant with 1 and 2)? This looks at connected synapses
    # only. If 1) is called with learning=False connected synapses are used and
    # it is somewhat redundant although there is a boosting factor in 1) which
    # makes 1's effect stronger. If 1) is called with learning=True it's less
    # redundant
    overlapsPredicted = self._calculateOverlap(predictedCells)

    if self._spVerbosity > 4:
      print "Overlaps with all inputs:"
      print " Number of On Bits: ", inputVector.sum()
      print " ", overlapsAllInput
      print "Overlaps with predicted inputs:"
      print " ", overlapsPredicted

    # 4) consider bursting columns
    if self.useBurstingRule:
      overlapsBursting = self._calculateBurstingColumns(burstingColumns)
      if self._spVerbosity > 4:
        print "Overlaps with bursting inputs:"
        print " ", overlapsBursting
    else:
      overlapsBursting = 0

    overlaps = (overlapsPooling + overlapsPredicted + overlapsAllInput +
                overlapsBursting)

    # Apply boosting when learning is on
    if learn:
      boostedOverlaps = self._boostFactors * overlaps
      if self._spVerbosity > 4:
        print "Overlaps after boosting:"
        print " ", boostedOverlaps
    else:
      boostedOverlaps = overlaps

    # Apply inhibition to determine the winning columns
    activeColumns = self._inhibitColumns(boostedOverlaps)

    if learn:
      self._adaptSynapses(inputVector, activeColumns, predictedCells)
      self._updateDutyCycles(overlaps, activeColumns)
      self._bumpUpWeakColumns()
      self._updateBoostFactors()
      if self._isUpdateRound():
        self._updateInhibitionRadius()
        self._updateMinDutyCycles()

    activeArray.fill(0)
    if activeColumns.size > 0:
      activeArray[activeColumns] = 1

    # update pooling state of cells: only active columns that received
    # predicted input stay in (or enter) pooling mode.
    activeColumnIndices = numpy.where(overlapsPredicted[activeColumns] > 0)[0]
    activeColWithPredictedInput = activeColumns[activeColumnIndices]

    # NOTE(review): numUnPredictedInput counts nonzero entries, but
    # numPredictedInput counts ALL entries of predictedCells -- confirm this
    # asymmetry is intentional before relying on fracUnPredicted.
    numUnPredictedInput = float(len(burstingColumns.nonzero()[0]))
    numPredictedInput = float(len(predictedCells))
    fracUnPredicted = numUnPredictedInput / (numUnPredictedInput +
                                             numPredictedInput)

    self._updatePoolingState(activeColWithPredictedInput, fracUnPredicted)

    if self._spVerbosity > 2:
      activeColumns.sort()
      print "The following columns are finally active:"
      print " ", activeColumns
      print "The following columns are in pooling state:"
      print " ", self._poolingActivation.nonzero()[0]
      # print "Inputs to pooling columns"
      # print " ",overlapsPredicted[self._poolingColumns]

    return activeColumns
def printParameters(self):
    """
    Useful for debugging.
    """
    # Dump every user-visible parameter.  Values are read through the public
    # getters except _columnDimensions and _version, which have no getter.
    print "------------PY TemporalPooler Parameters ------------------"
    print "numInputs = ", self.getNumInputs()
    print "numColumns = ", self.getNumColumns()
    print "columnDimensions = ", self._columnDimensions
    print "numActiveColumnsPerInhArea = ", self.getNumActiveColumnsPerInhArea()
    print "potentialPct = ", self.getPotentialPct()
    print "globalInhibition = ", self.getGlobalInhibition()
    print "localAreaDensity = ", self.getLocalAreaDensity()
    print "stimulusThreshold = ", self.getStimulusThreshold()
    print "synPermActiveInc = ", self.getSynPermActiveInc()
    print "synPermInactiveDec = ", self.getSynPermInactiveDec()
    print "synPermConnected = ", self.getSynPermConnected()
    print "minPctOverlapDutyCycle = ", self.getMinPctOverlapDutyCycles()
    print "dutyCyclePeriod = ", self.getDutyCyclePeriod()
    print "boostStrength = ", self.getBoostStrength()
    print "spVerbosity = ", self.getSpVerbosity()
    print "version = ", self._version
def train(self, inputData, numIterations, reset=False):
    """
    Trains the SparseNet, with the provided data.

    The reset parameter can be set to False if the network should not be
    reset before training (for example for continuing a previously started
    training).

    :param inputData:     (array) Input data, of dimension
                          (inputDim, numPoints)
    :param numIterations: (int)   Number of training iterations
    :param reset:         (bool)  If set to True, reset basis and history
    :raises ValueError: if a sampled batch's first dimension does not match
                        this network's filter dimension
    """
    if not isinstance(inputData, np.ndarray):
      inputData = np.array(inputData)

    if reset:
      self._reset()

    for _ in range(numIterations):
      self._iteration += 1
      batch = self._getDataBatch(inputData)

      # Training on mis-shaped data would silently corrupt the basis,
      # so fail fast instead of trying to adapt.
      if batch.shape[0] != self.filterDim:
        raise ValueError("Batches and filter dimensions don't match!")

      activations = self.encode(batch)
      self._learn(batch, activations)

      # Periodically anneal the learning rate.
      if self._iteration % self.decayCycle == 0:
        self.learningRate *= self.learningRateDecay

    if self.verbosity >= 1:
      self.plotLoss()
      self.plotBasis()
def encode(self, data, flatten=False):
    """
    Encodes the provided input data, returning a sparse vector of activations.

    It solves a dynamic system to find optimal activations, as proposed by
    Rozell et al. (2008).

    :param data:    (array) Data to be encoded (single point or multiple)
    :param flatten: (bool)  Whether or not the data needs to be flattened, in
                    the case of images for example. Does not need to be
                    enabled during training.
    :return: (array) Array of sparse activations (dimOutput, numPoints)
    """
    if not isinstance(data, np.ndarray):
      data = np.array(data)

    # flatten if necessary
    if flatten:
      try:
        data = np.reshape(data, (self.filterDim, data.shape[-1]))
      except ValueError:
        # only one data point
        data = np.reshape(data, (self.filterDim, 1))

    if data.shape[0] != self.filterDim:
      raise ValueError("Data does not have the correct dimension!")

    # if single data point, convert to 2-dimensional array for consistency
    if len(data.shape) == 1:
      data = data[:, np.newaxis]

    # Driving input for each output unit, and lateral inhibition matrix
    # (gram matrix of the basis with the self-connections removed).
    projection = self.basis.T.dot(data)
    representation = self.basis.T.dot(self.basis) - np.eye(self.outputDim)

    # LCA state starts at zero; the threshold starts at half the strongest
    # projection and decays each iteration down to minThreshold.
    states = np.zeros((self.outputDim, data.shape[1]))
    threshold = 0.5 * np.max(np.abs(projection), axis=0)
    activations = self._thresholdNonLinearity(states, threshold)

    for _ in xrange(self.numLcaIterations):
      # update dynamic system: leaky integration toward the projection minus
      # the inhibition exerted by currently active units.
      states *= (1 - self.lcaLearningRate)
      states += self.lcaLearningRate * (projection -
                                        representation.dot(activations))
      activations = self._thresholdNonLinearity(states, threshold)

      # decay threshold
      threshold *= self.thresholdDecay
      threshold[threshold < self.minThreshold] = self.minThreshold

    return activations
def plotLoss(self, filename=None):
    """
    Plots the loss history.

    :param filename (string) Can be provided to save the figure
    """
    # losses maps iteration number -> loss value; keys() and values() are
    # guaranteed to correspond pairwise for an unmodified dict.
    iterations = self.losses.keys()
    lossValues = self.losses.values()

    plt.figure()
    plt.plot(iterations, lossValues)
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
    plt.title("Learning curve for {}".format(self))

    # Persist the figure only when a target path was given.
    if filename is not None:
      plt.savefig(filename)
def plotBasis(self, filename=None):
    """
    Plots the basis functions, reshaped in 2-dimensional arrays.

    This representation makes the most sense for visual input.

    :param: filename (string) Can be provided to save the figure
    """
    # Each basis column is rendered as a dim x dim tile, so filterDim must be
    # a perfect square.
    if np.floor(np.sqrt(self.filterDim)) ** 2 != self.filterDim:
      print "Basis visualization is not available if filterDim is not a square."
      return

    dim = int(np.sqrt(self.filterDim))

    # Choose a near-square grid of outDimI x outDimJ tiles that can hold all
    # outputDim basis functions.
    if np.floor(np.sqrt(self.outputDim)) ** 2 != self.outputDim:
      outDimJ = np.sqrt(np.floor(self.outputDim / 2))
      outDimI = np.floor(self.outputDim / outDimJ)
      if outDimI > outDimJ:
        outDimI, outDimJ = outDimJ, outDimI
    else:
      outDimI = np.floor(np.sqrt(self.outputDim))
      outDimJ = outDimI
    outDimI, outDimJ = int(outDimI), int(outDimJ)

    # Background canvas at -1 forms a 1-pixel separator between tiles.
    basis = - np.ones((1 + outDimI * (dim + 1), 1 + outDimJ * (dim + 1)))

    # populate array with basis values, normalizing each tile to [-1, 1]
    k = 0
    for i in xrange(outDimI):
      for j in xrange(outDimJ):
        colorLimit = np.max(np.abs(self.basis[:, k]))
        mat = np.reshape(self.basis[:, k], (dim, dim)) / colorLimit
        basis[1 + i * (dim + 1) : 1 + i * (dim + 1) + dim, \
              1 + j * (dim + 1) : 1 + j * (dim + 1) + dim] = mat
        k += 1

    plt.figure()
    plt.subplot(aspect="equal")
    plt.pcolormesh(basis)
    plt.axis([0, 1 + outDimJ * (dim + 1), 0, 1 + outDimI * (dim + 1)])
    # remove ticks
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.title("Basis functions for {0}".format(self))
    if filename is not None:
      plt.savefig(filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reset(self): """ Reinitializes basis functions, iteration number and loss history. """
self.basis = np.random.randn(self.filterDim, self.outputDim) self.basis /= np.sqrt(np.sum(self.basis ** 2, axis=0)) self._iteration = 0 self.losses = {}
def read(cls, proto):
    """
    Reads deserialized data from proto object

    :param proto: (DynamicStructBuilder) Proto object
    :return (SparseNet) SparseNet instance
    """
    # Bypass __init__: every attribute is restored directly from the proto.
    sparsenet = object.__new__(cls)

    sparsenet.filterDim = proto.filterDim
    sparsenet.outputDim = proto.outputDim
    sparsenet.batchSize = proto.batchSize

    # Rebuild the {iteration: loss} history dict from the serialized list.
    lossHistoryProto = proto.losses
    sparsenet.losses = {}
    for i in xrange(len(lossHistoryProto)):
      sparsenet.losses[lossHistoryProto[i].iteration] = lossHistoryProto[i].loss

    sparsenet._iteration = proto.iteration
    sparsenet.basis = np.reshape(proto.basis,
                                 newshape=(sparsenet.filterDim,
                                           sparsenet.outputDim))

    # training parameters
    sparsenet.learningRate = proto.learningRate
    sparsenet.decayCycle = proto.decayCycle
    sparsenet.learningRateDecay = proto.learningRateDecay

    # LCA parameters
    sparsenet.numLcaIterations = proto.numLcaIterations
    sparsenet.lcaLearningRate = proto.lcaLearningRate
    sparsenet.thresholdDecay = proto.thresholdDecay
    sparsenet.minThreshold = proto.minThreshold
    sparsenet.thresholdType = proto.thresholdType

    # debugging
    sparsenet.verbosity = proto.verbosity
    sparsenet.showEvery = proto.showEvery
    sparsenet.seed = int(proto.seed)

    # NOTE(review): seed was just produced by int(...), so it cannot be None
    # here; this guard only matters if the line above changes -- confirm.
    if sparsenet.seed is not None:
      np.random.seed(sparsenet.seed)
      random.seed(sparsenet.seed)

    return sparsenet
def write(self, proto):
    """
    Writes serialized data to proto object

    :param proto: (DynamicStructBuilder) Proto object
    """
    proto.filterDim = self.filterDim
    proto.outputDim = self.outputDim
    proto.batchSize = self.batchSize

    # Serialize the {iteration: loss} dict as a list of (iteration, loss)
    # entries in the proto.
    lossHistoryProto = proto.init("losses", len(self.losses))
    i = 0
    for iteration, loss in self.losses.iteritems():
      iterationLossHistoryProto = lossHistoryProto[i]
      iterationLossHistoryProto.iteration = iteration
      iterationLossHistoryProto.loss = float(loss)
      i += 1

    proto.iteration = self._iteration
    # NOTE(review): type('float', (float,), {}) builds a throwaway float
    # subclass; presumably this coerces numpy scalars to plain Python floats
    # for serialization -- astype(float) looks equivalent, confirm before
    # simplifying.
    proto.basis = list(
      self.basis.flatten().astype(type('float', (float,), {}))
    )

    # training parameters
    proto.learningRate = self.learningRate
    proto.decayCycle = self.decayCycle
    proto.learningRateDecay = self.learningRateDecay

    # LCA parameters
    proto.numLcaIterations = self.numLcaIterations
    proto.lcaLearningRate = self.lcaLearningRate
    proto.thresholdDecay = self.thresholdDecay
    proto.minThreshold = self.minThreshold
    proto.thresholdType = self.thresholdType

    # debugging
    proto.verbosity = self.verbosity
    proto.showEvery = self.showEvery
    proto.seed = self.seed
def reset(self):
    """
    Reset the state of the Union Temporal Pooler.
    """
    numColumns = self.getNumColumns()
    numInputs = self.getNumInputs()

    # Reset Union Temporal Pooler fields
    self._poolingActivation = numpy.zeros(numColumns, dtype=REAL_DTYPE)
    self._unionSDR = numpy.array([], dtype=UINT_DTYPE)
    # Timers start far in the past so no column appears recently active.
    self._poolingTimer = numpy.ones(numColumns, dtype=REAL_DTYPE) * 1000
    self._poolingActivationInitLevel = numpy.zeros(numColumns,
                                                   dtype=REAL_DTYPE)
    self._preActiveInput = numpy.zeros(numInputs, dtype=REAL_DTYPE)
    self._prePredictedActiveInput = numpy.zeros((numInputs,
                                                 self._historyLength),
                                                dtype=REAL_DTYPE)

    # Reset Spatial Pooler fields
    self.setOverlapDutyCycles(numpy.zeros(numColumns, dtype=REAL_DTYPE))
    self.setActiveDutyCycles(numpy.zeros(numColumns, dtype=REAL_DTYPE))
    self.setMinOverlapDutyCycles(numpy.zeros(numColumns, dtype=REAL_DTYPE))
    self.setBoostFactors(numpy.ones(numColumns, dtype=REAL_DTYPE))
def compute(self, activeInput, predictedActiveInput, learn):
    """
    Computes one cycle of the Union Temporal Pooler algorithm.

    @param activeInput (numpy array) A numpy array of 0's and 1's that
           comprises the input to the union pooler
    @param predictedActiveInput (numpy array) A numpy array of 0's and 1's
           that comprises the correctly predicted input to the union pooler
    @param learn (boolean) A boolean value indicating whether learning
           should be performed
    """
    assert numpy.size(activeInput) == self.getNumInputs()
    assert numpy.size(predictedActiveInput) == self.getNumInputs()
    self._updateBookeepingVars(learn)

    # Compute proximal dendrite overlaps with active and active-predicted
    # inputs, then combine them with their configured weights.
    overlapsActive = self._calculateOverlap(activeInput)
    overlapsPredictedActive = self._calculateOverlap(predictedActiveInput)
    totalOverlap = (overlapsActive * self._activeOverlapWeight +
                    overlapsPredictedActive *
                    self._predictedActiveOverlapWeight).astype(REAL_DTYPE)

    if learn:
      boostFactors = numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE)
      self.getBoostFactors(boostFactors)
      boostedOverlaps = boostFactors * totalOverlap
    else:
      boostedOverlaps = totalOverlap

    activeCells = self._inhibitColumns(boostedOverlaps)
    self._activeCells = activeCells

    # Decrement pooling activation of all cells
    self._decayPoolingActivation()

    # Update the poolingActivation of current active Union Temporal Pooler
    # cells
    self._addToPoolingActivation(activeCells, overlapsPredictedActive)

    # update union SDR
    self._getMostActiveCells()

    if learn:
      # adapt permanence of connections from predicted active inputs to newly
      # active cell.  This step is the spatial pooler learning rule, applied
      # only to the predictedActiveInput.
      # Todo: should we also include unpredicted active input in this step?
      self._adaptSynapses(predictedActiveInput, activeCells,
                          self.getSynPermActiveInc(),
                          self.getSynPermInactiveDec())

      # Increase permanence of connections from predicted active inputs to
      # cells in the union SDR.  This is Hebbian learning applied to the
      # current time step.
      self._adaptSynapses(predictedActiveInput, self._unionSDR,
                          self._synPermPredActiveInc, 0.0)

      # adapt permenence of connections from previously predicted inputs to
      # newly active cells.  This is a reinforcement learning rule that
      # considers previous input to the current cell.
      for i in xrange(self._historyLength):
        self._adaptSynapses(self._prePredictedActiveInput[:, i], activeCells,
                            self._synPermPreviousPredActiveInc, 0.0)

      # Homeostasis learning inherited from the spatial pooler
      self._updateDutyCycles(totalOverlap.astype(UINT_DTYPE), activeCells)
      self._bumpUpWeakColumns()
      self._updateBoostFactors()
      if self._isUpdateRound():
        self._updateInhibitionRadius()
        self._updateMinDutyCycles()

    # save inputs from the previous time step; the prediction history is a
    # ring shifted by one column each step, newest input in column 0.
    self._preActiveInput = copy.copy(activeInput)
    self._prePredictedActiveInput = numpy.roll(self._prePredictedActiveInput,
                                               1, 1)
    if self._historyLength > 0:
      self._prePredictedActiveInput[:, 0] = predictedActiveInput

    return self._unionSDR
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _decayPoolingActivation(self): """ Decrements pooling activation of all cells """
if self._decayFunctionType == 'NoDecay': self._poolingActivation = self._decayFunction.decay(self._poolingActivation) elif self._decayFunctionType == 'Exponential': self._poolingActivation = self._decayFunction.decay(\ self._poolingActivationInitLevel, self._poolingTimer) return self._poolingActivation
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _addToPoolingActivation(self, activeCells, overlaps): """ Adds overlaps from specified active cells to cells' pooling activation. @param activeCells: Indices of those cells winning the inhibition step @param overlaps: A current set of overlap values for each cell @return current pooling activation """
self._poolingActivation[activeCells] = self._exciteFunction.excite( self._poolingActivation[activeCells], overlaps[activeCells]) # increase pooling timers for all cells self._poolingTimer[self._poolingTimer >= 0] += 1 # reset pooling timer for active cells self._poolingTimer[activeCells] = 0 self._poolingActivationInitLevel[activeCells] = self._poolingActivation[activeCells] return self._poolingActivation
def _getMostActiveCells(self):
    """
    Gets the most active cells in the Union SDR having at least non-zero
    activation in sorted order.

    @return: a numpy uint array of cell indices (empty until the pooler has
             seen more than self._minHistory steps)
    """
    poolingActivation = self._poolingActivation
    nonZeroCells = numpy.argwhere(poolingActivation > 0)[:, 0]

    # include a tie-breaker before sorting
    poolingActivationSubset = poolingActivation[nonZeroCells] + \
                              self._poolingActivation_tieBreaker[nonZeroCells]
    potentialUnionSDR = nonZeroCells[
      numpy.argsort(poolingActivationSubset)[::-1]]

    # Keep at most _maxUnionCells winners.
    topCells = potentialUnionSDR[0: self._maxUnionCells]

    if max(self._poolingTimer) > self._minHistory:
      self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
    else:
      # Fix: previously assigned a plain Python list here, which broke type
      # consistency with reset() where _unionSDR is always a uint numpy array.
      self._unionSDR = numpy.array([], dtype=UINT_DTYPE)

    return self._unionSDR
def createNetwork(networkConfig):
    """
    Create and initialize the specified network instance.

    @param networkConfig: (dict) the configuration of this network.
    @return network: (Network) The actual network
    @raises ValueError: if networkConfig["networkType"] is not recognized
    """
    registerAllResearchRegions()
    network = Network()

    networkType = networkConfig["networkType"]
    if networkType == "L4L2Column":
      return createL4L2Column(network, networkConfig, "_0")
    elif networkType == "MultipleL4L2Columns":
      return createMultipleL4L2Columns(network, networkConfig)
    elif networkType == "MultipleL4L2ColumnsWithTopology":
      return createMultipleL4L2ColumnsWithTopology(network, networkConfig)
    elif networkType == "L2456Columns":
      return createL2456Columns(network, networkConfig)
    elif networkType == "L4L2TMColumn":
      return createL4L2TMColumn(network, networkConfig, "_0")
    elif networkType == "CombinedSequenceColumn":
      return createCombinedSequenceColumn(network, networkConfig, "_0")

    # Fix: an unknown type previously fell through and silently returned
    # None, deferring the failure to the caller; fail loudly instead.
    raise ValueError("Unknown networkType: {0}".format(networkType))
def printNetwork(network):
    """
    Given a network, print out regions sorted by phase
    """
    print "The network has",len(network.regions.values()),"regions"
    for p in range(network.getMaxPhase()):
      print "=== Phase",p
      for region in network.regions.values():
        # Only the first phase of each region is compared, so regions
        # spanning multiple phases are listed under their first phase only.
        if network.getPhases(region.name)[0] == p:
          print " ",region.name
def fit_params_to_1d_data(logX):
    """
    Fit skewed normal distributions to 1-D capacity data, and return the
    distribution parameters.

    Args
    ----
    logX: Logarithm of one-dimensional capacity data, indexed by module and
          phase resolution index

    Returns
    -------
    Array of shape (numModules, numResolutions, 3) holding the fitted
    skew-normal (shape, loc, scale) triple for every (module, resolution)
    pair.
    """
    numModules = logX.shape[0]
    numResolutions = logX.shape[1]
    params = np.zeros((numModules, numResolutions, 3))
    for moduleIdx in range(numModules):
      for resolutionIdx in range(numResolutions):
        sample = logX[moduleIdx, resolutionIdx]
        params[moduleIdx, resolutionIdx] = skewnorm.fit(sample)
    return params
def get_interpolated_params(m_frac, ph, params):
    """
    Get parameters describing a 1-D capacity distribution for fractional
    number of modules.

    Each skew-normal parameter (shape, location, scale) is linearly
    extrapolated from the values fitted at 1, 2 and 3 modules.

    :param m_frac: fractional module count to evaluate at
    :param ph:     phase-resolution index into params
    :param params: fitted parameters indexed [module, phase, param]
    :return: tuple (a, loc, scale)
    """
    moduleCounts = np.arange(1, 4)
    interpolated = []
    # param index 0 = shape (a), 1 = location, 2 = scale
    for paramIdx in range(3):
      slope, offset = np.polyfit(moduleCounts, params[:3, ph, paramIdx],
                                 deg=1)
      interpolated.append(slope * m_frac + offset)
    return tuple(interpolated)
def rerunExperimentFromLogfile(logFilename):
    """
    Create an experiment class according to the sequence of operations in
    logFile and return resulting experiment instance.

    The log file is created by setting the 'logCalls' constructor parameter
    to True.
    """
    callLog = LoggingDecorator.load(logFilename)

    # The first logged entry is assumed to be the constructor invocation.
    constructorArgs = callLog[0][1]
    exp = L246aNetwork(*constructorArgs["args"], **constructorArgs["kwargs"])

    # Replay every subsequent call with its recorded arguments.
    for call in callLog[1:]:
      methodName, callArgs = call[0], call[1]
      getattr(exp, methodName)(*callArgs["args"], **callArgs["kwargs"])

    return exp
def learn(self, objects):
    """
    Learns all provided objects

    :param objects: dict mapping object name to array of sensations, where
                    each sensation is composed of location and feature SDR
                    for each column.
                    Note: Each column must have the same number of sensations
                    as the other columns.
    :type objects: dict[str, array]
    """
    self.setLearning(True)
    for objectName, sensationList in objects.iteritems():
      self.sendReset()
      print "Learning :", objectName

      prevLoc = [None] * self.numColumns
      numFeatures = len(sensationList[0])
      displacement = [0] * self.dimensions

      for sensation in xrange(numFeatures):
        for col in xrange(self.numColumns):
          location = np.array(sensationList[col][sensation][0])
          feature = sensationList[col][sensation][1]

          # Compute displacement from previous location
          if prevLoc[col] is not None:
            displacement = location - prevLoc[col]
          prevLoc[col] = location

          # learn each pattern multiple times
          for _ in xrange(self.repeat):
            # Sense feature at location
            self.motorInput[col].addDataToQueue(displacement)
            self.sensorInput[col].addDataToQueue(feature, False, 0)
            # Only move to the location on the first sensation.
            displacement = [0] * self.dimensions

      # Run the network once per queued (repeat x feature) sensation.
      self.network.run(self.repeat * numFeatures)

      # update L2 representations for the object
      self.learnedObjects[objectName] = self.getL2Representations()
def getL2Representations(self):
    """
    Returns the active representation in L2.

    @return list with one set of active-cell indices per L2 region
    """
    representations = []
    for region in self.L2Regions:
      activeCells = region.getSelf()._pooler.getActiveCells()
      representations.append(set(activeCells))
    return representations
def excite(self, currentActivation, inputs):
    """
    Increases current activation by a logistic (sigmoid) function of the
    inputs.

    @param currentActivation (numpy array) Current activation levels for
                             each cell; updated in place via +=.
    @param inputs            (numpy array) inputs for each cell
    @return the (mutated) currentActivation array
    """
    # Logistic curve rising from _minValue to _maxValue, centered at
    # _xMidpoint, with slope controlled by _steepness.
    valueRange = self._maxValue - self._minValue
    denominator = 1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint))
    currentActivation += self._minValue + valueRange / denominator
    return currentActivation
def plot(self):
    """
    plot the activation function
    """
    # Interactive mode so the figure appears without blocking the caller.
    plt.ion()
    plt.show()
    # Sample the function over a fixed input range.
    x = numpy.linspace(0, 15, 100)
    y = numpy.zeros(x.shape)
    # excite adds the sigmoid of x onto the zero baseline (it also mutates
    # y in place, but the return value is what is plotted).
    y = self.excite(y, x)
    plt.plot(x, y)
    plt.xlabel('Input')
    plt.ylabel('Persistence')
    plt.title('Sigmoid Activation Function')
def computeAccuracyEnding(predictions, truths, iterations,
                          resets=None, randoms=None, num=None,
                          sequenceCounter=None):
    """
    Compute accuracy on the sequence ending.

    Only iterations that are immediately followed by a reset or a random
    jump (i.e. the last element of a sequence) are scored; if neither flag
    list is provided, every iteration with a known truth is scored.

    :param predictions: per-iteration containers of predicted values
    :param truths: per-iteration ground-truth values (None entries skipped)
    :param iterations: per-iteration iteration numbers
    :param resets: optional per-iteration reset flags
    :param randoms: optional per-iteration random-jump flags
    :param num: optional upper bound on the iteration index to score
    :param sequenceCounter: per-iteration sequence numbers
    :return: (accuracy list, iteration-number list, sequence-number list)
    """
    accuracy = []
    numIteration = []
    numSequences = []

    for i in range(len(predictions) - 1):
      if num is not None and i > num:
        continue
      if truths[i] is None:
        continue

      # identify the end of sequence.  Fix: guard each flag list separately
      # -- previously `resets[i+1] or randoms[i+1]` raised TypeError whenever
      # only one of the two lists was provided.
      if resets is not None or randoms is not None:
        endOfSequence = ((resets is not None and resets[i + 1]) or
                         (randoms is not None and randoms[i + 1]))
        if not endOfSequence:
          continue

      correct = truths[i] is None or truths[i] in predictions[i]

      accuracy.append(correct)
      numSequences.append(sequenceCounter[i])
      numIteration.append(iterations[i])

    return (accuracy, numIteration, numSequences)
def createValidationDataSampler(dataset, ratio):
    """
    Create `torch.utils.data.Sampler`s used to split the dataset into 2
    random sampled subsets. The first should be used for training and the
    second for validation.

    :param dataset: A valid torch.utils.data.Dataset
                    (i.e. torchvision.datasets.MNIST)
    :param ratio: The percentage of the dataset to be used for training.
                  The remaining (1-ratio)% will be used for validation
    :return: tuple with 2 torch.utils.data.Sampler. (train, validate)
    """
    # Shuffle all indices once, then cut the permutation at the ratio point.
    shuffled = np.random.permutation(len(dataset))
    cutoff = int(len(shuffled) * ratio)

    trainSampler = torch.utils.data.SubsetRandomSampler(
      indices=shuffled[:cutoff])
    validationSampler = torch.utils.data.SubsetRandomSampler(
      indices=shuffled[cutoff:])

    return (trainSampler, validationSampler)
def register_nonzero_counter(network, stats):
    """
    Register forward hooks to count the number of nonzero floating point
    values from all the tensors used by the given network during inference.

    :param network: The network to attach the counter
    :param stats: Dictionary holding the counter.
    :raises ValueError: if a counter is already registered on the network or
                        one of its modules, or if stats is not a dict
    """
    if hasattr(network, "__counter_nonzero__"):
      raise ValueError("nonzero counter was already registered for this network")
    if not isinstance(stats, dict):
      raise ValueError("stats must be a dictionary")

    network.__counter_nonzero__ = stats
    handles = []
    for name, module in network.named_modules():
      # Every module, including the root network itself, gets the hook.
      handles.append(module.register_forward_hook(_nonzero_counter_hook))
      if network == module:
        continue
      if hasattr(module, "__counter_nonzero__"):
        raise ValueError("nonzero counter was already registered for this module")
      # Each submodule records into its own nested dict, shared into the
      # root stats under the module's qualified name.
      child_data = dict()
      network.__counter_nonzero__[name] = child_data
      module.__counter_nonzero__ = child_data

    # Keep the handles so the hooks can be removed later.
    network.__counter_nonzero_handles__ = handles
def initialize(self, params, repetition):
    """
    Initialize experiment parameters and default values from configuration
    file. Called at the beginning of each experiment and each repetition.
    """
    super(TinyCIFARExperiment, self).initialize(params, repetition)
    # Network flavor to instantiate; defaults to the sparse variant.
    self.network_type = params.get("network_type", "sparse")
def logger(self, iteration, ret):
    """Print out relevant information at each epoch"""
    print("Learning rate: {:f}".format(self.lr_scheduler.get_lr()[0]))
    entropies = getEntropies(self.model)
    print("Entropy and max entropy: ", float(entropies[0]), entropies[1])
    print("Training time for epoch=", self.epoch_train_time)
    # One accuracy/loss line per evaluated noise level.
    for noise in self.noise_values:
      print("Noise= {:3.2f}, loss = {:5.4f}, Accuracy = {:5.3f}%".format(
          noise, ret[noise]["loss"], 100.0*ret[noise]["accuracy"]))
    print("Full epoch time =", self.epoch_time)
    # Track the best noise-robustness score, but only once the clean
    # (noise=0.0) accuracy is reasonable.
    # NOTE(review): best_epoch is updated even when best_noise_score did not
    # improve -- confirm that is intended.
    if ret[0.0]["accuracy"] > 0.7:
      self.best_noise_score = max(ret[0.1]["accuracy"], self.best_noise_score)
      self.best_epoch = iteration
def plotAccuracyAndMCsDuringDecrementChange(results, title="", yaxis=""):
    """
    Plot accuracy vs decrement value

    NOTE(review): despite the name, this currently only aggregates and
    prints the accuracies; no figure is produced and title/yaxis are unused.
    """
    # Collect the distinct decrement values and minicolumn (input) sizes
    # present in the results.
    decrementRange = []
    mcRange = []
    for r in results:
      if r["basalPredictedSegmentDecrement"] not in decrementRange:
        decrementRange.append(r["basalPredictedSegmentDecrement"])
      if r["inputSize"] not in mcRange:
        mcRange.append(r["inputSize"])
    decrementRange.sort()
    mcRange.sort()
    print decrementRange
    print mcRange

    ########################################################################
    #
    # Accumulate all the results per column in a convergence array.
    #
    # accuracy[o,f] = accuracy with o objects in training
    #                 and f unique features.
    accuracy = numpy.zeros((len(mcRange), len(decrementRange)))
    TMAccuracy = numpy.zeros((len(mcRange), len(decrementRange)))
    totals = numpy.zeros((len(mcRange), len(decrementRange)))
    for r in results:
      dec = r["basalPredictedSegmentDecrement"]
      nf = r["inputSize"]
      accuracy[mcRange.index(nf), decrementRange.index(dec)] += \
        r["objectAccuracyL2"]
      TMAccuracy[mcRange.index(nf), decrementRange.index(dec)] += \
        r["sequenceCorrectClassificationsTM"]
      totals[mcRange.index(nf), decrementRange.index(dec)] += 1

    # Print mean accuracies (sum / count) per minicolumn size.
    for i,f in enumerate(mcRange):
      print i, f, accuracy[i] / totals[i]
      print i, f, TMAccuracy[i] / totals[i]
      print i, f, totals[i]
      print
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gen4(dirName): """Plots 4A and 4B"""
# Generate images similar to those used in the first plot for the section # "Simulations with Pure Temporal Sequences" try: resultsFig4A = os.path.join(dirName, "pure_sequences_example.pkl") with open(resultsFig4A, "rb") as f: results = cPickle.load(f) for trialNum, stat in enumerate(results["statistics"]): plotOneInferenceRun( stat, itemType="a single sequence", fields=[ ("L4 PredictedActive", "Predicted active cells in sensorimotor layer"), ("TM NextPredicted", "Predicted cells in temporal sequence layer"), ("TM PredictedActive", "Predicted active cells in temporal sequence layer"), ], basename="pure_sequences", trialNumber=trialNum, plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)), "detailed_plots") ) print "Plots for Fig 4A generated in 'detailed_plots'" except Exception, e: print "\nCould not generate plots for Fig 4A: " traceback.print_exc() print # Generate the second plot for the section "Simulations with Pure # Temporal Sequences" try: plotAccuracyDuringSequenceInference( dirName, title="Relative performance of layers while inferring temporal sequences", yaxis="Accuracy (%)") print "Plots for Fig 4B generated in 'plots'" except Exception, e: print "\nCould not generate plots for Fig 4B: " traceback.print_exc() print # Generate the accuracy vs number of sequences try: plotAccuracyVsSequencesDuringSequenceInference( dirName, title="Relative performance of layers while inferring temporal sequences", yaxis="Accuracy (%)") print "Plots for Fig 4C generated in 'plots'" except Exception, e: print "\nCould not generate plots for Fig 4C: " traceback.print_exc() print
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute(self, inputs, outputs): """ Get the next record from the queue and encode it. The fields for inputs and outputs are as defined in the spec above. """
if len(self.queue) > 0:
  # Take the top element of the data queue.
  # NOTE(review): list.pop() removes the most recently appended record
  # (LIFO); confirm callers enqueue records in the intended order.
  data = self.queue.pop()
else:
  raise Exception("RawSensor: No data to encode: queue is empty ")

# Copy data into output vectors
outputs["resetOut"][0] = data["reset"]
outputs["sequenceIdOut"][0] = data["sequenceId"]
# Clear the SDR, then set only the active (non-zero) bit indices.
outputs["dataOut"][:] = 0
outputs["dataOut"][data["nonZeros"]] = 1

if self.verbosity > 1:
  print "RawSensor outputs:"
  print "sequenceIdOut: ", outputs["sequenceIdOut"]
  print "resetOut: ", outputs["resetOut"]
  print "dataOut: ", outputs["dataOut"].nonzero()[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convertSequenceMachineSequence(generatedSequences): """ Convert a sequence from the SequenceMachine into a list of sequences, such that each sequence is a list of set of SDRs. """
# Split the flat SDR stream into separate sequences; a None entry marks
# the end of one sequence and the start of the next.
sequenceList = []
currentSequence = []
for s in generatedSequences:
  if s is None:
    sequenceList.append(currentSequence)
    currentSequence = []
  else:
    currentSequence.append(s)

return sequenceList
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2, sharedRange=None, seed=42): """ Generate high order sequences using SequenceMachine """
# Lots of room for noise sdrs patternAlphabetSize = 10*(sequenceLength * sequenceCount) patternMachine = PatternMachine(n, w, patternAlphabetSize, seed) sequenceMachine = SequenceMachine(patternMachine, seed) numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength, sharedRange=sharedRange ) generatedSequences = sequenceMachine.generateFromNumbers(numbers) return sequenceMachine, generatedSequences, numbers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def runInference(exp, sequences, enableFeedback=True): """ Run inference on this set of sequences and compute error """
if enableFeedback:
  print "Feedback enabled: "
else:
  print "Feedback disabled: "

# Error is the number of active cells averaged over all sequences: fewer
# active cells implies sparser, more confident inference.
error = 0
activityTraces = []
responses = []
for i,sequence in enumerate(sequences):
  (avgActiveCells, avgPredictedActiveCells, activityTrace,
   responsesThisSeq) = exp.infer(
    sequence, sequenceNumber=i, enableFeedback=enableFeedback)
  error += avgActiveCells
  activityTraces.append(activityTrace)
  responses.append(responsesThisSeq)
  print " "
error /= len(sequences)
print "Average error = ",error
return error, activityTraces, responses
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def runStretch(noiseLevel=None, profile=False): """ Stretch test that learns a lot of objects. Parameters: @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference """
exp = L4L2Experiment(
  "stretch_L10_F10_C2",
  numCorticalColumns=2,
)

# 10 random objects, each made of 10 (location, feature) pairs drawn from
# pools of 10 unique locations and 10 unique features.
objects = createObjectMachine(
  machineType="simple",
  numInputBits=20,
  sensorInputSize=1024,
  externalInputSize=1024,
  numCorticalColumns=2,
)
objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10)

print "Objects are:"
for object, pairs in objects.objects.iteritems():
  print str(object) + ": " + str(pairs)

exp.learnObjects(objects.provideObjectsToLearn())
if profile:
  exp.printProfile(reset=True)

# For inference, we will check and plot convergence for object 0. We create a
# sequence of random sensations for each column. We will present each
# sensation for 4 time steps to let it settle and ensure it converges.
# Each column gets an independently shuffled order of the same object's pairs.
objectCopy1 = [pair for pair in objects[0]]
objectCopy2 = [pair for pair in objects[0]]
objectCopy3 = [pair for pair in objects[0]]
random.shuffle(objectCopy1)
random.shuffle(objectCopy2)
random.shuffle(objectCopy3)

# stay multiple steps on each sensation
objectSensations1 = []
for pair in objectCopy1:
  for _ in xrange(4):
    objectSensations1.append(pair)

# stay multiple steps on each sensation
objectSensations2 = []
for pair in objectCopy2:
  for _ in xrange(4):
    objectSensations2.append(pair)

# stay multiple steps on each sensation
objectSensations3 = []
for pair in objectCopy3:
  for _ in xrange(4):
    objectSensations3.append(pair)

inferConfig = {
  "numSteps": len(objectSensations1),
  "noiseLevel": noiseLevel,
  "pairs": {
    0: objectSensations1,
    1: objectSensations2,
    # 2: objectSensations3,  # Uncomment for 3 columns
  }
}

exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
if profile:
  exp.printProfile()

exp.plotInferenceStats(
  fields=["L2 Representation",
          "Overlap L2 with object",
          "L4 Representation"],
  onePlot=False,
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trainModel(model, loader, optimizer, device, criterion=F.nll_loss, batches_in_epoch=sys.maxsize, batch_callback=None, progress_bar=None): """ Train the given model by iterating through mini batches. An epoch ends after one pass through the training set, or if the number of mini batches exceeds the parameter "batches_in_epoch". :param model: pytorch model to be trained :type model: torch.nn.Module :param loader: train dataset loader :type loader: :class:`torch.utils.data.DataLoader` :param optimizer: Optimizer object used to train the model. This function will train the model on every batch using this optimizer and the :func:`torch.nn.functional.nll_loss` function :param batches_in_epoch: Max number of mini batches to train. :param device: device to use ('cpu' or 'cuda') :type device: :class:`torch.device :param criterion: loss function to use :type criterion: function :param batch_callback: Callback function to be called on every batch with the following parameters: model, batch_idx :type batch_callback: function :param progress_bar: Optional :class:`tqdm` progress bar args. None for no progress bar :type progress_bar: dict or None """
model.train()
if progress_bar is not None:
  loader = tqdm(loader, **progress_bar)
  # update progress bar total based on batches_in_epoch
  if batches_in_epoch < len(loader):
    loader.total = batches_in_epoch

for batch_idx, (data, target) in enumerate(loader):
  data, target = data.to(device), target.to(device)
  optimizer.zero_grad()
  output = model(data)
  loss = criterion(output, target)
  loss.backward()
  optimizer.step()

  if batch_callback is not None:
    batch_callback(model=model, batch_idx=batch_idx)

  # NOTE(review): this breaks only AFTER processing batch index
  # `batches_in_epoch`, i.e. batches_in_epoch + 1 batches are trained;
  # confirm whether this off-by-one is intended.
  if batch_idx >= batches_in_epoch:
    break

if progress_bar is not None:
  # Force the bar to 100% before closing (loop may end early).
  loader.n = loader.total
  loader.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluateModel(model, loader, device, batches_in_epoch=sys.maxsize, criterion=F.nll_loss, progress=None): """ Evaluate pre-trained model using given test dataset loader. :param model: Pretrained pytorch model :type model: torch.nn.Module :param loader: test dataset loader :type loader: :class:`torch.utils.data.DataLoader` :param device: device to use ('cpu' or 'cuda') :type device: :class:`torch.device :param batches_in_epoch: Max number of mini batches to test on. :param criterion: loss function to use :type criterion: function :param progress: Optional :class:`tqdm` progress bar args. None for no progress bar :type progress: dict or None :return: dictionary with computed "accuracy", "loss", "total_correct". The loss value is computed using :func:`torch.nn.functional.nll_loss` :rtype: dict """
model.eval()
loss = 0
correct = 0
# Normalize by the full sampler length even when batches_in_epoch cuts the
# loop short; callers relying on the reported loss/accuracy should be
# aware of that.
dataset_len = len(loader.sampler)

if progress is not None:
  loader = tqdm(loader, **progress)

with torch.no_grad():
  for batch_idx, (data, target) in enumerate(loader):
    data, target = data.to(device), target.to(device)
    output = model(data)
    # Sum (not mean) the per-sample losses so dividing by dataset_len
    # below gives the true average.
    loss += criterion(output, target, reduction='sum').item()
    pred = output.max(1, keepdim=True)[1]
    correct += pred.eq(target.view_as(pred)).sum().item()
    # NOTE(review): like trainModel, this evaluates batches_in_epoch + 1
    # batches before breaking — confirm the off-by-one is intended.
    if batch_idx >= batches_in_epoch:
      break

if progress is not None:
  loader.close()

loss /= dataset_len
accuracy = correct / dataset_len

return {"total_correct": correct,
        "loss": loss,
        "accuracy": accuracy}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_false_positive_experiment_dim( numActive = 128, dim = 500, numSamples = 1000, numDendrites = 500, synapses = 24, numTrials = 10000, seed = 42, nonlinearity = sigmoid_nonlinearity(11.5, 5)): """ Run an experiment to test the false positive rate based on number of synapses per dendrite, dimension and sparsity. Uses two competing neurons, along the P&M model. Based on figure 5B in the original SDR paper. """
numpy.random.seed(seed) fps = [] fns = [] totalUnclassified = 0 for trial in range(numTrials): # data = generate_evenly_distributed_data_sparse(dim = dim, # num_active = numActive, # num_samples = numSamples) # labels = numpy.asarray([1 for i in range(numSamples / 2)] + # [-1 for i in range(numSamples / 2)]) # flipped_labels = labels * -1 negData = generate_evenly_distributed_data_sparse(dim = dim, num_active = numActive, num_samples = numSamples/2) posData = generate_evenly_distributed_data_sparse(dim = dim, num_active = numActive, num_samples = numSamples/2) halfLabels = numpy.asarray([1 for _ in range(numSamples / 2)]) flippedHalfLabels = halfLabels * -1 neuron = Neuron(size =synapses * numDendrites, num_dendrites = numDendrites, dendrite_length = synapses, dim = dim, nonlinearity = nonlinearity) neg_neuron = Neuron(size =synapses * numDendrites, num_dendrites = numDendrites, dendrite_length = synapses, dim = dim, nonlinearity = nonlinearity) neuron.HTM_style_initialize_on_positive_data(posData) neg_neuron.HTM_style_initialize_on_positive_data(negData) # Get error for positively labeled data fp, fn, uc = get_error(posData, halfLabels, [neuron], [neg_neuron]) totalUnclassified += uc fps.append(fp) fns.append(fn) # Get error for negatively labeled data fp, fn, uc = get_error(negData, flippedHalfLabels, [neuron], [neg_neuron]) totalUnclassified += uc fps.append(fp) fns.append(fn) print "Error with n = {} : {} FP, {} FN, {} unclassified".format( dim, sum(fps), sum(fns), totalUnclassified) result = { "dim": dim, "totalFP": sum(fps), "totalFN": sum(fns), "total mistakes": sum(fns + fps) + totalUnclassified, "error": float(sum(fns + fps) + totalUnclassified) / (numTrials * numSamples), "totalSamples": numTrials * numSamples, "a": numActive, "num_dendrites": numDendrites, "totalUnclassified": totalUnclassified, "synapses": 24, "seed": seed, } return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getRadius(self, location): """ Returns the radius associated with the given location. This is a bit of an awkward argument to the CoordinateEncoder, which specifies the resolution (it was used to encode differently depending on speed in the GPS encoder). Since the coordinates are object-centric, for now we use the "point radius" as a heuristic, but this should be experimented with and improved. """
# TODO: find better heuristic return int(math.sqrt(sum([coord ** 2 for coord in location])))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _addNoise(self, pattern, noiseLevel): """ Adds noise to the given pattern and returns a noisy copy (a set of bit indices). """
if pattern is None:
  return None

# For each ON bit, with probability noiseLevel replace it with a randomly
# chosen bit index.
# NOTE(review): replacement indices are drawn from [0, max(pattern)]
# rather than the full input space, and the result is a set (duplicates
# collapse), so the effective noise can be lower than noiseLevel —
# confirm this is intended.
newBits = []
for bit in pattern:
  if random.random() < noiseLevel:
    newBits.append(random.randint(0, max(pattern)))
  else:
    newBits.append(bit)

return set(newBits)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apicalCheck(self, apicalInput): """ Return 'recent' apically predicted cells for each tick of apical timer - finds active apical segments corresponding to predicted basal segment, @param apicalInput (numpy array) List of active input bits for the apical dendrite segments """
# Calculate predictions for this timestep (activeApicalSegments, matchingApicalSegments, apicalPotentialOverlaps) = self._calculateSegmentActivity( self.apicalConnections, apicalInput, self.connectedPermanence, self.activationThreshold, self.minThreshold, self.reducedBasalThreshold) apicallySupportedCells = self.apicalConnections.mapSegmentsToCells( activeApicalSegments) predictedCells = np.intersect1d( self.basalConnections.mapSegmentsToCells(self.activeBasalSegments), apicallySupportedCells) return predictedCells
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setupData(self, dataPath, numLabels=0, ordered=False, stripCats=False, seed=42, **kwargs): """ Main method of this class. Use for setting up a network data file. @param dataPath (str) Path to CSV file. @param numLabels (int) Number of columns of category labels. @param textPreprocess (bool) True will preprocess text while tokenizing. @param ordered (bool) Keep data samples (sequences) in order, otherwise randomize. @param seed (int) Random seed. @return dataFileName (str) Network data file name; same directory as input data file. """
# Tokenize the raw CSV into samples (and per-sample category labels).
self.split(dataPath, numLabels, **kwargs)

if not ordered:
  self.randomizeData(seed)

# Derive the output file names alongside the input data file.
filename, ext = os.path.splitext(dataPath)
classificationFileName = "{}_category.json".format(filename)
dataFileName = "{}_network{}".format(filename, ext)

if stripCats:
  self.stripCategories()

self.saveData(dataFileName, classificationFileName)

return dataFileName
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatSequence(tokens, categories, seqID, uniqueID): """Write the sequence of data records for this sample."""
# Every token record shares the sequence's categories and sequence id.
record = {"_category":categories,
          "_sequenceId":seqID}
data = []
reset = 1
for t in tokens:
  tokenRecord = record.copy()
  tokenRecord["_token"] = t
  # Only the first token of the sequence carries the reset flag.
  tokenRecord["_reset"] = reset
  tokenRecord["ID"] = uniqueID
  reset = 0
  data.append(tokenRecord)

return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saveData(self, dataOutputFile, categoriesOutputFile): """ Save the processed data and the associated category mapping. @param dataOutputFile (str) Location to save data @param categoriesOutputFile (str) Location to save category map @return (str) Path to the saved data file iff saveData() is successful. """
# NOTE(review): returns False (not a path) when there is nothing to save,
# while the docstring promises a path on success — callers should check
# the falsy case.
if self.records is None:
  return False

if not dataOutputFile.endswith("csv"):
  raise TypeError("data output file must be csv.")
if not categoriesOutputFile.endswith("json"):
  raise TypeError("category output file must be json")

# Ensure directory exists
dataOutputDirectory = os.path.dirname(dataOutputFile)
if not os.path.exists(dataOutputDirectory):
  os.makedirs(dataOutputDirectory)

categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
if not os.path.exists(categoriesOutputDirectory):
  os.makedirs(categoriesOutputDirectory)

with open(dataOutputFile, "w") as f:
  # Header
  writer = csv.DictWriter(f, fieldnames=self.fieldNames)
  writer.writeheader()

  # Types
  writer.writerow(self.types)

  # Special characters
  writer.writerow(self.specials)

  for data in self.records:
    for record in data:
      writer.writerow(record)

with open(categoriesOutputFile, "w") as f:
  f.write(json.dumps(self.categoryToId,
                     sort_keys=True,
                     indent=4,
                     separators=(",", ": ")))

return dataOutputFile
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generateSequence(self, text, preprocess=False): """ Return a list of lists representing the text sequence in network data format. Does not preprocess the text. """
# TODO: enable text preprocessing; abstract out the logic in split() into a common method. tokens = TextPreprocess().tokenize(text) cat = [-1] self.sequenceCount += 1 uniqueID = "q" data = self._formatSequence(tokens, cat, self.sequenceCount-1, uniqueID) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getSamples(netDataFile): """ Returns samples joined at reset points. @param netDataFile (str) Path to file (in the FileRecordStream format). @return samples (OrderedDict) Keys are sample number (in order they are read in). Values are two-tuples of sample text and category ints. """
try: with open(netDataFile) as f: reader = csv.reader(f) header = next(reader, None) next(reader, None) resetIdx = next(reader).index("R") tokenIdx = header.index("_token") catIdx = header.index("_category") idIdx = header.index("ID") currentSample = [] samples = OrderedDict() for line in reader: if int(line[resetIdx]) == 1: if len(currentSample) != 0: samples[line[idIdx]] = ([" ".join(currentSample)], [int(c) for c in line[catIdx].split(" ")]) currentSample = [line[tokenIdx]] else: currentSample.append(line[tokenIdx]) samples[line[idIdx]] = ([" ".join(currentSample)], [int(c) for c in line[catIdx].split(" ")]) return samples except IOError as e: print "Could not open the file {}.".format(netDataFile) raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getClassifications(networkDataFile): """ Returns the classifications at the indices where the data sequences reset. @param networkDataFile (str) Path to file in the FileRecordStream format @return (list) list of string versions of the classifications Sample output: ["0 1", "1", "1 2 3"] """
try:
  with open(networkDataFile) as f:
    reader = csv.reader(f)
    next(reader, None)
    next(reader, None)

    # Third header row flags special columns: R = reset, C = category.
    specials = next(reader)
    resetIdx = specials.index("R")
    classIdx = specials.index("C")

    classifications = []
    for line in reader:
      # Each sequence's first row (reset == 1) carries its categories.
      if int(line[resetIdx]) == 1:
        classifications.append(line[classIdx])

    return classifications

except IOError as e:
  print "Could not open the file {}.".format(networkDataFile)
  raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getNumberOfTokens(networkDataFile): """ Returns the number of tokens for each sequence @param networkDataFile (str) Path to file in the FileRecordStream format @return (list) list of number of tokens """
try:
  with open(networkDataFile) as f:
    reader = csv.reader(f)
    # Skip the field-name and type header rows; third row flags specials.
    next(reader, None)
    next(reader, None)
    resetIdx = next(reader).index("R")

    # A reset row closes the previous sequence's count and starts a new one.
    count = 0
    numTokens = []
    for line in reader:
      if int(line[resetIdx]) == 1:
        if count != 0:
          numTokens.append(count)
        count = 1
      else:
        count += 1
    # Flush the final sequence's count.
    numTokens.append(count)
    return numTokens

except IOError as e:
  print "Could not open the file {}.".format(networkDataFile)
  raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getResetsIndices(networkDataFile): """Returns the indices at which the data sequences reset."""
try:
  with open(networkDataFile) as f:
    reader = csv.reader(f)
    # Skip the field-name and type header rows; third row flags specials.
    next(reader, None)
    next(reader, None)
    resetIdx = next(reader).index("R")

    # Indices are relative to the first data row (headers excluded).
    resets = []
    for i, line in enumerate(reader):
      if int(line[resetIdx]) == 1:
        resets.append(i)
    return resets

except IOError as e:
  print "Could not open the file {}.".format(networkDataFile)
  raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lastNoiseCurve(expPath, suite, iteration="last"): """ Print the noise errors from the last iteration of this experiment """
# Noise levels are string keys into the stored experiment results.
noiseValues = ["0.0", "0.05", "0.1", "0.15", "0.2", "0.25", "0.3",
               "0.35", "0.4", "0.45", "0.5"]
print("\nNOISE CURVE =====",expPath,"====== ITERATION:",iteration,"=========")
try:
  # result maps each noise level to a dict holding its "testerror".
  result = suite.get_value(expPath, 0, noiseValues, iteration)
  info = []
  for k in noiseValues:
    info.append([k,result[k]["testerror"]])
  print(tabulate(info, headers=["noise","Test Error"], tablefmt="grid"))
  print("totalCorrect:", suite.get_value(expPath, 0, "totalCorrect",
                                         iteration))
except Exception:
  # Fixed: narrowed bare `except:` so KeyboardInterrupt/SystemExit are
  # no longer swallowed when the experiment can't be loaded.
  print("Couldn't load experiment",expPath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def learningCurve(expPath, suite): """ Print the test, validation and other scores from each iteration of this experiment. We select the test score that corresponds to the iteration with maximum validation accuracy. """
print("\nLEARNING CURVE ================",expPath,"=====================")
try:
  headers=["testResults","validation","bgResults","elapsedTime"]
  result = suite.get_value(expPath, 0, headers, "all")

  # Walk all iterations, tabulating each and remembering the iteration
  # with the highest validation accuracy (model-selection criterion).
  info = []
  maxValidationAccuracy = -1.0
  maxTestAccuracy = -1.0
  maxBGAccuracy = -1.0
  maxIter = -1
  for i,v in enumerate(zip(result["testResults"],result["validation"],
                           result["bgResults"], result["elapsedTime"])):
    info.append([i, v[0]["testerror"], v[1]["testerror"],
                 v[2]["testerror"], int(v[3])])
    if v[1]["testerror"] > maxValidationAccuracy:
      maxValidationAccuracy = v[1]["testerror"]
      maxTestAccuracy = v[0]["testerror"]
      maxBGAccuracy = v[2]["testerror"]
      maxIter = i

  headers.insert(0,"iteration")
  print(tabulate(info, headers=headers, tablefmt="grid"))
  print("Max validation score =", maxValidationAccuracy,
        " at iteration", maxIter)
  print("Test score at that iteration =", maxTestAccuracy)
  print("BG score at that iteration =", maxBGAccuracy)
except Exception:
  # Fixed: narrowed bare `except:` so KeyboardInterrupt/SystemExit are
  # no longer swallowed when the experiment can't be loaded.
  print("Couldn't load experiment",expPath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bestScore(expPath, suite): """ Given a single experiment, return the test, validation and other scores from the iteration with maximum validation accuracy. """
maxValidationAccuracy = -1.0 maxTestAccuracy = -1.0 maxTotalAccuracy = -1.0 maxBGAccuracy = -1.0 maxIter = -1 try: headers=["testResults", "validation", "bgResults", "elapsedTime", "totalCorrect"] result = suite.get_value(expPath, 0, headers, "all") for i,v in enumerate(zip(result["testResults"], result["validation"], result["bgResults"], result["elapsedTime"], result["totalCorrect"])): if v[1]["testerror"] > maxValidationAccuracy: maxValidationAccuracy = v[1]["testerror"] maxTestAccuracy = v[0]["testerror"] maxBGAccuracy = v[2]["testerror"] if v[4] is not None: maxTotalAccuracy = v[4] maxIter = i # print("Max validation score =", maxValidationAccuracy, " at iteration", maxIter) # print("Test score at that iteration =", maxTestAccuracy) # print("BG score at that iteration =", maxBGAccuracy) return maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy except: print("Couldn't load experiment",expPath) return None, None, None, None, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def findOptimalResults(expName, suite, outFile): """ Go through every experiment in the specified folder. For each experiment, find the iteration with the best validation score, and return the metrics associated with that iteration. """
writer = csv.writer(outFile)
headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy",
           "experiment path"]
writer.writerow(headers)
info = []
print("\n================",expName,"=====================")
try:
  # Retrieve the last totalCorrect from each experiment
  # Print them sorted from best to worst
  values, params = suite.get_values_fix_params(
    expName, 0, "testerror", "last")
  for p in params:
    expPath = p["name"]
    if not "results" in expPath:
      expPath = os.path.join("results", expPath)
    # For each sub-experiment, report the metrics at its best-validation
    # iteration and stream the row to the CSV as we go.
    maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(expPath, suite)
    row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy, expPath]
    info.append(row)
    writer.writerow(row)

  print(tabulate(info, headers=headers, tablefmt="grid"))
except Exception:
  # Fixed: narrowed bare `except:` so KeyboardInterrupt/SystemExit are
  # no longer swallowed when the experiment can't be analyzed.
  print("Couldn't analyze experiment",expName)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getErrorBars(expPath, suite): """ Go through each experiment in the path. Get the best scores for each experiment based on accuracy on validation set. Print out overall mean, and stdev for test accuracy, BG accuracy, and noise accuracy. """
exps = suite.get_exps(expPath)

# Collect, per sub-experiment, the test and noise accuracies at that
# sub-experiment's best-validation iteration.
testScores = np.zeros(len(exps))
noiseScores = np.zeros(len(exps))
for i,e in enumerate(exps):
  maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(
    e, suite)
  testScores[i] = maxTestAccuracy
  noiseScores[i] = maxTotalAccuracy
  print(e, maxTestAccuracy, maxTotalAccuracy)

# Summarize across sub-experiments (mean and standard deviation).
print("")
print("Experiment:", expPath, "Number of sub-experiments", len(exps))
print("test score mean and standard deviation:",
      testScores.mean(), testScores.std())
print("noise score mean and standard deviation:",
      noiseScores.mean(), noiseScores.std())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setCompare(a, b, aKey=None, bKey=None, leftMinusRight=False, rightMinusLeft=False): """ Compute the intersection and differences between two arrays, comparing elements by their key. @param a (numpy array) The left set to compare. @param b (numpy array) The right set to compare. @param aKey (numpy array or None) If specified, elements in "a" are compared by their corresponding entry in "aKey". @param bKey (numpy array or None) If specified, elements in "b" are compared by their corresponding entry in "bKey". @param leftMinusRight If True, also calculate the set difference (a - b) @param rightMinusLeft If True, also calculate the set difference (b - a) @return (numpy array or tuple) Always returns the intersection of "a" and "b". The elements of this intersection are values from "a" (which may be different from the values of "b" or "aKey"). If leftMinusRight or rightMinusLeft are True, it returns a tuple: - intersection (numpy array) See above - leftMinusRight (numpy array) The elements in a that are not in b - rightMinusLeft (numpy array) The elements in b that are not in a """
aKey = aKey if aKey is not None else a bKey = bKey if bKey is not None else b aWithinBMask = np.in1d(aKey, bKey) if rightMinusLeft: bWithinAMask = np.in1d(bKey, aKey) if leftMinusRight: return (a[aWithinBMask], a[~aWithinBMask], b[bWithinAMask]) else: return (a[aWithinBMask], b[~bWithinAMask]) elif leftMinusRight: return (a[aWithinBMask], a[~aWithinBMask]) else: return a[aWithinBMask]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def argmaxMulti(a, groupKeys, assumeSorted=False): """ This is like numpy's argmax, but it returns multiple maximums. It gets the indices of the max values of each group in 'a', grouping the elements by their corresponding value in 'groupKeys'. @param a (numpy array) An array of values that will be compared @param groupKeys (numpy array) An array with the same length of 'a'. Each entry identifies the group for each 'a' value. @param assumeSorted (bool) If true, group keys must be organized together (e.g. sorted). @return (numpy array) The indices of one maximum value per group @example _argmaxMulti([5, 4, 7, 2, 9, 8], [0, 0, 0, 1, 1, 1]) returns [2, 4] """
if not assumeSorted:
  # Use a stable sort algorithm so ties keep their original relative
  # order; `sorter` maps sorted positions back to original indices.
  sorter = np.argsort(groupKeys, kind="mergesort")
  a = a[sorter]
  groupKeys = groupKeys[sorter]

# `indices` are the start offsets of each group; `lengths` their sizes.
_, indices, lengths = np.unique(groupKeys,
                                return_index=True,
                                return_counts=True)

# Per-group maxima, then the positions (within the sorted array) of every
# element equal to its group's maximum.
maxValues = np.maximum.reduceat(a, indices)
allMaxIndices = np.flatnonzero(np.repeat(maxValues, lengths) == a)

# Break ties by finding the insertion points of the the group start indices
# and using the values currently at those points. This approach will choose
# the first occurrence of each max value.
indices = allMaxIndices[np.searchsorted(allMaxIndices, indices)]

if assumeSorted:
  return indices
else:
  # Translate positions in the sorted array back to original indices.
  return sorter[indices]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getAllCellsInColumns(columns, cellsPerColumn): """ Calculate all cell indices in the specified columns. @param columns (numpy array) @param cellsPerColumn (int) @return (numpy array) All cells within the specified columns. The cells are in the same order as the provided columns, so they're sorted if the columns are sorted. """
# Add # [[beginningOfColumn0], # [beginningOfColumn1], # ...] # to # [0, 1, 2, ..., cellsPerColumn - 1] # to get # [beginningOfColumn0 + 0, beginningOfColumn0 + 1, ... # beginningOfColumn1 + 0, ... # ...] # then flatten it. return ((columns * cellsPerColumn).reshape((-1, 1)) + np.arange(cellsPerColumn, dtype="uint32")).flatten()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def letterSequence(letters, w=40): """ Return a list of input vectors corresponding to sequence of letters. The vector for each letter has w contiguous bits ON and represented as a sequence of non-zero indices. """
# Map each letter to a block of w contiguous bit indices:
# 'A' -> [0, w), 'B' -> [w, 2w), etc.
sequence = []
for letter in letters:
  i = ord(letter) - ord('A')
  sequence.append(set(range(i*w,(i+1)*w)))
return sequence
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048): """ Given an iteration index, returns a list of vectors to be appended to the input stream, as well as a string label identifying the sequence. This version generates a bunch of high order sequences. The first element always provides sufficient context to predict the rest of the elements. After switchover iterations, it will generate a different set of sequences. """
if it%10==3: s = numpy.random.randint(5) if it <= switchover: if s==0: label="XABCDE" elif s==1: label="YCBEAF" elif s==2: label="GHIJKL" elif s==3: label="WABCMN" else: label="ZDBCAE" else: if s==0: label="XCBEAF" elif s==1: label="YABCDE" elif s==2: label="GABCMN" elif s==3: label="WHIJKL" else: label="ZDHICF" vecs = letterSequence(label) else: vecs= [getRandomVector(w, n)] label="." return vecs,label
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addNoise(vecs, percent=0.1, n=2048): """ Add noise to the given sequence of vectors and return the modified sequence. A percentage of the on bits are shuffled to other locations. """
noisyVecs = [] for vec in vecs: nv = vec.copy() for idx in vec: if numpy.random.random() <= percent: nv.discard(idx) nv.add(numpy.random.randint(n)) noisyVecs.append(nv) return noisyVecs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def killCells(i, options, tm): """ Kill cells as appropriate """
# Kill cells if called for if options.simulation == "killer": if i == options.switchover: print "i=",i,"Killing cells for the first time!" tm.killCells(percent = options.noise) if i == options.secondKill: print "i=",i,"Killing cells again up to",options.secondNoise tm.killCells(percent = options.secondNoise) elif options.simulation == "killingMeSoftly" and (i%100 == 0): steps = (options.secondKill - options.switchover)/100 nsteps = (options.secondNoise - options.noise)/steps noise = options.noise + nsteps*(i-options.switchover)/100 if i in xrange(options.switchover, options.secondKill+1): print "i=",i,"Killing cells!" tm.killCells(percent = noise)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printTemporalMemory(tm, outFile): """ Given an instance of TemporalMemory, print out the relevant parameters """
# Collect (name, value) pairs first, then build the table in one pass.
parameterRows = [
    ("columnDimensions", tm.getColumnDimensions()),
    ("cellsPerColumn", tm.getCellsPerColumn()),
    ("activationThreshold", tm.getActivationThreshold()),
    ("minThreshold", tm.getMinThreshold()),
    ("maxNewSynapseCount", tm.getMaxNewSynapseCount()),
    ("permanenceIncrement", tm.getPermanenceIncrement()),
    ("permanenceDecrement", tm.getPermanenceDecrement()),
    ("initialPermanence", tm.getInitialPermanence()),
    ("connectedPermanence", tm.getConnectedPermanence()),
    ("predictedSegmentDecrement", tm.getPredictedSegmentDecrement()),
]
table = PrettyTable(["Parameter name", "Value", ])
for parameterName, value in parameterRows:
    table.add_row([parameterName, value])
print >>outFile, table.get_string().encode("utf-8")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printOptions(options, tm, outFile): """ Pretty print the set of options """
# Dump the TM parameter table followed by every experiment option.
print >>outFile, "TM parameters:"
printTemporalMemory(tm, outFile)
print >>outFile, "Experiment parameters:"
for optionName, optionValue in options.__dict__.iteritems():
    print >>outFile, "  %s : %s" % (optionName, str(optionValue))
outFile.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def runBasic(noiseLevel=None, profile=False): """ Runs a basic experiment on continuous locations, learning a few locations on four basic objects, and inferring one of them. This experiment is mostly used for testing the pipeline, as the learned locations are too random and sparse to actually perform inference. Parameters: @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference """
# Two-column experiment on a continuous-location object machine.
exp = L4L2Experiment(
    "basic_continuous",
    numCorticalColumns=2
)

objects = createObjectMachine(
    machineType="continuous",
    numInputBits=21,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
)

# Four simple geometric objects to learn.
objects.addObject(Sphere(radius=20), name="sphere")
objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
objects.addObject(Box(dimensions=[10, 20, 30,]), name="box")
objects.addObject(Cube(width=20), name="cube")

# Per-object (feature, numPoints) sampling plan.
learnConfig = {
    "sphere": [("surface", 10)],
    # the two learning config below will be exactly the same
    "box": [("face", 5), ("edge", 5), ("vertex", 5)],
    "cube": [(feature, 5) for feature in objects["cube"].getFeatures()],
    "cylinder": [(feature, 5) for feature in objects["cylinder"].getFeatures()]
}

exp.learnObjects(
    objects.provideObjectsToLearn(learnConfig, plot=True),
    reset=True
)
if profile:
    exp.printProfile()

# Infer "cube" from 4 sensations; each column touches a different sequence
# of features.
inferConfig = {
    "numSteps": 4,
    "noiseLevel": noiseLevel,
    "objectName": "cube",
    "pairs": {
        0: ["face", "face", "edge", "edge"],
        1: ["edge", "face", "face", "edge"]
    }
}

exp.infer(
    objects.provideObjectToInfer(inferConfig, plot=True),
    objectName="cube",
    reset=True
)
if profile:
    exp.printProfile()

exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"],
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plotBoostTrace(sp, inputVectors, columnIndex): """ Plot boostfactor for a selected column Note that learning is ON for SP here :param sp: sp instance :param inputVectors: input data :param columnIndex: index for the column of interest """
numInputVector, inputSize = inputVectors.shape
columnNumber = np.prod(sp.getColumnDimensions())
# One column of trace data per input presentation.
boostFactorsTrace = np.zeros((columnNumber, numInputVector))
activeDutyCycleTrace = np.zeros((columnNumber, numInputVector))
minActiveDutyCycleTrace = np.zeros((columnNumber, numInputVector))

for i in range(numInputVector):
    outputColumns = np.zeros(sp.getColumnDimensions(), dtype=uintType)
    # Deep-copy so sp.compute cannot mutate the caller's data.
    inputVector = copy.deepcopy(inputVectors[i][:])
    # Learning is ON, so boost factors and duty cycles evolve per step.
    sp.compute(inputVector, True, outputColumns)

    boostFactors = np.zeros((columnNumber, ), dtype=realDType)
    sp.getBoostFactors(boostFactors)
    boostFactorsTrace[:, i] = boostFactors

    activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
    sp.getActiveDutyCycles(activeDutyCycle)
    activeDutyCycleTrace[:, i] = activeDutyCycle

    minActiveDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
    sp.getMinActiveDutyCycles(minActiveDutyCycle)
    minActiveDutyCycleTrace[:, i] = minActiveDutyCycle

# Top panel: boost factor over time; bottom: active vs. min duty cycle.
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(boostFactorsTrace[columnIndex, :])
plt.ylabel('Boost Factor')
plt.subplot(2, 1, 2)
plt.plot(activeDutyCycleTrace[columnIndex, :])
plt.plot(minActiveDutyCycleTrace[columnIndex, :])
plt.xlabel(' Time ')
plt.ylabel('Active Duty Cycle')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def next_epoch(self): """ Load next epoch from disk """
epoch = next(self._all_epochs) folder = os.path.join(self._root, str(epoch), self._subset) self.data = [] silence = None gc.disable() for filename in os.listdir(folder): command = os.path.splitext(os.path.basename(filename))[0] with open(os.path.join(folder, filename), "r") as pkl_file: audio = pickle.load(pkl_file) # Check for 'silence' if command == "silence": silence = audio else: target = self.classes.index(os.path.basename(command)) self.data.extend(itertools.product(audio, [target])) gc.enable() target = self.classes.index("silence") self.data += [(silence, target)] * int(len(self.data) * self._silence_percentage) return epoch
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isValid(folder, epoch=0): """ Check if the given folder is a valid preprocessed dataset """
# Validate by checking for the training 'silence.pkl' on the given epoch # This file is unique to our pre-processed dataset generated by 'process_dataset.py' return os.path.exists(os.path.join(folder, str(epoch), "train", "silence.pkl"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def burstColumn(self, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn): """ Activates all of the cells in an unpredicted active column, chooses a winner cell, and, if learning is turned on, learns on one segment, growing a new segment if necessary. @param column (int) Index of bursting column. @param columnMatchingSegments (iter) Matching segments in this column, or None if there aren't any. @param prevActiveCells (list) Active cells in `t-1`. @param prevWinnerCells (list) Winner cells in `t-1`. @param learn (bool) Whether or not learning is enabled. @return (tuple) Contains: `cells` (iter), `winnerCell` (int), """
start = self.cellsPerColumn * column # Strip out destroyed cells before passing along to base _burstColumn() cellsForColumn = [cellIdx for cellIdx in xrange(start, start + self.cellsPerColumn) if cellIdx not in self.deadCells] return self._burstColumn( self.connections, self._random, self.lastUsedIterationForSegment, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn, self.numActivePotentialSynapsesForSegment, self.iteration, self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement, self.permanenceDecrement, self.maxSegmentsPerCell, self.maxSynapsesPerSegment, learn)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printDeadCells(self): """ Print statistics for the dead cells """
columnCasualties = numpy.zeros(self.numberOfColumns()) for cell in self.deadCells: col = self.columnForCell(cell) columnCasualties[col] += 1 for col in range(self.numberOfColumns()): print col, columnCasualties[col]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self): """ Reset Union Pooler, clear active cell history """
# Forget all pooled history and start from an empty union SDR.
self._activeCellsHistory = []
self._unionSDR = numpy.zeros(shape=(self._numInputs,))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getSparsity(self): """ Return the sparsity of the current union SDR """
sparsity = numpy.sum(self._unionSDR) / self._numInputs return sparsity
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plotDataframe(table, title, plotPath): """ Plot Panda dataframe. :param table: Panda dataframe returned by :func:`analyzeWeightPruning` :type table: :class:`pandas.DataFrame` :param title: Plot title :type title: str :param plotPath: Plot full path :type plotPath: str """
plt.figure()
axes = table.T.plot(subplots=True, sharex=True, grid=True, legend=True,
                    title=title, figsize=(8, 11))

# Use a fixed [0, 1] scale for the "accuracy" subplot, when present.
# A default of None avoids an unhandled StopIteration when the dataframe
# has no 'accuracy' row.
accuracyAxis = next(
    (ax for ax in axes if ax.lines[0].get_label() == 'accuracy'), None)
if accuracyAxis is not None:
    accuracyAxis.set_ylim(0.0, 1.0)

plt.savefig(plotPath)
plt.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getDatetimeAxis(): """ use datetime as x-axis """
dataSet = 'nyc_taxi' filePath = './data/' + dataSet + '.csv' data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['datetime', 'value', 'timeofday', 'dayofweek']) xaxisDate = pd.to_datetime(data['datetime']) return xaxisDate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encodeDeltas(self, dx,dy): """Return the SDR for dx,dy"""
dxe = self.dxEncoder.encode(dx) dye = self.dyEncoder.encode(dy) ex = numpy.outer(dxe,dye) return ex.flatten().nonzero()[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encodeThetas(self, theta1, theta2): """Return the SDR for theta1 and theta2"""
# print >> sys.stderr, "encoded theta1 value = ", theta1 # print >> sys.stderr, "encoded theta2 value = ", theta2 t1e = self.theta1Encoder.encode(theta1) t2e = self.theta2Encoder.encode(theta2) # print >> sys.stderr, "encoded theta1 = ", t1e.nonzero()[0] # print >> sys.stderr, "encoded theta2 = ", t2e.nonzero()[0] ex = numpy.outer(t2e,t1e) return ex.flatten().nonzero()[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decodeThetas(self, predictedCells): """ Given the set of predicted cells, return the predicted theta1 and theta2 """
# Rebuild the dense SDR, then reshape it back into the (theta2 x theta1)
# outer-product grid used by encodeThetas.
a = numpy.zeros(self.bottomUpInputSize)
a[predictedCells] = 1
a = a.reshape((self.theta1Encoder.getWidth(), self.theta1Encoder.getWidth()))
# Collapsing rows recovers theta1's bits (columns); collapsing columns
# recovers theta2's bits (rows).
theta1PredictedBits = a.mean(axis=0).nonzero()[0]
theta2PredictedBits = a.mean(axis=1).nonzero()[0]

# To decode it we need to create a flattened array again and pass it
# to encoder.
# TODO: We use encoder's topDownCompute method - not sure if that is best.
t1 = numpy.zeros(self.theta1Encoder.getWidth())
t1[theta1PredictedBits] = 1
t1Prediction = self.theta1Encoder.topDownCompute(t1)[0].value

t2 = numpy.zeros(self.theta2Encoder.getWidth())
t2[theta2PredictedBits] = 1
t2Prediction = self.theta2Encoder.topDownCompute(t2)[0].value

# print >> sys.stderr, "predicted cells = ", predictedCells
# print >> sys.stderr, "decoded theta1 bits = ", theta1PredictedBits
# print >> sys.stderr, "decoded theta2 bits = ", theta2PredictedBits
# print >> sys.stderr, "decoded theta1 value = ", t1Prediction
# print >> sys.stderr, "decoded theta2 value = ", t2Prediction

return t1Prediction, t2Prediction
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inferTM(self, bottomUp, externalInput): """ Run inference and return the set of predicted cells """
self.reset() # print >> sys.stderr, "Bottom up: ", bottomUp # print >> sys.stderr, "ExternalInput: ",externalInput self.tm.compute(bottomUp, basalInput=externalInput, learn=False) # print >> sys.stderr, ("new active cells " + str(self.tm.getActiveCells())) # print >> sys.stderr, ("new predictive cells " + str(self.tm.getPredictiveCells())) return self.tm.getPredictiveCells()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def classify(self, encoding, num=1): """ Classify with basic one-hot local encoding """
probDist = numpy.exp(encoding) / numpy.sum(numpy.exp(encoding)) sortIdx = numpy.argsort(probDist) return sortIdx[-num:].tolist()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _seed(self, seed=-1): """ Initialize the random seed """
# -1 is the sentinel for "no fixed seed": fall back to entropy-based
# seeding, otherwise seed deterministically.
if seed == -1:
    self._random = np.random.RandomState()
else:
    self._random = np.random.RandomState(seed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize_weights(self): """Randomly initializes the visible-to-hidden connections."""
# Draw a (numUnits x inputDim) matrix of uniform [0, 1) weights, then
# rescale each unit's weight vector to unit Euclidean norm.
numUnits = self._outputSize
inputDim = self._inputSize
self._Q = self._random.sample((numUnits, inputDim))
for unit in range(numUnits):
    self._Q[unit] /= np.sqrt(np.dot(self._Q[unit], self._Q[unit]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _inhibitColumnsWithLateral(self, overlaps, lateralConnections): """ Performs an experimental local inhibition. Local inhibition is iteratively performed on a column by column basis. """
n,m = self.shape
y = np.zeros(n)
s = self.sparsity
L = lateralConnections
desiredWeight = self.codeWeight
# Accumulated inhibition each column has received from already-chosen
# winners.
inhSignal = np.zeros(n)
# Visit columns from strongest to weakest overlap; mergesort keeps ties in
# a deterministic order.
sortedIndices = np.argsort(overlaps, kind='mergesort')[::-1]

currentWeight = 0
for i in sortedIndices:
    # Columns are sorted, so once one is below threshold the rest are too.
    if overlaps[i] < self._stimulusThreshold:
        break

    # A column whose accumulated inhibition reaches the sparsity target is
    # suppressed.
    inhTooStrong = ( inhSignal[i] >= s )

    if not inhTooStrong:
        y[i] = 1.
        currentWeight += 1
        # Every new winner inhibits its lateral neighbours.
        inhSignal[:] += L[i,:]

    # Optionally stop as soon as the desired code weight is reached.
    if self.enforceDesiredWeight and currentWeight == desiredWeight:
        break

activeColumns = np.where(y==1.0)[0]

return activeColumns
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute(self, inputVector, learn, activeArray, applyLateralInhibition=True): """ This is the primary public method of the LateralPooler class. This function takes a input vector and outputs the indices of the active columns. If 'learn' is set to True, this method also updates the permanences of the columns and their lateral inhibitory connection weights. """
# Validate the input before touching any internal state.
if not isinstance(inputVector, np.ndarray):
    raise TypeError("Input vector must be a numpy array, not %s" %
                    str(type(inputVector)))

if inputVector.size != self._numInputs:
    raise ValueError(
        "Input vector dimensions don't match. Expecting %s but got %s" % (
            inputVector.size, self._numInputs))

self._updateBookeepingVars(learn)
inputVector = np.array(inputVector, dtype=realDType)
inputVector.reshape(-1)
self._overlaps = self._calculateOverlap(inputVector)

# Apply boosting when learning is on
if learn:
    self._boostedOverlaps = self._boostFactors * self._overlaps
else:
    self._boostedOverlaps = self._overlaps

# Apply inhibition to determine the winning columns
if applyLateralInhibition == True:
    activeColumns = self._inhibitColumnsWithLateral(self._boostedOverlaps,
                                                    self.lateralConnections)
else:
    activeColumns = self._inhibitColumns(self._boostedOverlaps)

# Encode the winners as a dense binary output.
activeArray.fill(0)
activeArray[activeColumns] = 1.0

if learn:
    # Standard SP learning: synapse adaptation, duty cycles, boosting.
    self._adaptSynapses(inputVector, activeColumns, self._boostedOverlaps)
    self._updateDutyCycles(self._overlaps, activeColumns)
    self._bumpUpWeakColumns()
    self._updateBoostFactors()

    # Track pairwise column co-activity and, if enabled, learn the lateral
    # inhibitory weights from it.
    self._updateAvgActivityPairs(activeArray)

    epsilon = self.lateralLearningRate
    if epsilon > 0:
        self._updateLateralConnections(epsilon, self.avgActivityPairs)

    if self._isUpdateRound():
        self._updateInhibitionRadius()
        self._updateMinDutyCycles()

return activeArray
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def feedforward(self): """ Soon to be deprecated. Needed to make the SP implementation compatible with some older code. """
m = self._numInputs n = self._numColumns W = np.zeros((n, m)) for i in range(self._numColumns): self.getPermanence(i, W[i, :]) return W
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def learn(self): """ Learn all objects on every column. Each column will learn all the features of every object and store the object's L2 representation to be later used in the inference stage """
self.setLearning(True)

for obj in self.objects:
    self.sendReset()

    previousLocation = [None] * self.numColumns
    displacement = [0., 0.]
    features = obj["features"]
    numOfFeatures = len(features)

    # Randomize touch sequences
    touchSequence = np.random.permutation(numOfFeatures)

    for sensation in xrange(numOfFeatures):
        for col in xrange(self.numColumns):
            # Shift the touch sequence for each column
            colSequence = np.roll(touchSequence, col)
            feature = features[colSequence[sensation]]
            # Move the sensor to the center of the object
            locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                         feature["left"] + feature["width"] / 2.])
            # Calculate displacement from previous location
            if previousLocation[col] is not None:
                displacement = locationOnObject - previousLocation[col]
            previousLocation[col] = locationOnObject

            # learn each pattern multiple times
            activeColumns = self.featureSDR[col][feature["name"]]
            for _ in xrange(self.numLearningPoints):
                # Sense feature at location
                self.motorInput[col].addDataToQueue(displacement)
                self.sensorInput[col].addDataToQueue(activeColumns, False, 0)
                # Only move to the location on the first sensation.
                displacement = [0, 0]

    # Drain the queued sensations: one network step per queued item.
    self.network.run(numOfFeatures * self.numLearningPoints)

    # update L2 representations for the object
    self.learnedObjects[obj["name"]] = self.getL2Representations()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def createL2456Columns(network, networkConfig): """ Create a network consisting of multiple L2456 columns as described in the file comments above. """
# Create each column numCorticalColumns = networkConfig["numCorticalColumns"] for i in xrange(numCorticalColumns): networkConfigCopy = copy.deepcopy(networkConfig) randomSeedBase = networkConfigCopy["randomSeedBase"] networkConfigCopy["L2Params"]["seed"] = randomSeedBase + i networkConfigCopy["L4Params"]["seed"] = randomSeedBase + i networkConfigCopy["L5Params"]["seed"] = randomSeedBase + i networkConfigCopy["L6Params"]["seed"] = randomSeedBase + i networkConfigCopy["L2Params"][ "numOtherCorticalColumns"] = numCorticalColumns - 1 networkConfigCopy["L5Params"][ "numOtherCorticalColumns"] = numCorticalColumns - 1 suffix = "_" + str(i) network = _createL2456Column(network, networkConfigCopy, suffix) # Now connect the L2 columns laterally to every other L2 column, and # the same for L5 columns. for i in range(networkConfig["numCorticalColumns"]): suffixSrc = "_" + str(i) for j in range(networkConfig["numCorticalColumns"]): if i != j: suffixDest = "_" + str(j) network.link( "L2Column" + suffixSrc, "L2Column" + suffixDest, "UniformLink", "", srcOutput="feedForwardOutput", destInput="lateralInput") network.link( "L5Column" + suffixSrc, "L5Column" + suffixDest, "UniformLink", "", srcOutput="feedForwardOutput", destInput="lateralInput") enableProfiling(network) return network
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _mmComputeTransitionTraces(self): """ Computes the transition traces, if necessary. Transition traces are the following: predicted => active cells predicted => inactive cells predicted => active columns predicted => inactive columns unpredicted => active columns """
# Traces are computed lazily; bail out when they are already up to date.
if not self._mmTransitionTracesStale:
    return

self._mmData["predictedActiveCellsForSequence"] = defaultdict(set)

self._mmTraces["predictedActiveCells"] = IndicesTrace(self,
    "predicted => active cells (correct)")
self._mmTraces["predictedInactiveCells"] = IndicesTrace(self,
    "predicted => inactive cells (extra)")
self._mmTraces["predictedActiveColumns"] = IndicesTrace(self,
    "predicted => active columns (correct)")
self._mmTraces["predictedInactiveColumns"] = IndicesTrace(self,
    "predicted => inactive columns (extra)")
self._mmTraces["unpredictedActiveColumns"] = IndicesTrace(self,
    "unpredicted => active columns (bursting)")

predictedCellsTrace = self._mmTraces["predictedCells"]

# Classify every timestep's predicted cells against the columns that
# actually became active at that timestep.
for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):
    predictedActiveCells = set()
    predictedInactiveCells = set()
    predictedActiveColumns = set()
    predictedInactiveColumns = set()

    for predictedCell in predictedCellsTrace.data[i]:
        predictedColumn = self.columnForCell(predictedCell)

        if predictedColumn in activeColumns:
            # Correct prediction.
            predictedActiveCells.add(predictedCell)
            predictedActiveColumns.add(predictedColumn)

            # Also index correct predictions by sequence label, if any.
            sequenceLabel = self.mmGetTraceSequenceLabels().data[i]
            if sequenceLabel is not None:
                self._mmData["predictedActiveCellsForSequence"][sequenceLabel].add(
                    predictedCell)
        else:
            # Extra (false) prediction.
            predictedInactiveCells.add(predictedCell)
            predictedInactiveColumns.add(predictedColumn)

    # Active columns that nobody predicted burst.
    unpredictedActiveColumns = set(activeColumns) - set(predictedActiveColumns)

    self._mmTraces["predictedActiveCells"].data.append(predictedActiveCells)
    self._mmTraces["predictedInactiveCells"].data.append(predictedInactiveCells)
    self._mmTraces["predictedActiveColumns"].data.append(predictedActiveColumns)
    self._mmTraces["predictedInactiveColumns"].data.append(
        predictedInactiveColumns)
    self._mmTraces["unpredictedActiveColumns"].data.append(
        unpredictedActiveColumns)

self._mmTransitionTracesStale = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_biased_correlations(data, threshold= 10): """ Gets the highest few correlations for each bit, across the entirety of the data. Meant to provide a comparison point for the pairwise correlations reported in the literature, which are typically between neighboring neurons tuned to the same inputs. We would expect these neurons to be among the most correlated in any region, so pairwise correlations between most likely do not provide an unbiased estimator of correlations between arbitrary neurons. """
data = data.toDense() correlations = numpy.corrcoef(data, rowvar = False) highest_correlations = [] for row in correlations: highest_correlations += sorted(row, reverse = True)[1:threshold+1] return numpy.mean(highest_correlations)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pattern_correlations(data): """ Gets the average correlation between all bits in patterns, across the entire dataset. Assumes input is a sparse matrix. Weighted by pattern rather than by bit; this is the average pairwise correlation for every pattern in the data, and is not the average pairwise correlation for all bits that ever cooccur. This is a subtle but important difference. """
patterns = [data.rowNonZeros(i)[0] for i in range(data.nRows())] dense_data = data.toDense() correlations = numpy.corrcoef(dense_data, rowvar = False) correlations = numpy.nan_to_num(correlations) pattern_correlations = [] for pattern in patterns: pattern_correlations.append([correlations[i, j] for i in pattern for j in pattern if i != j]) return numpy.mean(pattern_correlations)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_correlated_data(dim = 2000, num_active = 40, num_samples = 1000, num_cells_per_cluster_size = [2000]*8, cluster_sizes = range(2, 10)): """ Generates a set of data drawn from a uniform distribution, but with bits clustered to force correlation between neurons. Clusters are randomly chosen to form an activation pattern, in such a way as to maintain sparsity. The number of clusters of each size is defined by num_cells_per_cluster_size, as cluster_size*num_clusters = num_cells. This parameter can be thought of as indicating how thoroughly the clusters "cover" the space. Typical values are 1-3 times the total dimension. These are specificied independently for each cluster size, to allow for more variation. """
# Chooses clusters to clusters = [] cells = set(range(dim)) # Associate cluster sizes with how many cells they should have, then generate # clusters. for size, num_cells in zip(cluster_sizes, num_cells_per_cluster_size): # Must have (num_cells/size) clusters in order to have num_cells cells # across all clusters with this size. for i in range(int(1.*num_cells/size)): cluster = tuple(numpy.random.choice(dim, size, replace = False)) clusters.append(cluster) # Now generate a list of num_samples SDRs datapoints = [] for sample in range(num_samples): # This conditional is necessary in the case that very, very few clusters are # created, as otherwise numpy.random.choice can attempt to choose more # clusters than is possible. Typically this case will not occur, but it is # possible if we are attempting to generate extremely correlated data. if len(clusters) > num_active/2: chosen_clusters = numpy.random.choice(len(clusters), num_active/2, replace = False) current_clusters = [clusters[i] for i in chosen_clusters] else: current_clusters = clusters # Pick clusters until doing so would exceed our target num_active. current_cells = set() for cluster in current_clusters: if len(current_cells) + len(cluster) < num_active: current_cells |= set(cluster) else: break # Add random cells to the SDR if we still don't have enough active. if len(current_cells) < num_active: possible_cells = cells - current_cells new_cells = numpy.random.choice(tuple(possible_cells), num_active - len(current_cells), replace = False) current_cells |= set(new_cells) datapoints.append(list(current_cells)) data = SM32() data.reshape(num_samples, dim) for sample, datapoint in enumerate(datapoints): for i in datapoint: data[sample, i] = 1. return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_noise(data, noise): """ Applies noise to a sparse matrix. Noise can be an integer between 0 and 100, indicating the percentage of ones in the original input to move, or a float in [0, 1), indicating the same thing. The input matrix is modified in-place, and nothing is returned. This operation does not affect the sparsity of the matrix, or of any individual datapoint. """
# Percentages given as integers in (1, 100] are normalized to a fraction.
if noise >= 1:
    noise = noise/100.
for i in range(data.nRows()):
    ones = data.rowNonZeros(i)[0]
    # Choose which ON bits to move...
    replace_indices = numpy.random.choice(ones,
                                          size = int(len(ones)*noise),
                                          replace = False)
    for index in replace_indices:
        data[i, index] = 0
    # ...and an equal number of candidate new positions to switch ON.
    new_indices = numpy.random.choice(data.nCols(),
                                      size = int(len(ones)*noise),
                                      replace = False)
    for index in new_indices:
        # Re-draw if the position is already ON, so the row's sparsity is
        # preserved exactly.
        while data[i, index] == 1:
            index = numpy.random.randint(0, data.nCols())
        data[i, index] = 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shuffle_sparse_matrix_and_labels(matrix, labels): """ Shuffles a sparse matrix and set of labels together. Resorts to densifying and then re-sparsifying the matrix, for convenience. Still very fast. """
print "Shuffling data" new_matrix = matrix.toDense() rng_state = numpy.random.get_state() numpy.random.shuffle(new_matrix) numpy.random.set_state(rng_state) numpy.random.shuffle(labels) print "Data shuffled" return SM32(new_matrix), numpy.asarray(labels)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_sparse_matrix(matrix, num_categories): """ An analog of numpy.split for our sparse matrix. If the number of categories does not divide the number of rows in the matrix, all overflow is placed in the final bin. In the event that there are more categories than rows, all later categories are considered to be an empty sparse matrix. """
if matrix.nRows() < num_categories:
    # Fewer rows than categories: one single-row slice per row, then pad
    # the remaining categories with empty sparse matrices.
    return [matrix.getSlice(i, i+1, 0, matrix.nCols())
            for i in range(matrix.nRows())] + [SM32() for i in
            range(num_categories - matrix.nRows())]
else:
    # NOTE(review): Python 2 integer division -- any remainder rows all
    # land in the final bin, per the docstring.
    inc = matrix.nRows()/num_categories
    divisions = [matrix.getSlice(i*inc, (i+1)*inc, 0, matrix.nCols()) for i
                 in range(num_categories - 1)]

    # Handle the last bin separately. All overflow goes into it.
    divisions.append(matrix.getSlice((num_categories - 1)*inc,
                                     matrix.nRows(), 0, matrix.nCols()))
    return divisions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_phase_1(dim = 40): """ The first step in creating datapoints in the Poirazi & Mel model. This returns a vector of dimension dim, with the last four values set to 1 and the rest drawn from a normal distribution. """
def generate_phase_1(dim = 40):
  """
  The first step in creating datapoints in the Poirazi & Mel model.

  Returns a vector of dimension dim whose last four entries are set to 1
  and whose remaining entries are drawn from a standard normal
  distribution.
  """
  vector = numpy.random.normal(0, 1, dim)
  # Pin the trailing four components to 1.0, as in the original model.
  vector[dim - 4:] = 1.0
  return vector
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_phase_2(phase_1, dim = 40): """ The second step in creating datapoints in the Poirazi & Mel model. This takes a phase 1 vector, and creates a phase 2 vector where each point is the product of four elements of the phase 1 vector, randomly drawn with replacement. """
def generate_phase_2(phase_1, dim = 40):
  """
  The second step in creating datapoints in the Poirazi & Mel model.

  Takes a phase 1 vector and creates a phase 2 vector where each entry is
  the product of four elements of the phase 1 vector, randomly drawn with
  replacement.
  """
  def _random_four_product():
    # Four independent uniform draws with replacement -- same RNG call
    # order as the original loop, so results are identical for a given seed.
    picks = [numpy.random.randint(0, dim) for _ in range(4)]
    return numpy.prod([phase_1[p] for p in picks])

  return [_random_four_product() for _ in range(dim)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bin_number(datapoint, intervals): """ Given a datapoint and intervals representing bins, returns the number represented in binned form, where the bin including the value is set to 1 and all others are 0. """
def bin_number(datapoint, intervals):
  """
  Given a datapoint and intervals representing bins, returns the number
  represented in binned (one-hot) form: a list of length
  len(intervals) + 1 with a 1 in the bin containing the value and 0
  everywhere else.
  """
  hot = numpy.searchsorted(intervals, datapoint)
  # searchsorted always yields an index in [0, len(intervals)], so this
  # assignment is in bounds.
  encoding = [0] * (len(intervals) + 1)
  encoding[hot] = 1
  return encoding
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bin_data(data, dim = 40, num_bins = 10): """ Fully bins the data generated by generate_data, using generate_RF_bins and bin_number. """
def bin_data(data, dim = 40, num_bins = 10):
  """
  Fully bins the data generated by generate_data, using generate_RF_bins
  and bin_number.
  """
  intervals = generate_RF_bins(data, dim, num_bins)
  binned = []
  for row in data:
    # One one-hot encoding per feature, concatenated into a single vector.
    encoded = [bin_number(row[i], intervals[i]) for i in range(len(row))]
    binned.append(numpy.concatenate(encoded))
  return binned
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def killCells(self, killCellPercent): """ kill a fraction of cells from the network """
def killCells(self, killCellPercent):
  """
  Kill a fraction of the hidden cells in the network.

  @param killCellPercent (float) fraction in [0, 1] of hidden neurons to
         remove; <= 0 is a no-op
  """
  if killCellPercent <= 0:
    return

  numHiddenNeurons = self.net.numHiddenNeurons
  # round() returns a float; it must be converted to int before being used
  # as a slice bound / array size (modern NumPy rejects float indices).
  numDead = int(round(killCellPercent * numHiddenNeurons))

  zombiePermutation = numpy.random.permutation(numHiddenNeurons)
  deadCells = zombiePermutation[0:numDead]
  liveCells = zombiePermutation[numDead:]

  self.net.inputWeights = self.net.inputWeights[liveCells, :]
  self.net.bias = self.net.bias[:, liveCells]
  self.net.beta = self.net.beta[liveCells, :]
  # numpy.ix_ selects the live-by-live submatrix.  The original
  # M[liveCells, liveCells] is pointwise fancy indexing and returns only a
  # 1-D vector of "diagonal" elements, leaving M with the wrong shape.
  self.net.M = self.net.M[numpy.ix_(liveCells, liveCells)]
  self.net.numHiddenNeurons = numHiddenNeurons - numDead
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printFrequencyStatistics(counts, frequencies, numWords, size): """ Print interesting statistics regarding the counts and frequency matrices """
# Average number of ON bits per word over all processed words.
avgBits = float(counts.sum())/numWords
# NOTE(review): the 128x128 retina dimensions are hard-coded in this label;
# confirm they agree with the `size` argument supplied by the caller.
print "Retina width=128, height=128"
print "Total number of words processed=",numWords
# Trailing commas continue output on the same line (Python 2 print
# statement semantics).
print "Average number of bits per word=",avgBits,
print "avg sparsity=",avgBits/size
print "counts matrix sum=",counts.sum(),
print "max=",counts.max(), "min=",counts.min(),
print "mean=",counts.sum()/float(size)
print "frequency matrix sum=",frequencies.sum(),
print "max=",frequencies.max(), "min=",frequencies.min(),
print "mean=",frequencies.sum()/float(size)
# nZeroCols() counts bit positions that were never active in any word.
print "Number of bits with zero entries",frequencies.nZeroCols()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def countRandomBitFrequencies(numTerms = 100000, percentSparsity = 0.01): """Create a uniformly random counts matrix through sampling."""
# Accumulate counts by inplace-adding sparse matrices counts = SparseMatrix() size = 128*128 counts.resize(1, size) # Pre-allocate buffer sparse matrix sparseBitmap = SparseMatrix() sparseBitmap.resize(1, size) random.seed(42) # Accumulate counts for each bit for each word numWords=0 for term in xrange(numTerms): bitmap = random.sample(xrange(size), int(size*percentSparsity)) bitmap.sort() sparseBitmap.setRowFromSparse(0, bitmap, [1]*len(bitmap)) counts += sparseBitmap numWords += 1 # Compute normalized version of counts as a separate matrix frequencies = SparseMatrix() frequencies.resize(1, size) frequencies.copy(counts) frequencies.divide(float(numWords)) # Wrap up by printing some statistics and then saving the normalized version printFrequencyStatistics(counts, frequencies, numWords, size) frequencyFilename = "bit_frequencies_random.pkl" print "Saving frequency matrix in",frequencyFilename with open(frequencyFilename, "wb") as frequencyPickleFile: pickle.dump(frequencies, frequencyPickleFile) return counts