text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def HTM_style_initialize_on_data(self, data, labels):
""" Uses a style of initialization inspired by the temporal memory. When a new positive example is found, a dendrite is chosen and a number of synapses are created to the example. This works intelligently with an amount of data larger than the number of available dendrites. In this case, data is clustered, and then similar datapoints are allotted to shared dendrites, with as many overlapping bits as possible chosen. In practice, it is still better to simply allocate enough dendrites to have one per datapoint, but this method at least allows initialization to work on larger amounts of data. """ |
# Index of the next free dendrite (column of self.dendrites) to populate.
current_dendrite = 0
# Sparse synapse matrix (nupic SM32): rows = input bits, cols = dendrites.
self.dendrites = SM32()
self.dendrites.reshape(self.dim, self.num_dendrites)
# We want to avoid training on any negative examples
data = copy.deepcopy(data)
data.deleteRows([i for i, v in enumerate(labels) if v != 1])
if data.nRows() > self.num_dendrites:
    # More positive examples than dendrites: k-means similar datapoints
    # onto shared dendrites, preferring bits common to the cluster.
    print "Neuron using clustering to initialize dendrites"
    data = (data.toDense())
    model = KMeans(n_clusters = self.num_dendrites, n_jobs=1)
    clusters = model.fit_predict(data)
    # Per cluster: [Counter of bit frequencies, list of member bit-sets].
    multisets = [[Counter(), []] for i in range(self.num_dendrites)]
    sparse_data = [[i for i, d in enumerate(datapoint) if d == 1] for datapoint in data]
    for datapoint, cluster in zip(sparse_data, clusters):
        multisets[cluster][0] = multisets[cluster][0] + Counter(datapoint)
        multisets[cluster][1].append(set(datapoint))
    for i, multiset in enumerate(multisets):
        # Seed with bits shared by at least two members of the cluster.
        shared_elements = set(map(lambda x: x[0], filter(lambda x: x[1] > 1, multiset[0].most_common(self.dendrite_length))))
        # NOTE(review): this is an alias, not a copy -- adding to
        # dendrite_connections below also grows shared_elements, which is
        # what terminates the while loop.
        dendrite_connections = shared_elements
        while len(shared_elements) < self.dendrite_length:
            # Take the member with least overlap so far and sample one of
            # its not-yet-connected bits.
            most_distant_point = multiset[1][numpy.argmin([len(dendrite_connections.intersection(point)) for point in multiset[1]])]
            new_connection = random.sample(most_distant_point - dendrite_connections, 1)[0]
            dendrite_connections.add(new_connection)
        for synapse in dendrite_connections:
            self.dendrites[synapse, current_dendrite] = 1.
        current_dendrite += 1
else:
    # One dendrite per datapoint: sample dendrite_length of its active bits.
    for i in range(data.nRows()):
        ones = data.rowNonZeros(i)[0]
        dendrite_connections = numpy.random.choice(ones, size = self.dendrite_length, replace = False)
        for synapse in dendrite_connections:
            self.dendrites[synapse, current_dendrite] = 1.
        current_dendrite += 1
self.initialize_permanences() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def HTM_style_train_on_datapoint(self, datapoint, label):
""" Run a version of permanence-based training on a datapoint. Due to the fixed dendrite count and dendrite length, we are forced to more efficiently use each synapse, deleting synapses and resetting them if they are not found useful. """ |
# Overlap of the (1 x dim) datapoint with every dendrite column.
activations = datapoint * self.dendrites
# Applied in place; presumably a thresholding nonlinearity -- TODO confirm.
self.nonlinearity(activations)
#activations will quite likely still be sparse if using a threshold nonlinearity, so want to keep it sparse
# The neuron fires iff any dendrite survived the nonlinearity.
activation = numpy.sign(activations.sum())
if label >= 1 and activation >= 0.5:
    # True positive: reinforce the most strongly responding dendrite.
    strongest_branch = activations.rowMax(0)[0]
    datapoint.transpose()
    # Increment permanences of synapses aligned with active input bits...
    inc_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_increment
    inc_vector.elementNZMultiply(datapoint)
    # ...and decrement synapses that saw no input.
    dec_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_decrement
    dec_vector.elementNZMultiply(1 - datapoint)
    self.permanences.setSlice(0, strongest_branch, self.permanences.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) + inc_vector - dec_vector)
    positions, scores = self.permanences.colNonZeros(strongest_branch)[0], self.permanences.colNonZeros(strongest_branch)[1]
    for position, score in zip(positions, scores):
        if score < self.permanence_threshold:
            # Synapse fell below threshold: delete it and re-grow a new one
            # onto a currently-active, not-yet-connected input bit.
            self.dendrites[position, strongest_branch] = 0
            self.permanences[position, strongest_branch] = 0
            new_connection = random.sample(set(datapoint.colNonZeros(0)[0]) - set(self.dendrites.colNonZeros(strongest_branch)[0]), 1)[0]
            self.dendrites[new_connection, strongest_branch] = 1.
            self.permanences[new_connection, strongest_branch] = self.initial_permanence
elif label < 1 and activation >= 0.5:
    # Need to weaken some connections
    # False positive: punish the dendrite that caused the activation.
    strongest_branch = activations.rowMax(0)[0]
    dec_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_decrement
    datapoint.transpose()
    dec_vector.elementNZMultiply(datapoint)
    self.permanences.setSlice(0, strongest_branch, self.permanences.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) - dec_vector)
elif label >= 1 and activation < 0.5:
    # Need to create some new connections
    # False negative: recycle the weakest dendrite onto this datapoint,
    # but only if its synapses are mostly below threshold.
    weakest_branch = numpy.argmin(self.permanences.colSums())
    if numpy.median(self.permanences.getCol(weakest_branch)) < self.permanence_threshold:
        self.permanences.setColToZero(weakest_branch)
        self.dendrites.setColToZero(weakest_branch)
        ones = datapoint.rowNonZeros(0)[0]
        dendrite_connections = numpy.random.choice(ones, size = self.dendrite_length, replace = False)
        for synapse in dendrite_connections:
            self.dendrites[synapse, weakest_branch] = 1.
            self.permanences[synapse, weakest_branch] = self.initial_permanence |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize(self):
""" Initialize the self._poolerClass """ |
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = {name: getattr(self, name) for name in self._poolerArgNames}
autoArgs["inputDimensions"] = [self._inputWidth]
autoArgs["columnDimensions"] = [self._columnCount]
autoArgs["potentialRadius"] = self._inputWidth
autoArgs["historyLength"] = self._historyLength
autoArgs["minHistory"] = self._minHistory
# Allocate the pooler
self._pooler = self._poolerClass(**autoArgs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, inputs, outputs):
""" Run one iteration of TemporalPoolerRegion's compute. Note that if the reset signal is True (1) we assume this iteration represents the *end* of a sequence. The output will contain the pooled representation to this point and any history will then be reset. The output at the next compute will start fresh. """ |
# Decode the optional reset signal (a length-1 array input).
resetSignal = False
if 'resetIn' in inputs:
    if len(inputs['resetIn']) != 1:
        raise Exception("resetIn has invalid length")
    if inputs['resetIn'][0] != 0:
        resetSignal = True
# Clear the output buffer before writing this step's pooled representation.
outputs["mostActiveCells"][:] = numpy.zeros(
    self._columnCount, dtype=GetNTAReal())
if self._poolerType == "simpleUnion":
    # The union pooler writes directly into the output array; forceOutput
    # flushes the accumulated union at the end of a sequence.
    self._pooler.unionIntoArray(inputs["activeCells"],
                                outputs["mostActiveCells"],
                                forceOutput = resetSignal)
else:
    # Fall back to an all-zero predictedActiveCells vector when that
    # optional input is not linked.
    predictedActiveCells = inputs["predictedActiveCells"] if (
        "predictedActiveCells" in inputs) else numpy.zeros(self._inputWidth,
                                                           dtype=uintDType)
    mostActiveCellsIndices = self._pooler.compute(inputs["activeCells"],
                                                  predictedActiveCells,
                                                  self.learningMode)
    # Convert sparse indices into a dense binary output.
    outputs["mostActiveCells"][mostActiveCellsIndices] = 1
if resetSignal:
    # End of sequence: output above reflects the sequence; clear history now.
    self.reset()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSpec(cls):
""" Return the Spec for TemporalPoolerRegion. The parameters collection is constructed based on the parameters specified by the various components (poolerSpec and otherSpec) """ |
spec = cls.getBaseSpec()
p, o = _getAdditionalSpecs()
spec["parameters"].update(p)
spec["parameters"].update(o)
return spec |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computeCapacity(results, threshold):
"""Returns largest number of objects with accuracy above threshold.""" |
# Scan (numObjects, accuracy) pairs in increasing numObjects order, keeping
# the largest passing count (closestAbove) and the first failing count seen
# after it (closestBelow).
closestBelow = None
closestAbove = None
for numObjects, accuracy in sorted(results):
    if accuracy >= threshold:
        if closestAbove is None or closestAbove[0] < numObjects:
            closestAbove = (numObjects, accuracy)
            # A later passing point invalidates any earlier failure.
            closestBelow = None
    else:
        if closestBelow is None:
            closestBelow = (numObjects, accuracy)
# Capacity is only well-defined when bracketed on both sides.
if closestBelow is None or closestAbove is None:
    print closestBelow, threshold, closestAbove
    raise ValueError(
        "Results must include a value above and below threshold of {}".format(threshold))
print " Capacity threshold is between {} and {}".format(closestAbove[0], closestBelow[0])
return closestAbove[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
""" Deactivate all cells. """ |
self.activeCells = np.empty(0, dtype="uint32")
self.activeDeltaSegments = np.empty(0, dtype="uint32")
self.activeFeatureLocationSegments = np.empty(0, dtype="uint32") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, deltaLocation=(), newLocation=(), featureLocationInput=(), featureLocationGrowthCandidates=(), learn=True):
""" Run one time step of the Location Memory algorithm. @param deltaLocation (sorted numpy array) @param newLocation (sorted numpy array) @param featureLocationInput (sorted numpy array) @param featureLocationGrowthCandidates (sorted numpy array) """ |
# Segment activity is computed against the cells from the previous step.
prevActiveCells = self.activeCells
# A delta segment is active only when BOTH its internal (previous-location)
# synapses and its delta (movement) synapses cross the threshold.
self.activeDeltaSegments = np.where(
    (self.internalConnections.computeActivity(
        prevActiveCells, self.connectedPermanence
    ) >= self.activationThreshold)
    &
    (self.deltaConnections.computeActivity(
        deltaLocation, self.connectedPermanence
    ) >= self.activationThreshold))[0]
# When we're moving, the feature-location input has no effect.
if len(deltaLocation) == 0:
    self.activeFeatureLocationSegments = np.where(
        self.featureLocationConnections.computeActivity(
            featureLocationInput, self.connectedPermanence
        ) >= self.activationThreshold)[0]
else:
    self.activeFeatureLocationSegments = np.empty(0, dtype="uint32")
if len(newLocation) > 0:
    # Drive activations by relaying this location SDR.
    self.activeCells = newLocation
    if learn:
        # Learn the delta.
        self._learnTransition(prevActiveCells, deltaLocation, newLocation)
        # Learn the featureLocationInput.
        self._learnFeatureLocationPair(newLocation, featureLocationInput,
                                       featureLocationGrowthCandidates)
elif len(prevActiveCells) > 0:
    if len(deltaLocation) > 0:
        # Drive activations by applying the deltaLocation to the current location.
        # Completely ignore the featureLocationInput. It's outdated, associated
        # with the previous location.
        cellsForDeltaSegments = self.internalConnections.mapSegmentsToCells(
            self.activeDeltaSegments)
        self.activeCells = np.unique(cellsForDeltaSegments)
    else:
        # Keep previous active cells active.
        # Modulate with the featureLocationInput.
        if len(self.activeFeatureLocationSegments) > 0:
            cellsForFeatureLocationSegments = (
                self.featureLocationConnections.mapSegmentsToCells(
                    self.activeFeatureLocationSegments))
            self.activeCells = np.intersect1d(prevActiveCells,
                                              cellsForFeatureLocationSegments)
        else:
            self.activeCells = prevActiveCells
elif len(featureLocationInput) > 0:
    # No location and no movement: drive activations with the
    # featureLocationInput alone.
    cellsForFeatureLocationSegments = (
        self.featureLocationConnections.mapSegmentsToCells(
            self.activeFeatureLocationSegments))
    self.activeCells = np.unique(cellsForFeatureLocationSegments) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize(self):
""" Initialize grid cell modules """ |
# Lazily construct one 2D grid-cell module per configured scale/orientation.
if self._modules is None:
    self._modules = []
    for i in xrange(self.moduleCount):
        self._modules.append(ThresholdedGaussian2DLocationModule(
            cellsPerAxis=self.cellsPerAxis,
            scale=self.scale[i],
            orientation=self.orientation[i],
            anchorInputSize=self.anchorInputSize,
            activeFiringRate=self.activeFiringRate,
            bumpSigma=self.bumpSigma,
            activationThreshold=self.activationThreshold,
            initialPermanence=self.initialPermanence,
            connectedPermanence=self.connectedPermanence,
            learningThreshold=self.learningThreshold,
            sampleSize=self.sampleSize,
            permanenceIncrement=self.permanenceIncrement,
            permanenceDecrement=self.permanenceDecrement,
            maxSynapsesPerSegment=self.maxSynapsesPerSegment,
            bumpOverlapMethod=self.bumpOverlapMethod,
            seed=self.seed))
    # Create a projection matrix for each module used to convert higher
    # dimension displacements to 2D
    if self.dimensions > 2:
        self._projection = [
            self.createProjectionMatrix(dimensions=self.dimensions)
            for _ in xrange(self.moduleCount)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, inputs, outputs):
""" Compute the location based on the 'displacement' and 'anchorInput' by first applying the movement, if 'displacement' is present in the 'input' array and then applying the sensation if 'anchorInput' is present in the input array. The 'anchorGrowthCandidates' input array is used during learning See :meth:`ThresholdedGaussian2DLocationModule.movementCompute` and :meth:`ThresholdedGaussian2DLocationModule.sensoryCompute` """ |
# A truthy resetIn input clears all module state and short-circuits.
if inputs.get("resetIn", False):
    self.reset()
    if self.learningMode:
        # Initialize to random location after reset when learning
        self.activateRandomLocation()
    # send empty output
    outputs["activeCells"][:] = 0
    outputs["learnableCells"][:] = 0
    outputs["sensoryAssociatedCells"][:] = 0
    return
# Movement vector plus sparse indices of the sensory inputs.
displacement = inputs.get("displacement", np.array([]))
anchorInput = inputs.get("anchorInput", np.array([])).nonzero()[0]
anchorGrowthCandidates = inputs.get("anchorGrowthCandidates", np.array([])).nonzero()[0]
# Concatenate the output of all modules
activeCells = np.array([], dtype=np.uint32)
learnableCells = np.array([], dtype=np.uint32)
sensoryAssociatedCells = np.array([], dtype=np.uint32)
# Only process input when data is available
shouldMove = displacement.any()
shouldSense = anchorInput.any() or anchorGrowthCandidates.any()
if shouldMove and len(displacement) != self.dimensions:
    raise TypeError("displacement must have {} dimensions".format(self.dimensions))
# Handles dual phase movement/sensation processing
if self.dualPhase:
    # Alternate between movement and sensation on successive computes.
    if self._sensing:
        shouldMove = False
    else:
        shouldSense = False
    # Toggle between movement and sensation
    self._sensing = not self._sensing
for i in xrange(self.moduleCount):
    module = self._modules[i]
    # Compute movement
    if shouldMove:
        movement = displacement
        if self.dimensions > 2:
            # Project n-dimension displacements to 2D
            movement = np.matmul(self._projection[i], movement)
        module.movementCompute(movement)
    # Compute sensation
    if shouldSense:
        module.sensoryCompute(anchorInput, anchorGrowthCandidates,
                              self.learningMode)
    # Concatenate outputs
    # Each module's cells occupy a contiguous slice of the flat output.
    start = i * self.cellCount
    activeCells = np.append(activeCells,
                            module.getActiveCells() + start)
    learnableCells = np.append(learnableCells,
                               module.getLearnableCells() + start)
    sensoryAssociatedCells = np.append(sensoryAssociatedCells,
                                       module.getSensoryAssociatedCells() + start)
# Write dense binary outputs from the collected indices.
outputs["activeCells"][:] = 0
outputs["activeCells"][activeCells] = 1
outputs["learnableCells"][:] = 0
outputs["learnableCells"][learnableCells] = 1
outputs["sensoryAssociatedCells"][:] = 0
outputs["sensoryAssociatedCells"][sensoryAssociatedCells] = 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setParameter(self, parameterName, index, parameterValue):
""" Set the value of a Spec parameter. """ |
spec = self.getSpec()
if parameterName not in spec['parameters']:
raise Exception("Unknown parameter: " + parameterName)
setattr(self, parameterName, parameterValue) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getOutputElementCount(self, name):
""" Returns the size of the output array """ |
if name in ["activeCells", "learnableCells", "sensoryAssociatedCells"]:
return self.cellCount * self.moduleCount
else:
raise Exception("Invalid output name specified: " + name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
""" Clear all cell activity. """ |
self.L4.reset()
for module in self.L6aModules:
module.reset() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLocationRepresentation(self):
""" Get the full population representation of the location layer. """ |
activeCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
activeCells = np.append(activeCells,
module.getActiveCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return activeCells |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLearnableLocationRepresentation(self):
""" Get the cells in the location layer that should be associated with the sensory input layer representation. In some models, this is identical to the active cells. In others, it's a subset. """ |
learnableCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
learnableCells = np.append(learnableCells,
module.getLearnableCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return learnableCells |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learnObject(self, objectDescription, randomLocation=False, useNoise=False, noisyTrainingTime=1):
""" Train the network to recognize the specified object. Move the sensor to one of its features and activate a random location representation in the location layer. Move the sensor over the object, updating the location representation through path integration. At each point on the object, form reciprocal connections between the represention of the location and the representation of the sensory input. @param objectDescription (dict) For example: {"name": "Object 1", "features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"}, {"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]} @return locationsAreUnique (bool) True if this object was assigned a unique set of locations. False if a location on this object has the same location representation as another location somewhere else. """ |
self.reset()
# Start each object from a random location representation.
self.column.activateRandomLocation()
locationsAreUnique = True
# Noisy/random training benefits from multiple passes over the object.
if randomLocation or useNoise:
    numIters = noisyTrainingTime
else:
    numIters = 1
for i in xrange(numIters):
    for iFeature, feature in enumerate(objectDescription["features"]):
        # Path-integrate to the feature, then sense it with learning on.
        self._move(feature, randomLocation=randomLocation, useNoise=useNoise)
        featureSDR = self.features[feature["name"]]
        self._sense(featureSDR, learn=True, waitForSettle=False)
        # Record the learned location and input representations for later
        # analysis.
        locationRepresentation = self.column.getSensoryAssociatedLocationRepresentation()
        self.locationRepresentations[(objectDescription["name"],
                                      iFeature)].append(locationRepresentation)
        self.inputRepresentations[(objectDescription["name"],
                                   iFeature, feature["name"])] = (
                                     self.column.L4.getWinnerCells())
        # Track whether this location SDR collides with one seen on any
        # previously learned object.
        locationTuple = tuple(locationRepresentation)
        locationsAreUnique = (locationsAreUnique and
                              locationTuple not in self.representationSet)
        self.representationSet.add(tuple(locationRepresentation))
self.learnedObjects.append(objectDescription)
return locationsAreUnique |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _move(self, feature, randomLocation = False, useNoise = True):
""" Move the sensor to the center of the specified feature. If the sensor is currently at another location, send the displacement into the cortical column so that it can perform path integration. """ |
# Target either a uniformly random point on the feature or its center.
if randomLocation:
    locationOnObject = {
        "top": feature["top"] + np.random.rand()*feature["height"],
        "left": feature["left"] + np.random.rand()*feature["width"],
    }
else:
    locationOnObject = {
        "top": feature["top"] + feature["height"]/2.,
        "left": feature["left"] + feature["width"]/2.
    }
if self.locationOnObject is not None:
    # Already on the object: send the displacement for path integration.
    displacement = {"top": locationOnObject["top"] -
                           self.locationOnObject["top"],
                    "left": locationOnObject["left"] -
                            self.locationOnObject["left"]}
    if useNoise:
        params = self.column.movementCompute(displacement,
                                             self.noiseFactor,
                                             self.moduleNoiseFactor)
    else:
        params = self.column.movementCompute(displacement, 0, 0)
    for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)
else:
    # First placement on this object: nothing to integrate yet.
    for monitor in self.monitors.values():
        monitor.afterLocationInitialize()
self.locationOnObject = locationOnObject
for monitor in self.monitors.values():
    monitor.afterLocationChanged(locationOnObject) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sense(self, featureSDR, learn, waitForSettle):
""" Send the sensory input into the network. Optionally, send it multiple times until the network settles. """ |
for monitor in self.monitors.values():
    monitor.beforeSense(featureSDR)
iteration = 0
prevCellActivity = None
while True:
    (inputParams,
     locationParams) = self.column.sensoryCompute(featureSDR, learn)
    if waitForSettle:
        # Compare current activity with the previous iteration to detect
        # settling.
        cellActivity = (set(self.column.getSensoryRepresentation()),
                        set(self.column.getLocationRepresentation()))
        if cellActivity == prevCellActivity:
            # It settled. Don't even log this timestep.
            break
        prevCellActivity = cellActivity
    for monitor in self.monitors.values():
        if iteration > 0:
            monitor.beforeSensoryRepetition()
        monitor.afterInputCompute(**inputParams)
        monitor.afterLocationAnchor(**locationParams)
    iteration += 1
    # Single pass unless settling was requested; cap at maxSettlingTime.
    if not waitForSettle or iteration >= self.maxSettlingTime:
        break |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createObjectMachine(machineType, **kwargs):
""" Return an object machine of the appropriate type. @param machineType (str) A supported ObjectMachine type @param kwargs (dict) Constructor argument for the class that will be instantiated. Keyword parameters specific to each model type should be passed in here. """ |
if machineType not in ObjectMachineTypes.getTypes():
raise RuntimeError("Unknown model type: " + machineType)
return getattr(ObjectMachineTypes, machineType)(**kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getEntropies(m):
""" Recursively get the current and max entropies from every child module :param m: any module :return: (currentEntropy, maxEntropy) """ |
entropy = 0.0
max_entropy = 0.0
for module in m.children():
e, m = getEntropies(module)
entropy += e
max_entropy += m
e, m = getEntropy(m)
entropy += e
max_entropy += m
return entropy, max_entropy |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateBoostStrength(m):
""" Function used to update KWinner modules boost strength after each epoch. Call using :meth:`torch.nn.Module.apply` after each epoch if required For example: ``m.apply(updateBoostStrength)`` :param m: KWinner module """ |
if isinstance(m, KWinnersBase):
if m.training:
m.boostStrength = m.boostStrength * m.boostStrengthFactor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateBoostStrength(self):
""" Update boost strength using given strength factor during training """ |
if self.training:
self.boostStrength = self.boostStrength * self.boostStrengthFactor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def entropy(self):
""" Returns the current total entropy of this layer """ |
if self.k < self.n:
_, entropy = binaryEntropy(self.dutyCycle)
return entropy
else:
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def greedySensorPositions(numSensors, numLocations):
""" Returns an infinite sequence of sensor placements. Each return value is a tuple of locations, one location per sensor. Positions are selected using a simple greedy algorithm. The first priority of the algorithm is "touch every position an equal number of times". The second priority is "each individual sensor should touch each position an equal number of times". @param numSensors (int) The number of sensors @param numLocations (int) The number of locations @return (generator of tuples) The next locations for each sensor. The tuple's length is `numSensors`. """ |
# Global and per-sensor view counts for every location.
locationViewCounts = [0] * numLocations
locationViewCountsBySensor = [[0] * numLocations
                              for _ in xrange(numSensors)]
# Initial placement: distinct random locations, one per sensor.
placement = random.sample(xrange(numLocations), numSensors)
while True:
    yield tuple(placement)
    # Update statistics.
    for sensor, location in enumerate(placement):
        locationViewCounts[location] += 1
        locationViewCountsBySensor[sensor][location] += 1
    # Choose the locations with the lowest view counts. Break ties randomly.
    nextLocationsRanked = sorted(xrange(numLocations),
                                 key=lambda x: (locationViewCounts[x],
                                                random.random()))
    nextLocations = nextLocationsRanked[:numSensors]
    # For each sensor (in random order), choose the location that has touched
    # the least, breaking ties randomly.
    sensors = range(numSensors)
    random.shuffle(sensors)
    for sensor in sensors:
        viewCount = min(locationViewCountsBySensor[sensor][location]
                        for location in nextLocations)
        location = random.choice([x for x in nextLocations
                                  if locationViewCountsBySensor[sensor][x]
                                  == viewCount])
        # Claim the location so no other sensor takes it this round.
        nextLocations.remove(location)
        placement[sensor] = location |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize(self):
""" Initialize the self._tm if not already initialized. """ |
# Lazily construct the temporal memory implementation on first use.
if self._tm is None:
    # Parameters common to every implementation.
    params = {
        "columnCount": self.columnCount,
        "basalInputSize": self.basalInputWidth,
        "apicalInputSize": self.apicalInputWidth,
        "cellsPerColumn": self.cellsPerColumn,
        "activationThreshold": self.activationThreshold,
        "initialPermanence": self.initialPermanence,
        "connectedPermanence": self.connectedPermanence,
        "minThreshold": self.minThreshold,
        "sampleSize": self.sampleSize,
        "permanenceIncrement": self.permanenceIncrement,
        "permanenceDecrement": self.permanenceDecrement,
        "basalPredictedSegmentDecrement": self.basalPredictedSegmentDecrement,
        "apicalPredictedSegmentDecrement": self.apicalPredictedSegmentDecrement,
        "maxSynapsesPerSegment": self.maxSynapsesPerSegment,
        "seed": self.seed,
    }
    # Each backend accepts slightly different extra parameters; imports are
    # deferred so unused backends are never loaded.
    if self.implementation == "ApicalTiebreakCPP":
        params["learnOnOneCell"] = self.learnOnOneCell
        params["maxSegmentsPerCell"] = self.maxSegmentsPerCell
        import htmresearch_core.experimental
        cls = htmresearch_core.experimental.ApicalTiebreakPairMemory
    elif self.implementation == "ApicalTiebreak":
        params["reducedBasalThreshold"] = self.reducedBasalThreshold
        import htmresearch.algorithms.apical_tiebreak_temporal_memory
        cls = htmresearch.algorithms.apical_tiebreak_temporal_memory.ApicalTiebreakPairMemory
    elif self.implementation == "ApicalDependent":
        params["reducedBasalThreshold"] = self.reducedBasalThreshold
        import htmresearch.algorithms.apical_dependent_temporal_memory
        cls = htmresearch.algorithms.apical_dependent_temporal_memory.TripleMemory
    else:
        raise ValueError("Unrecognized implementation %s" % self.implementation)
    self._tm = cls(**params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getOutputElementCount(self, name):
""" Return the number of elements for the given output. """ |
if name in ["activeCells", "predictedCells", "predictedActiveCells",
"winnerCells"]:
return self.cellsPerColumn * self.columnCount
else:
raise Exception("Invalid output name specified: %s" % name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learnSequences(self, sequences):
""" Learns all provided sequences. Always reset the network in between sequences. Sequences format: sequences = [ [ set([16, 22, 32]), # S0, position 0 set([13, 15, 33]) # S0, position 1 ], [ set([6, 12, 52]), # S1, position 0 set([6, 2, 15]) # S1, position 1 ], ] Note that the order of each sequence is important. It denotes the sequence number and will be used during inference to plot accuracy. Parameters: @param sequences (list) Sequences to learn, in the canonical format specified above """ |
# This method goes through four phases:
# 1) We first train L4 on the sequences, over multiple passes
# 2) We then train L2 in one pass.
# 3) We then continue training on L4 so the apical segments learn
# 4) We run inference to store L2 representations for each sequence
#    retrieve L2 representations
# print "1) Train L4 sequence memory"
# We're now using online learning, so both layers should be trying to learn
# at all times.
# Sequences are presented in a fresh random order on every pass.
sequence_order = range(len(sequences))
if self.config["L2Params"]["onlineLearning"]:
    # Train L2 and L4
    self._setLearningMode(l4Learning=True, l2Learning=True)
    for _ in xrange(self.numLearningPoints):
        random.shuffle(sequence_order)
        for i in sequence_order:
            sequence = sequences[i]
            for s in sequence:
                self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                self.network.run(1)
            # This is equivalent to, and faster than, giving the network no input
            # for a period of time.
            self.sendReset()
else:
    # Train L4
    self._setLearningMode(l4Learning=True, l2Learning=False)
    for _ in xrange(self.numLearningPoints):
        random.shuffle(sequence_order)
        for i in sequence_order:
            sequence = sequences[i]
            for s in sequence:
                self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                self.network.run(1)
            # This is equivalent to, and faster than, giving the network no input
            # for a period of time.
            self.sendReset()
    # Train L2
    self._setLearningMode(l4Learning=False, l2Learning=True)
    for i in sequence_order:
        sequence = sequences[i]
        for s in sequence:
            self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
            self.network.run(1)
        self.sendReset()
    # Train L4 apical segments
    self._setLearningMode(l4Learning=True, l2Learning=False)
    for _ in xrange(5):
        for i in sequence_order:
            sequence = sequences[i]
            for s in sequence:
                self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                self.network.run(1)
            self.sendReset()
# Phase 4: inference pass with learning off, recording each sequence's
# L2 representation for later accuracy measurement.
self._setLearningMode(l4Learning=False, l2Learning=False)
self.sendReset()
for sequenceNum, sequence in enumerate(sequences):
    for s in sequence:
        self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
        self.network.run(1)
    self.objectL2Representations[sequenceNum] = self.getL2Representations()
    self.sendReset()
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getL4PredictedActiveCells(self):
""" Returns the predicted active cells in each column in L4. """ |
predictedActive = []
for i in xrange(self.numColumns):
region = self.network.regions["L4Column_" + str(i)]
predictedActive.append(
region.getOutputData("predictedActiveCells").nonzero()[0])
return predictedActive |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setLearningMode(self, l4Learning = False, l2Learning=False):
""" Sets the learning mode for L4 and L2. """ |
# L4 regions expose learning via the "learn" parameter while L2 regions use
# "learningMode"; index 0 selects the default parameter instance.
for l4Region in self.L4Columns:
  l4Region.setParameter("learn", 0, l4Learning)
for l2Region in self.L2Columns:
  l2Region.setParameter("learningMode", 0, l2Learning)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printDiagnosticsAfterTraining(exp, verbosity=0):
"""Useful diagnostics a trained system for debugging.""" |
print "Number of connected synapses per cell"
l2 = exp.getAlgorithmInstance("L2")
numConnectedCells = 0
connectedSynapses = 0
for c in range(4096):
cp = l2.numberOfConnectedProximalSynapses([c])
if cp>0:
# print c, ":", cp
numConnectedCells += 1
connectedSynapses += cp
print "Num L2 cells with connected synapses:", numConnectedCells
if numConnectedCells > 0:
print "Avg connected synapses per connected cell:", float(connectedSynapses)/numConnectedCells
print |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trainSequences(sequences, exp, idOffset=0):
"""Train the network on all the sequences""" |
for seqId in sequences:
  # Present each sequence enough times to learn high-order structure and to
  # prune extra (spurious) predictions.
  numPresentations = 3 * len(sequences[seqId])
  for _ in xrange(numPresentations):
    # Each call generates fresh random locations for this presentation.
    sdrs = sequences.provideObjectsToLearn([seqId])
    exp.learnObjects({seqId + idOffset: sdrs[seqId]}, reset=False)
    # The TM needs a reset between presentations; other regions do not.
    exp.TMColumns[0].reset()
  # L2 gets a reset when switching to a new sequence.
  exp.sendReset()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trainObjects(objects, exp, numRepeatsPerObject, experimentIdOffset=0):
""" Train the network on all the objects by randomly traversing points on each object. We offset the id of each object to avoid confusion with any sequences that might have been learned. """ |
# Build, for every object, a set of random traversals over its
# (location, feature) SDR pairs so each object is visited several times
# before moving on. Object ids are offset to keep them distinct from any
# learned sequences.
sdrsPerObject = objects.provideObjectsToLearn()
traversals = {
    objId + experimentIdOffset:
        objects.randomTraversal(sdrsPerObject[objId], numRepeatsPerObject)
    for objId in sdrsPerObject}
# Train the network on all the traversal SDRs for all the objects.
exp.learnObjects(traversals)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inferObject(exp, objectId, objects, objectName):
""" Run inference on the given object. objectName is the name of this object in the experiment. """ |
# Visit every (location, feature) pair on the object exactly once, in a
# random order, through a single cortical column (index 0). The number of
# sensations equals the number of points on the object.
shuffledPairs = list(objects[objectId])
random.shuffle(shuffledPairs)
sensations = {0: shuffledPairs}
inferenceSDRs = objects.provideObjectToInfer({
    "numSteps": len(shuffledPairs),
    "pairs": sensations,
    "includeRandomLocation": False,
})
exp.infer(inferenceSDRs, objectName=objectName)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createSuperimposedSensorySDRs(sequenceSensations, objectSensations):
""" Given two lists of sensations, create a new list where the sensory SDRs are union of the individual sensory SDRs. Keep the location SDRs from the object. A list of sensations has the following format: [ { 0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0 }, { 0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0 }, ] We assume there is only one cortical column, and that the two input lists have identical length. """ |
assert len(sequenceSensations) == len(objectSensations)
superimposedSensations = []
for i, objectSensation in enumerate(objectSensations):
# print "sequence loc:", sequenceSensations[i][0][0]
# print "object loc: ",objectSensation[0][0]
# print
# print "sequence feat:", sequenceSensations[i][0][1]
# print "object feat: ",objectSensation[0][1]
# print
newSensation = {
0: (objectSensation[0][0],
sequenceSensations[i][0][1].union(objectSensation[0][1]))
}
# newSensation = {
# 0: (objectSensation[0][0],objectSensation[0][1])
# }
superimposedSensations.append(newSensation)
# print newSensation
# print
# print
return superimposedSensations |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperimentPool(numSequences, numFeatures, numLocations, numObjects, numWorkers=7, nTrials=1, seqLength=10, figure="", numRepetitions=1, synPermProximalDecL2=[0.001], minThresholdProximalL2=[10], sampleSizeProximalL2=[15], inputSize=[1024], basalPredictedSegmentDecrement=[0.0006], resultsName="convergence_results.pkl"):
""" Run a bunch of experiments using a pool of numWorkers multiple processes. For numSequences, numFeatures, and numLocations pass in a list containing valid values for that parameter. The cross product of everything is run, and each combination is run nTrials times. Returns a list of dict containing detailed results from each experiment. Also pickles and saves all the results in resultsName for later analysis. If numWorkers == 1, the experiments will be run in a single thread. This makes it easier to debug. Example: results = runExperimentPool( numSequences=[10, 20], numFeatures=[5, 13], numWorkers=8, nTrials=5) """ |
# Create function arguments for every possibility
args = []
for bd in basalPredictedSegmentDecrement:
for i in inputSize:
for thresh in minThresholdProximalL2:
for dec in synPermProximalDecL2:
for s in sampleSizeProximalL2:
for o in reversed(numSequences):
for l in numLocations:
for f in numFeatures:
for no in numObjects:
for t in range(nTrials):
args.append(
{"numSequences": o,
"numFeatures": f,
"numObjects": no,
"trialNum": t,
"seqLength": seqLength,
"numLocations": l,
"sampleSizeProximalL2": s,
"synPermProximalDecL2": dec,
"minThresholdProximalL2": thresh,
"numRepetitions": numRepetitions,
"figure": figure,
"inputSize": i,
"basalPredictedSegmentDecrement": bd,
}
)
print "{} experiments to run, {} workers".format(len(args), numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
result = pool.map(runExperiment, args)
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperiment5A(dirName):
""" This runs the first experiment in the section "Simulations with Sensorimotor Sequences", an example sensorimotor sequence. """ |
# Results are written to a pkl file (under the absolute path dirName) which
# is later used to generate the plots.
resultsFilename = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
experimentArgs = {
    "numSequences": 0,
    "seqLength": 10,
    "numFeatures": 100,
    "trialNum": 4,
    "numObjects": 50,
    "numLocations": 100,
}
results = runExperiment(experimentArgs)
# Save results for plotting and possible later debugging.
with open(resultsFilename, "wb") as f:
  cPickle.dump(results, f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperiment5B(dirName):
""" This runs the second experiment in the section "Simulations with Sensorimotor Sequences". It averages over many parameter combinations. This experiment could take several hours. You can run faster versions by reducing the number of trials. """ |
# Results are written to a pkl file (under the absolute path dirName) which
# is later used to generate the plots.
resultsName = os.path.join(dirName,
                           "sensorimotor_batch_results_more_objects.pkl")
# Run 10 trials for each feature/object/location combination, then analyze.
# Comment the call out when re-running analysis on already-saved results --
# very useful for debugging the plots.
runExperimentPool(
    numSequences=[0],
    numObjects=[110, 130, 200, 300],
    numFeatures=[10, 50, 100, 150, 500],
    numLocations=[100],
    nTrials=10,
    numWorkers=cpu_count() - 1,
    numRepetitions=10,
    resultsName=resultsName)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperiment6(dirName):
""" This runs the experiment the section "Simulations with Combined Sequences", an example stream containing a mixture of temporal and sensorimotor sequences. """ |
# Results are written to a pkl file (under the absolute path dirName) which
# is later used to generate the plots.
resultsFilename = os.path.join(dirName, "combined_results.pkl")
experimentArgs = {
    "numSequences": 50,
    "seqLength": 10,
    "numObjects": 50,
    "numFeatures": 500,
    "trialNum": 8,
    "numLocations": 100,
    "settlingTime": 1,
    "figure": "6",
    "numRepetitions": 30,
    "basalPredictedSegmentDecrement": 0.001,
    "stripStats": False,
}
results = runExperiment(experimentArgs)
# Save results for plotting and possible later debugging.
with open(resultsFilename, "wb") as f:
  cPickle.dump(results, f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperimentS(dirName):
""" This runs an experiment where the network is trained on stream containing a mixture of temporal and sensorimotor sequences. """ |
# Results are put into a pkl file which can be used to generate the plots.
# dirName is the absolute path where the pkl file will be placed.
resultsFilename = os.path.join(dirName, "superimposed_training.pkl")
results = runExperiment(
{
"numSequences": 50,
"numObjects": 50,
"seqLength": 10,
"numFeatures": 100,
"trialNum": 8,
"numLocations": 100,
"numRepetitions": 30,
"sampleSizeProximalL2": 15,
"minThresholdProximalL2": 10,
"figure": "S",
"stripStats": False,
}
)
# Pickle results for plotting and possible later debugging
with open(resultsFilename, "wb") as f:
cPickle.dump(results, f)
# Debugging
with open(resultsFilename, "rb") as f:
r = cPickle.load(f)
r.pop("objects")
r.pop("sequences")
stat = r.pop("statistics")
pprint.pprint(r)
sObject = 0
sSequence = 0
for i in range(0, 50):
sObject += sum(stat[i]['L4 PredictedActive C0'])
for i in range(50, 100):
sSequence += sum(stat[i]['L4 PredictedActive C0'])
print sObject, sSequence |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runExperimentSP(dirName):
""" This runs a pool of experiments where the network is trained on stream containing a mixture of temporal and sensorimotor sequences. """ |
# Results are put into a pkl file which can be used to generate the plots.
# dirName is the absolute path where the pkl file will be placed.
resultsFilename = os.path.join(dirName, "superimposed_128mcs.pkl")
# We run a bunch of trials with these combinations
numTrials = 10
featureRange = [1000]
objectRange = [50]
# Comment this out if you are re-running analysis on already saved results.
runExperimentPool(
numSequences=objectRange,
numObjects=objectRange,
numFeatures=featureRange,
numLocations=[100],
nTrials=numTrials,
numWorkers=cpu_count() - 1,
resultsName=resultsFilename,
figure="S",
numRepetitions=30,
sampleSizeProximalL2=[15],
minThresholdProximalL2=[10],
synPermProximalDecL2=[0.001],
# basalPredictedSegmentDecrement=[0.0, 0.001, 0.002, 0.003, 0.004, 0.005],
basalPredictedSegmentDecrement=[0.0, 0.001, 0.002, 0.003, 0.004, 0.005, 0.01, 0.02, 0.04, 0.08, 0.12],
inputSize=[128],
)
print "Done with experiments" |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self,):
""" Reset the state of all cells. This is normally used between sequences while training. All internal states are reset to 0. """ |
# Zero the active/predicted/learn states and confidences for both the
# current ('t') and previous ('t-1') timesteps.
self.activeState['t-1'].fill(0)
self.activeState['t'].fill(0)
self.predictedState['t-1'].fill(0)
self.predictedState['t'].fill(0)
self.learnState['t-1'].fill(0)
self.learnState['t'].fill(0)
self.confidence['t-1'].fill(0)
self.confidence['t'].fill(0)
# Flush the segment update queue
self.segmentUpdates = {}
self._internalStats['nInfersSinceReset'] = 0
#To be removed
self._internalStats['curPredictionScore'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
# When a reset occurs, set prevSequenceSignature to the signature of the
# just-completed sequence and start accumulating histogram for the next
# sequence.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
  if self._internalStats['confHistogram'].sum() > 0:
    sig = self._internalStats['confHistogram'].copy()
    # BUG FIX: ndarray.reshape() returns a new array and does NOT modify the
    # array in place; the original call discarded its result, so the saved
    # signature was never flattened. Assign the result so prevSequenceSignature
    # is a 1-D vector with one entry per cell, as intended.
    sig = sig.reshape(self.numberOfCols * self.cellsPerColumn)
    self._internalStats['prevSequenceSignature'] = sig
  self._internalStats['confHistogram'].fill(0)
self.resetCalled = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printComputeEnd(self, output, learn=False):
""" Called at the end of inference to print out various diagnostic information based on the current verbosity level. """ |
# Nothing is printed below verbosity 3; levels 4 and 5 add progressively
# more detail (cell and segment dumps).
if self.verbosity >= 3:
  print "----- computeEnd summary: "
  # A column "bursts" when every one of its cells is active, so the
  # per-column minimum over cells is 1 exactly for bursting columns.
  print "numBurstingCols: %s, " % (self.activeState['t'].min(axis=1).sum()),
  print "curPredScore2: %s, " % (self._internalStats['curPredictionScore2']),
  print "curFalsePosScore: %s, " % (self._internalStats['curFalsePositiveScore']),
  print "1-curFalseNegScore: %s, " % (1-self._internalStats['curFalseNegativeScore']),
  print "numPredictedCells[t-1]: %s" % (self.predictedState['t-1'].sum()),
  print "numSegments: ",self.getNumSegments()
  print "----- activeState (%d on) ------" \
      % (self.activeState['t'].sum())
  self.printActiveIndices(self.activeState['t'])
  if self.verbosity >= 5:
    self.printState(self.activeState['t'])
  print "----- predictedState (%d on)-----" \
      % (self.predictedState['t'].sum())
  self.printActiveIndices(self.predictedState['t'])
  if self.verbosity >= 5:
    self.printState(self.predictedState['t'])
  print "----- cell confidence -----"
  self.printActiveIndices(self.confidence['t'], andValues=True)
  if self.verbosity >= 5:
    self.printConfidence(self.confidence['t'])
  # Element-wise product keeps the previous-timestep confidence only for
  # cells that are active now.
  print "----- confidence[t-1] for currently active cells -----"
  cc = self.confidence['t-1'] * self.activeState['t']
  self.printActiveIndices(cc, andValues=True)
  # Verbosity 4 limits the cell dump to predicted segments; 5+ dumps all.
  if self.verbosity == 4:
    print "Cells, predicted segments only:"
    self.printCells(predictedOnly=True)
  elif self.verbosity >= 5:
    print "Cells, all segments:"
    self.printCells(predictedOnly=False)
  print
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computePhase2(self, doLearn=False):
""" This is the phase 2 of learning, inference and multistep prediction. During this phase, all the cell with lateral support have their predictedState turned on and the firing segments are queued up for updates. Parameters: doLearn: Boolean flag to queue segment updates during learning retval: ? """ |
# Phase 2: compute predicted state for each cell
# - if a segment has enough horizontal connections firing because of
#   bottomUpInput, it's set to be predicting, and we queue up the segment
#   for reinforcement,
# - if pooling is on, try to find the best weakly activated segment to
#   reinforce it, else create a new pooling segment.
for c in xrange(self.numberOfCols):
  buPredicted = False # whether any cell in the column is predicted
  # NOTE(review): buPredicted is set but never read in this phase -- confirm
  # whether it is vestigial before removing.
  for i in xrange(self.cellsPerColumn):
    # Iterate over each of the segments of this cell
    maxConfidence = 0
    for s in self.cells[c][i]:
      # sum(connected synapses) >= activationThreshold?
      if self.isSegmentActive(s, self.activeState['t']):
        self.predictedState['t'][c,i] = 1
        buPredicted = True
        # Track the largest duty cycle among this cell's active segments;
        # readOnly=True reads the running average without updating it.
        maxConfidence = max(maxConfidence, s.dutyCycle(readOnly=True))
        if doLearn:
          s.totalActivations += 1 # increment activationFrequency
          s.lastActiveIteration = self.iterationIdx
          # mark this segment for learning
          activeUpdate = self.getSegmentActiveSynapses(c,i,s,'t')
          # phase1Flag=False marks this as a phase-2 (prediction) update
          # rather than a phase-1 (activation) update.
          activeUpdate.phase1Flag = False
          self.addToSegmentUpdates(c, i, activeUpdate)
    # Store the max confidence seen among all the weak and strong segments
    # as the cell's confidence.
    self.confidence['t'][c,i] = maxConfidence
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def columnConfidences(self, cellConfidences=None):
""" Compute the column confidences given the cell confidences. If None is passed in for cellConfidences, it uses the stored cell confidences from the last compute. Parameters: cellConfidencs : cell confidences to use, or None to use the the current cell confidences. retval: : Column confidence scores. """ |
if cellConfidences is None:
cellConfidences = self.confidence['t']
colConfidences = cellConfidences.sum(axis=1)
# Make the max column confidence 1.0
#colConfidences /= colConfidences.max()
return colConfidences |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trimSegments(self, minPermanence=None, minNumSyns=None):
""" This method deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining. Parameters: minPermanence: Any syn whose permamence is 0 or < minPermanence will be deleted. If None is passed in, then self.connectedPerm is used. minNumSyns: Any segment with less than minNumSyns synapses remaining in it will be deleted. If None is passed in, then self.activationThreshold is used. retval: (numSegsRemoved, numSynsRemoved) """ |
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c,i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self.trimSegmentsInCell(colIdx=c, cellIdx=i,
segList=self.cells[c][i], minPermanence=minPermanence,
minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
return totalSegsRemoved, totalSynsRemoved |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checkPrediction2(self, patternNZs, output=None, confidence=None, details=False):
""" This function will replace checkPrediction. This function produces goodness-of-match scores for a set of input patterns, by checking for their presense in the current and predicted output of the TP. Returns a global count of the number of extra and missing bits, the confidence scores for each input pattern, and (if requested) the bits in each input pattern that were not present in the TP's prediction. todo: Add option to check predictedState only. Parameters: ========== patternNZs: a list of input patterns that we want to check for. Each element is a list of the non-zeros in that pattern. output: The output of the TP. If not specified, then use the TP's current output. This can be specified if you are trying to check the prediction metric for an output from the past. confidence: The cell confidences. If not specified, then use the TP's current self.confidence. This can be specified if you are trying to check the prediction metrics for an output from the past. details: if True, also include details of missing bits per pattern. Return value: ============ The following list is returned: [ totalExtras, totalMissing, ] totalExtras: a global count of the number of 'extras', i.e. bits that are on in the current output but not in the or of all the passed in patterns totalMissing: a global count of all the missing bits, i.e. the bits that are on in the or of the patterns, but not in the current output conf_i the confidence score for the i'th pattern in patternsToCheck missing_i the bits in the i'th pattern that were missing in the output. This list is only returned if details is True. """ |
# Get the non-zeros in each pattern
numPatterns = len(patternNZs)
# Compute the union of all the expected patterns
orAll = set()
orAll = orAll.union(*patternNZs)
# Get the list of active columns in the output
if output is None:
  assert self.currentOutput is not None
  output = self.currentOutput
# Collapse cells to columns: a column is active if any of its cells is
# (nonzero row sum).
output = set(output.sum(axis=1).nonzero()[0])
# Compute the total extra and missing in the output
totalExtras = len(output.difference(orAll))
totalMissing = len(orAll.difference(output))
# Get the percent confidence level per column by summing the confidence levels
# of the cells in the column. During training, each segment's confidence
# number is computed as a running average of how often it correctly
# predicted bottom-up activity on that column. A cell's confidence number
# is taken from the first active segment found in the cell. Note that
# confidence will only be non-zero for predicted columns.
if confidence is None:
  confidence = self.confidence['t']
# Set the column confidence to be the max of the cell confidences in that
# column.
colConfidence = self.columnConfidences(confidence)
# Assign confidences to each pattern
confidences = []
for i in xrange(numPatterns):
  # Sum of the column confidences for this pattern
  positivePredictionSum = colConfidence[patternNZs[i]].sum()
  # How many columns in this pattern
  positiveColumnCount = len(patternNZs[i])
  # Sum of all the column confidences
  totalPredictionSum = colConfidence.sum()
  # Total number of columns
  totalColumnCount = len(colConfidence)
  # Everything not in this pattern counts as "negative" confidence mass.
  negativePredictionSum = totalPredictionSum - positivePredictionSum
  negativeColumnCount = totalColumnCount - positiveColumnCount
  # Compute the average confidence score per column for this pattern
  if positiveColumnCount != 0:
    positivePredictionScore = positivePredictionSum/positiveColumnCount
  else:
    positivePredictionScore = 0.0
  # Compute the average confidence score per column for the other patterns
  if negativeColumnCount != 0:
    negativePredictionScore = negativePredictionSum/negativeColumnCount
  else:
    negativePredictionScore = 0.0
  # Net score: average confidence inside the pattern minus average outside.
  predictionScore = positivePredictionScore - negativePredictionScore
  # Each entry is a (net, positive, negative) score triple for pattern i.
  confidences.append((predictionScore,
                      positivePredictionScore,
                      negativePredictionScore))
# Include detail? (bits in each pattern that were missing from the output)
if details:
  missingPatternBits = [set(pattern).difference(output) \
                        for pattern in patternNZs]
  return (totalExtras, totalMissing, confidences, missingPatternBits)
else:
  return (totalExtras, totalMissing, confidences)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isSegmentActive(self, seg, activeState):
""" A segment is active if it has >= activationThreshold connected synapses that are active due to activeState. Notes: studied various cutoffs, none of which seem to be worthwhile list comprehension didn't help either """ |
# Computing in C - *much* faster
return isSegmentActive(seg.syns, activeState,
self.connectedPerm, self.activationThreshold) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getActiveSegment(self, c, i, timeStep):
""" For a given cell, return the segment with the strongest _connected_ activation, i.e. sum up the activations of the connected synapses of the segments only. That is, a segment is active only if it has enough connected synapses. """ |
# todo: put back preference for sequence segments.
nSegments = len(self.cells[c][i])
bestActivation = self.activationThreshold
which = -1
for j,s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, self.activeState[timeStep], connectedSynapsesOnly = True)
if activity >= bestActivation:
bestActivation = activity
which = j
if which != -1:
return self.cells[c][i][which]
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chooseCellsToLearnFrom(self, c,i,s, n, timeStep):
"""Choose n random cells to learn from. Returns tuples of (column index, cell index). """ |
# Nothing requested: return no candidates.
if n <= 0:
  return []
tmpCandidates = [] # tmp because we'll refine just below with activeSynapses
# Candidate pool is every cell in the learn state at the requested timestep;
# numpy.where yields parallel (column, cell) index arrays.
if timeStep == 't-1':
  tmpCandidates = numpy.where(self.learnState['t-1'] == 1)
else:
  tmpCandidates = numpy.where(self.learnState['t'] == 1)
# Candidates can be empty at this point, in which case we return
# an empty segment list. adaptSegments will do nothing when getting
# that list.
if len(tmpCandidates[0]) == 0:
  return []
if s is None: # new segment
  cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
  # We exclude any synapse that is already in this segment.
  synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
  cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1]) \
           if (syn[0], syn[1]) not in synapsesAlreadyInSegment]
if n == 1: # so that we don't shuffle if only one is needed
  idx = self._random.getUInt32(len(cands))
  return [cands[idx]] # col and cell idx in col
# If we need more than one candidate
# NOTE: do not remove or reorder the RNG calls below -- the extra draw keeps
# this Python implementation's random sequence aligned with the C++ one.
self._random.getUInt32(10) # this required to line RNG with C++ (??)
indices = array([j for j in range(len(cands))], dtype='uint32')
tmp = zeros(min(n, len(indices)), dtype='uint32')
# Sample without shuffling the candidate list itself.
self._random.getUInt32Sample(indices, tmp, True)
return [cands[j] for j in tmp]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getBestMatchingCell(self, c, activeState):
"""Find weakly activated cell in column. Returns index and segment of most activated segment above minThreshold. """ |
# Collect all cells in column c that have at least minThreshold in the most
# activated segment
bestActivityInCol = self.minThreshold
bestSegIdxInCol = -1
bestCellInCol = -1
for i in xrange(self.cellsPerColumn):
  # Find this cell's most active segment (strict >, so ties between a
  # cell's segments go to the earlier segment).
  maxSegActivity = 0
  maxSegIdx = 0
  for j,s in enumerate(self.cells[c][i]):
    # Activity counts synapses below connectedPerm too (aggressive match).
    activity = self.getSegmentActivityLevel(s, activeState, connectedSynapsesOnly =False)
    if self.verbosity >= 6:
      print "  Segment Activity for column ", c, " cell ", i, " segment ", " j is ", activity
    if activity > maxSegActivity:
      maxSegActivity = activity
      maxSegIdx = j
  # >= comparison: ties between cells go to the later cell.
  if maxSegActivity >= bestActivityInCol:
    bestActivityInCol = maxSegActivity
    bestSegIdxInCol = maxSegIdx
    bestCellInCol = i
if self.verbosity >= 6:
  print "Best Matching Cell In Col: ", bestCellInCol
# (None, None) when no cell reached minThreshold.
if bestCellInCol == -1:
  return (None, None)
else:
  return bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getBestMatchingSegment(self, c, i, activeState):
"""For the given cell, find the segment with the largest number of active synapses. This routine is aggressive in finding the best match. The permanence value of synapses is allowed to be below connectedPerm. The number of active synapses is allowed to be below activationThreshold, but must be above minThreshold. The routine returns the segment index. If no segments are found, then an index of -1 is returned. """ |
maxActivity, which = self.minThreshold, -1
for j,s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLeastUsedCell(self, c):
"""Return the index of the least used cell (fewest segments) in column c.""" |
# Count the segments on each cell in column c.
segmentsPerCell = numpy.zeros(self.cellsPerColumn, dtype='uint32')
for i in range(self.cellsPerColumn):
  segmentsPerCell[i] = self.getNumSegmentsInCell(c,i)
# Indices of all cells tied for the minimum segment count.
cellMinUsage = numpy.where(segmentsPerCell==segmentsPerCell.min())[0]
# return cellMinUsage[0] # return the first cell with minimum usage
# if multiple cells has minimum usage, randomly pick one
# NOTE(review): the first getUInt32 draw below is discarded; presumably it
# keeps the RNG sequence aligned with another implementation -- confirm
# before removing it.
self._random.getUInt32(len(cellMinUsage))
return cellMinUsage[self._random.getUInt32(len(cellMinUsage))]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateSynapses(self, segment, synapses, delta):
"""Update a set of synapses of the given segment, delta can be permanenceInc, or permanenceDec. retval: True if synapse reached 0 """ |
reached0 = False
if delta > 0:
for synapse in synapses:
segment[synapse][2] = newValue = segment[synapse][2] + delta
# Cap synapse permanence at permanenceMax
if newValue > self.permanenceMax:
segment[synapse][2] = self.permanenceMax
else:
for synapse in synapses:
segment[synapse][2] = newValue = segment[synapse][2] + delta
# Cap min synapse permanence to 0 in case there is no global decay
if newValue < 0:
segment[synapse][2] = 0
reached0 = True
return reached0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adaptSegment(self, segUpdate, positiveReinforcement):
"""This function applies segment update information to a segment in a cell. If positiveReinforcement is true then synapses on the active list get their permanence counts incremented by permanenceInc. All other synapses get their permanence counts decremented by permanenceDec. If positiveReinforcement is false, then synapses on the active list get their permanence counts decremented by permanenceDec. After this step, any synapses in segmentUpdate that do yet exist get added with a permanence count of initialPerm. Parameters: segUpdate: SegmentUpdate instance positiveReinforcement: True for positive enforcement, false for negative re-enforcement retval: True if some synapses were decremented to 0 and the segment is a candidate for trimming """ |
# This will be set to True if we detect that any synapses were decremented
# to 0 (segment becomes a trim candidate).
trimSegment = False
# segUpdate.segment is None when creating a new segment
c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment
# update.activeSynapses can be empty.
# If not, it can contain either or both integers and tuples.
# The integers are indices of synapses to update.
# The tuples represent new synapses to create (src col, src cell in col).
# We pre-process to separate these various element types.
# synToCreate is not empty only if positiveReinforcement is True.
# NOTE: the synapse indices start at *1* to skip the segment flags.
activeSynapses = segUpdate.activeSynapses
synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])
if segment is not None: # modify an existing segment
  if positiveReinforcement:
    if self.verbosity >= 4:
      print "Reinforcing segment for cell[%d,%d]" %(c,i),
      segment.printSegment()
    # Update frequency and positiveActivations
    segment.positiveActivations += 1 # positiveActivations += 1
    segment.dutyCycle(active=True)
    # First, decrement synapses that are not active
    # s is a synapse *index*, with index 0 in the segment being the tuple
    # (segId, sequence segment flag). See below, creation of segments.
    lastSynIndex = len(segment.syns) - 1
    inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                          if s not in synToUpdate]
    trimSegment = segment.updateSynapses(inactiveSynIndices,
                                         -self.permanenceDec)
    # Now, increment active synapses
    activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
    segment.updateSynapses(activeSynIndices, self.permanenceInc)
    # Finally, create new synapses if needed
    # syn is now a tuple (src col, src cell)
    synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
    for newSyn in synsToAdd:
      segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)
    if self.verbosity >= 4:
      print "   after",
      segment.printSegment()
  else: # positiveReinforcement is False
    desc = ""
    if self.verbosity >= 4:
      print "Negatively Reinforcing %s segment for cell[%d,%d]" \
            % (desc, c,i),
      segment.printSegment()
    # Decrease frequency count
    # NOTE(review): active=True is passed here as well, same as the
    # positive branch -- confirm this is the intended duty-cycle update
    # for negative reinforcement.
    segment.dutyCycle(active=True)
    # We decrement all the "active" that were passed in
    trimSegment = segment.updateSynapses(synToUpdate,
                                         -self.permanenceDec)
    if self.verbosity >= 4:
      print "   after",
      segment.printSegment()
else: # segment is None: create a new segment
  newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment)
  # numpy.float32 important so that we can match with C++
  for synapse in activeSynapses:
    newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)
  if self.verbosity >= 3:
    print "New segment for cell[%d,%d]" %(c,i),
    newSegment.printSegment()
  self.cells[c][i].append(newSegment)
return trimSegment
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSegmentInfo(self, collectActiveData = False):
"""Returns information about the distribution of segments, synapses and permanence values in the current TP. If requested, also returns information regarding the number of currently active segments and synapses. The method returns the following tuple: ( nSegments, # total number of segments nSynapses, # total number of synapses nActiveSegs, # total no. of active segments nActiveSynapses, # total no. of active synapses distSegSizes, # a dict where d[n] = number of segments with n synapses distNSegsPerCell, # a dict where d[n] = number of cells with n segments distPermValues, # a dict where d[p] = number of synapses with perm = p/10 distAges, # a list of tuples (ageRange, numSegments) ) nActiveSegs and nActiveSynapses are 0 if collectActiveData is False """ |
nSegments, nSynapses = 0, 0
nActiveSegs, nActiveSynapses = 0, 0
distSegSizes, distNSegsPerCell = {}, {}
distPermValues = {} # Num synapses with given permanence values
# Segment ages (iterations since last activation) are bucketed into 20
# equal-width ranges covering the current learning-iteration span.
numAgeBuckets = 20
distAges = []
ageBucketSize = int((self.lrnIterationIdx+20) / 20)
for i in range(numAgeBuckets):
  distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])
for c in xrange(self.numberOfCols):
  for i in xrange(self.cellsPerColumn):
    if len(self.cells[c][i]) > 0:
      nSegmentsThisCell = len(self.cells[c][i])
      nSegments += nSegmentsThisCell
      # Histogram of segments-per-cell.
      if distNSegsPerCell.has_key(nSegmentsThisCell):
        distNSegsPerCell[nSegmentsThisCell] += 1
      else:
        distNSegsPerCell[nSegmentsThisCell] = 1
      for seg in self.cells[c][i]:
        nSynapsesThisSeg = seg.getNumSynapses()
        nSynapses += nSynapsesThisSeg
        # Histogram of synapses-per-segment.
        if distSegSizes.has_key(nSynapsesThisSeg):
          distSegSizes[nSynapsesThisSeg] += 1
        else:
          distSegSizes[nSynapsesThisSeg] = 1
        # Accumulate permanence value histogram (key = perm rounded to
        # tenths, i.e. d[p] counts synapses with perm ~= p/10).
        for syn in seg.syns:
          p = int(syn[2]*10)
          if distPermValues.has_key(p):
            distPermValues[p] += 1
          else:
            distPermValues[p] = 1
        # Accumulate segment age histogram
        age = self.lrnIterationIdx - seg.lastActiveIteration
        ageBucket = int(age/ageBucketSize)
        distAges[ageBucket][1] += 1
        # Get active synapse statistics if requested
        if collectActiveData:
          if self.isSegmentActive(seg, self.infActiveState['t']):
            nActiveSegs += 1
          for syn in seg.syns:
            if self.activeState['t'][syn[0]][syn[1]] == 1:
              nActiveSynapses += 1
return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
        distSegSizes, distNSegsPerCell, distPermValues, distAges)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateSynapses(self, synapses, delta):
"""Update a set of synapses in the segment. @param tp The owner TP @param synapses List of synapse indices to update @param delta How much to add to each permanence @returns True if synapse reached 0 """ |
reached0 = False
if delta > 0:
for synapse in synapses:
self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
# Cap synapse permanence at permanenceMax
if newValue > self.tp.permanenceMax:
self.syns[synapse][2] = self.tp.permanenceMax
else:
for synapse in synapses:
self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
# Cap min synapse permanence to 0 in case there is no global decay
if newValue <= 0:
self.syns[synapse][2] = 0
reached0 = True
return reached0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maxEntropy(n,k):
""" The maximum enropy we could get with n units and k winners """ |
s = float(k)/n
if s > 0.0 and s < 1.0:
entropy = - s * math.log(s,2) - (1 - s) * math.log(1 - s,2)
else:
entropy = 0
return n*entropy |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def binaryEntropy(x):
""" Calculate entropy for a list of binary random variables :param x: (torch tensor) the probability of the variable to be 1. :return: entropy: (torch tensor) entropy, sum(entropy) """ |
entropy = - x*x.log2() - (1-x)*(1-x).log2()
entropy[x*(1 - x) == 0] = 0
return entropy, entropy.sum() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotDutyCycles(dutyCycle, filePath):
""" Create plot showing histogram of duty cycles :param dutyCycle: (torch tensor) the duty cycle of each unit :param filePath: (str) Full filename of image file """ |
# Total entropy of the duty-cycle distribution, reported in the title.
_,entropy = binaryEntropy(dutyCycle)
# Fixed bin range: duty cycles are expected to fall in [0, 0.3] here;
# values outside that range are silently dropped from the histogram.
bins = np.linspace(0.0, 0.3, 200)
plt.hist(dutyCycle, bins, alpha=0.5, label='All cols')
plt.title("Histogram of duty cycles, entropy=" + str(float(entropy)))
plt.xlabel("Duty cycle")
plt.ylabel("Number of units")
# Write the figure to disk and release it.
plt.savefig(filePath)
plt.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, inputVector, learn, activeArray):
""" This method resembles the primary public method of the SpatialPooler class. """ |
# Run the standard SpatialPooler computation, then update the pairwise
# average-activity statistics this wrapper maintains on top of it.
super(SpatialPoolerWrapper, self).compute(inputVector, learn, activeArray)
self._updateAvgActivityPairs(activeArray)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computeMaxPool(input_width):
""" Compute CNN max pool width. see 'cnn_sdr.py' """ |
# Standard convolution output-width formula floor((W + 2P - K)/S + 1),
# using the module-level PADDING / KERNEL_SIZE / STRIDE constants,
# followed by a 2x-downsampling max pool.
wout = math.floor((input_width + 2 * PADDING - KERNEL_SIZE) / STRIDE + 1)
return int(math.floor(wout / 2.0))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createRandomObjects(self, numObjects, numPoints, numLocations=None, numFeatures=None):
""" Creates a set of random objects and adds them to the machine. If numLocations and numFeatures and not specified, they will be set to the desired number of points. """ |
# No-op when no objects are requested.
if numObjects > 0:
  # Default both pool sizes to the number of points per object.
  if numLocations is None:
    numLocations = numPoints
  if numFeatures is None:
    numFeatures = numPoints
  assert(numPoints <= numLocations), ("Number of points in object cannot be "
                                      "greater than number of locations")
  # Pool of location ids to sample object points from (no repeats within
  # one object, thanks to the permutation below).
  locationArray = numpy.array(range(numLocations))
  # Reseed so repeated runs generate the same objects.
  numpy.random.seed(self.seed)
  for _ in xrange(numObjects):
    # Permute the number of locations and select points from it
    locationArray = numpy.random.permutation(locationArray)
    # Each point is a (location, feature) pair; features are drawn with
    # replacement.
    self.addObject(
      [(locationArray[p],
        numpy.random.randint(0, numFeatures)) for p in xrange(numPoints)],
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getUniqueFeaturesLocationsInObject(self, name):
""" Return two sets. The first set contains the unique locations Ids in the object. The second set contains the unique feature Ids in the object. """ |
uniqueFeatures = set()
uniqueLocations = set()
for pair in self.objects[name]:
uniqueLocations = uniqueLocations.union({pair[0]})
uniqueFeatures = uniqueFeatures.union({pair[1]})
return uniqueLocations, uniqueFeatures |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generateLocations(self):
""" Generates a pool of locations to be used for the experiments. For each index, numColumns SDR's are created, as locations for the same feature should be different for each column. """ |
size = self.externalInputSize
bits = self.numInputBits
# Reseed so the same location SDRs are generated across runs.
random.seed(self.seed)
self.locations = []
# One independent SDR pool per cortical column: the same location index
# maps to a different random SDR in each column, as the docstring requires.
for _ in xrange(self.numColumns):
  self.locations.append(
    [self._generatePattern(bits, size) for _ in xrange(self.numLocations)]
  )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDetectorClassConstructors(detectors):
""" Takes in names of detectors. Collects class names that correspond to those detectors and returns them in a dict. The dict maps detector name to class names. Assumes the detectors have been imported. """ |
detectorConstructors = {
d : globals()[detectorNameToClass(d)] for d in detectors}
return detectorConstructors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_action(self, state):
""" Select the best action for the given state using e-greedy exploration to minimize overfitting :return: tuple(action, value) """ |
# Q-value of the chosen action; stays 0 on exploratory moves.
value = 0
# Pure random exploration until enough steps have been observed.
if self.steps < self.min_steps:
  action = np.random.randint(self.actions)
else:
  # Decay epsilon toward its floor, then explore with probability eps.
  self.eps = max(self.eps_end, self.eps * self.eps_decay)
  if random.random() < self.eps:
    action = np.random.randint(self.actions)
  else:
    # Greedy action from the local Q-network (eval mode, no gradients).
    self.local.eval()
    with torch.no_grad():
      state = torch.tensor(state, device=self.device, dtype=torch.float).unsqueeze(0)
      Q = self.local(state)
      # torch.max over dim 1 returns (maxValue, argmax).
      value, action = torch.max(Q, 1)
return int(action), float(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inferObjects(self, bodyPlacement, maxTouches=2):
""" Touch each object with multiple sensors twice. :returns: dict mapping the number of touches required to the number of objects that took that many touches to be uniquely inferred. The 'None' key is reserved for objects not recognized after `maxTouches` touches """ |
# Notify monitors of the new body placement before any sensing starts.
for monitor in self.monitors.itervalues():
  monitor.afterBodyWorldLocationChanged(bodyPlacement)
numTouchesRequired = collections.defaultdict(int)
for objectName, objectFeatures in self.objects.iteritems():
  self.reset()
  objectPlacement = self.objectPlacements[objectName]
  # Infinite iterator yielding, per touch, one feature index per column.
  featureIndexByColumnIterator = (
    greedySensorPositions(self.numCorticalColumns, len(objectFeatures)))
  for touch in xrange(maxTouches):
    # Choose where to place each sensor.
    featureIndexByColumn = featureIndexByColumnIterator.next()
    sensedFeatures = [objectFeatures[i] for i in featureIndexByColumn]
    featureSDRByColumn = [self.features[(iCol, feature["name"])]
                          for iCol, feature in enumerate(sensedFeatures)]
    # World location of each sensor = object origin + feature center.
    worldLocationByColumn = np.array([
      [objectPlacement[0] + feature["top"] + feature["height"]/2,
       objectPlacement[1] + feature["left"] + feature["width"]/2]
      for feature in sensedFeatures])
    for monitor in self.monitors.itervalues():
      monitor.afterSensorWorldLocationChanged(worldLocationByColumn)
    egocentricLocationByColumn = worldLocationByColumn - bodyPlacement
    # Repeat the same sensation until network activity stops changing
    # ("settles"), up to maxSettlingTime steps.
    prevCellActivity = None
    for t in xrange(self.maxSettlingTime):
      for monitor in self.monitors.itervalues():
        monitor.beforeCompute(egocentricLocationByColumn, featureSDRByColumn,
                              isRepeat=(t > 0))
      self.compute(egocentricLocationByColumn, featureSDRByColumn, learn=False)
      cellActivity = (
        tuple(c.getAllCellActivity()
              for c in self.corticalColumns),
        tuple(set(module.activeCells)
              for module in self.bodyToSpecificObjectModules))
      if cellActivity == prevCellActivity:
        # It settled. Cancel logging this timestep.
        for monitor in self.monitors.itervalues():
          monitor.clearUnflushedData()
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.itervalues():
          monitor.flush()
    # Check if the object is narrowed down
    if self.isObjectClassified(objectName):
      numTouchesRequired[touch + 1] += 1
      break
  else:
    # for-else: no break occurred, i.e. never classified within maxTouches.
    numTouchesRequired[None] += 1
return numTouchesRequired
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotAccuracy(suite, name):
""" Plots classification accuracy """ |
path = suite.cfgparser.get(name, "path")
path = os.path.join(path, name)
accuracy = defaultdict(list)
sensations = defaultdict(list)
for exp in suite.get_exps(path=path):
  params = suite.get_params(exp)
  maxTouches = params["num_sensations"]
  cells = params["cells_per_axis"]
  res = suite.get_history(exp, 0, "Correct classification")
  # An object counts as classified if any sensation got it right.
  classified = [any(x) for x in res]
  # NOTE(review): accuracy/sensations are defaultdict(list) but are
  # *assigned* here, so a repeated cells_per_axis value overwrites the
  # previous experiment's result instead of accumulating -- confirm.
  accuracy[cells] = float(sum(classified)) / float(len(classified))
  # Index of the first correct touch; falls back to maxTouches when the
  # object was never classified (np.argmax of all-False is 0, hence "or").
  touches = [np.argmax(x) or maxTouches for x in res]
  sensations[cells] = [np.mean(touches), np.max(touches)]
plt.title("Classification Accuracy")
# Sort both series by cells-per-axis for a monotonic x axis.
accuracy = OrderedDict(sorted(accuracy.items(), key=lambda t: t[0]))
fig, ax1 = plt.subplots()
ax1.plot(accuracy.keys(), accuracy.values(), "b")
ax1.set_xlabel("Cells per axis")
ax1.set_ylabel("Accuracy", color="b")
ax1.tick_params("y", colors="b")
sensations = OrderedDict(sorted(sensations.items(), key=lambda t: t[0]))
# Secondary y axis for the mean/max number of sensations.
ax2 = ax1.twinx()
ax2.set_prop_cycle(linestyle=["-", "--"])
ax2.plot(sensations.keys(), sensations.values(), "r")
ax2.set_ylabel("Sensations", color="r")
ax2.tick_params("y", colors="r")
ax2.legend(("Mean", "Max"))
# save
path = suite.cfgparser.get(name, "path")
plotPath = os.path.join(path, "{}.pdf".format(name))
plt.savefig(plotPath)
plt.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_error(data, labels, pos_neurons, neg_neurons = [], add_noise = True):
""" Calculates error, including number of false positives and false negatives. Written to allow the use of multiple neurons, in case we attempt to use a population in the future. """ |
num_correct = 0
num_false_positives = 0
num_false_negatives = 0
# One vote tally per datapoint: positive neurons add, negative subtract.
classifications = numpy.zeros(data.nRows())
for neuron in pos_neurons:
  classifications += neuron.calculate_on_entire_dataset(data)
for neuron in neg_neurons:
  classifications -= neuron.calculate_on_entire_dataset(data)
if add_noise:
  # NOTE(review): numpy.random.rand() is a single scalar, so the *same*
  # tiny offset is added to every datapoint; ties at exactly 0 are all
  # broken in the same direction. If per-datapoint tie-breaking was
  # intended, this should be numpy.random.rand(data.nRows()) -- confirm.
  classifications += (numpy.random.rand() - 0.5)/1000
# Collapse votes to {-1, 0, +1}; > 0 is a positive prediction.
classifications = numpy.sign(classifications)
for classification, label in zip(classifications, labels):
  if classification > 0 and label > 0:
    num_correct += 1.0
  elif classification <= 0 and label <= 0:
    num_correct += 1.0
  elif classification > 0 and label <= 0:
    num_false_positives += 1
  else:
    num_false_negatives += 1
# (error rate, #false positives, #false negatives)
return (1.*num_false_positives + num_false_negatives)/data.nRows(), num_false_positives, num_false_negatives
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computeActivity(self, activeInputsBySource, permanenceThreshold=None):
""" Calculate the number of active synapses per segment. @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} """ |
# Accumulate per-segment overlap counts across all input sources.
overlaps = None
for source, connections in self.connectionsBySource.iteritems():
  o = connections.computeActivity(activeInputsBySource[source],
                                  permanenceThreshold)
  if overlaps is None:
    overlaps = o
  else:
    # Segment numbering is aligned across sources, so counts add up
    # elementwise. Returns None if there are no sources at all.
    overlaps += o
return overlaps
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createSegments(self, cells):
""" Create a segment on each of the specified cells. @param cells (numpy array) """ |
segments = None
# Create the segments in every per-source Connections instance; every
# instance is expected to assign the same new segment numbers.
for connections in self.connectionsBySource.itervalues():
  created = connections.createSegments(cells)
  if segments is None:
    segments = created
  else:
    # Sanity-check that the segment numbers are the same.
    np.testing.assert_equal(segments, created)
return segments
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def growSynapses(self, segments, activeInputsBySource, initialPermanence):
""" Grow synapses to each of the specified inputs on each specified segment. @param segments (numpy array) The segments that should add synapses @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} @param initialPermanence (float) """ |
# Delegate to each per-source Connections, growing synapses from that
# source's currently-active cells onto the given segments.
for source, connections in self.connectionsBySource.iteritems():
  connections.growSynapses(segments, activeInputsBySource[source],
                           initialPermanence)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setPermanences(self, segments, presynapticCellsBySource, permanence):
""" Set the permanence of a specific set of synapses. Any synapses that don't exist will be initialized. Any existing synapses will be overwritten. Conceptually, this method takes a list of [segment, presynapticCell] pairs and initializes their permanence. For each segment, one synapse is added (although one might be added for each "source"). To add multiple synapses to a segment, include it in the list multiple times. The total number of affected synapses is len(segments)*number_of_sources*1. @param segments (numpy array) One segment for each synapse that should be added @param presynapticCellsBySource (dict of numpy arrays) One presynaptic cell for each segment. Example: {"customInputName1": np.array([42, 69])} @param permanence (float) The permanence to assign the synapse """ |
# One identical permanence value per (segment, presynaptic cell) pair.
permanences = np.repeat(np.float32(permanence), len(segments))
for source, connections in self.connectionsBySource.iteritems():
  # Sources absent from presynapticCellsBySource are simply skipped.
  if source in presynapticCellsBySource:
    connections.matrix.setElements(segments, presynapticCellsBySource[source],
                                   permanences)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadImage(t, filename="cajal.jpg"):
""" Load the given gray scale image. Threshold it to black and white and crop it to be the dimensions of the FF input for the thalamus. Return a binary numpy matrix where 1 corresponds to black, and 0 corresponds to white. """ |
image = Image.open("cajal.jpg").convert("1")
image.load()
box = (0, 0, t.inputWidth, t.inputHeight)
image = image.crop(box)
# Here a will be a binary numpy array where True is white. Convert to floating
# point numpy array where white is 0.0
a = np.asarray(image)
im = np.ones((t.inputWidth, t.inputHeight))
im[a] = 0
return im |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inferThalamus(t, l6Input, ffInput):
""" Compute the effect of this feed forward input given the specific L6 input. :param t: instance of Thalamus :param l6Input: :param ffInput: a numpy array of 0's and 1's :return: """ |
print("\n-----------")
# Clear residual activity, apply the L6 modulatory input, then drive the
# relay cells with the feed-forward input.
t.reset()
t.deInactivateCells(l6Input)
ffOutput = t.computeFeedForwardActivity(ffInput)
# print("L6 input:", l6Input)
# print("Active TRN cells: ", t.activeTRNCellIndices)
# print("Burst ready relay cells: ", t.burstReadyCellIndices)
return ffOutput
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filtered(w=250):
""" In this example we filter the image into several channels using gabor filters. L6 activity is used to select one of those channels. Only activity selected by those channels burst. """ |
# prepare filter bank kernels: 4 orientations x 2 scales x 2 frequencies
# of real-valued Gabor filters (16 channels total).
kernels = []
for theta in range(4):
  theta = theta / 4. * np.pi
  for sigma in (1, 3):
    for frequency in (0.05, 0.25):
      kernel = np.real(gabor_kernel(frequency, theta=theta,
                                    sigma_x=sigma, sigma_y=sigma))
      kernels.append(kernel)
print("Initializing thalamus")
t = Thalamus(
  trnCellShape=(w, w),
  relayCellShape=(w, w),
  inputShape=(w, w),
  l6CellCount=128*128,
  trnThreshold=15,
)
ff = loadImage(t)
# Visualize each kernel and its binarized filter response (responses more
# than one std above the mean are kept).
for i,k in enumerate(kernels):
  plotActivity(k, "kernel"+str(i)+".jpg", "Filter kernel", vmax=k.max(),
               vmin=k.min())
  filtered0 = power(ff, k)
  ft = np.zeros((w, w))
  ft[filtered0 > filtered0.mean() + filtered0.std()] = 1.0
  plotActivity(ft, "filtered"+str(i)+".jpg", "Filtered image", vmax=1.0)
encoder = createLocationEncoder(t, w=17)
trainThalamusLocations(t, encoder)
# Use one specific channel (kernel index 3) as the feed-forward input.
filtered0 = power(ff, kernels[3])
ft = np.zeros((w, w))
ft[filtered0 > filtered0.mean() + filtered0.std()] = 1.0
# Get a salt and pepper burst ready image
print("Getting unions")
l6Code = list(getUnionLocations(encoder, 125, 125, 150, step=10))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput = inferThalamus(t, l6Code, ft)
plotActivity(t.burstReadyCells, "relay_burstReady_filtered.jpg",
             title="Burst-ready cells",
             )
plotActivity(ffOutput, "cajal_relay_output_filtered.jpg",
             title="Filtered activity",
             cmap="Greys")
# Get a more detailed filtered image (denser L6 union, step=3)
print("Getting unions")
l6Code = list(getUnionLocations(encoder, 125, 125, 150, step=3))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
# Blend the unfiltered response with the channel-filtered one.
ffOutput_all = inferThalamus(t, l6Code, ff)
ffOutput_filtered = inferThalamus(t, l6Code, ft)
ffOutput3 = ffOutput_all*0.4 + ffOutput_filtered
plotActivity(t.burstReadyCells, "relay_burstReady_all.jpg",
             title="Burst-ready cells",
             )
plotActivity(ffOutput3, "cajal_relay_output_filtered2.jpg",
             title="Filtered activity",
             cmap="Greys")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_delimited_message_bytes(byte_stream, nr=4):
    ''' Parse a delimited protobuf message. This is done by first getting a protobuf varint from
    the stream that represents the length of the message, then reading that amount of
    from the message and then parse it.
    Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
    The decoder returns the value and the position where the value was found, so we need
    to rewind the buffer to the position, because the remaining bytes belong to the message
    after.
    '''
    # NOTE: decoder._DecodeVarint32 is a protobuf-internal helper; it
    # returns (decoded value, number of bytes the varint consumed).
    (length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message length (pos %d): %d" % (pos, length))
    # We read `nr` bytes but the varint only used `pos` of them; give the
    # surplus back to the stream -- those bytes start the message body.
    delimiter_bytes = nr - pos
    byte_stream.rewind(delimiter_bytes)
    message_bytes = byte_stream.read(length)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
    # Total bytes consumed = varint size + payload size.
    total_len = length + pos
    return (total_len, message_bytes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def read(self, n):
    '''Reads n bytes into the internal buffer'''
    # Additional bytes needed beyond what is already buffered past the
    # current position (presumably self.pos points at the last consumed
    # byte, hence the +1 -- confirm against the enclosing buffer class).
    bytes_wanted = n - self.buffer_length + self.pos + 1
    if bytes_wanted > 0:
        self._buffer_bytes(bytes_wanted)
    # Return the n bytes following pos and advance the position past them.
    end_pos = self.pos + n
    ret = self.buffer[self.pos + 1:end_pos + 1]
    self.pos = end_pos
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def rewind(self, places):
    '''Rewinds the current buffer to a position. Needed for reading varints,
    because we might read bytes that belong to the stream after the varint.
    '''
    log.debug("Rewinding pos %d with %d places" % (self.pos, places))
    # Move the read position backwards; the bytes remain buffered and will
    # be returned again by the next read().
    self.pos -= places
    log.debug("Reset buffer to pos %d" % self.pos)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def create_rpc_request_header(self):
    '''Creates and serializes a delimited RpcRequestHeaderProto message.'''
    rpcheader = RpcRequestHeaderProto()
    rpcheader.rpcKind = 2 # rpcheaderproto.RpcKindProto.Value('RPC_PROTOCOL_BUFFER')
    rpcheader.rpcOp = 0 # rpcheaderproto.RpcPayloadOperationProto.Value('RPC_FINAL_PACKET')
    rpcheader.callId = self.call_id
    rpcheader.retryCount = -1
    # Hadoop expects at most a 16-byte client id.
    rpcheader.clientId = self.client_id[0:16]
    # NOTE(review): -3 looks like the sentinel call id for the initial
    # connection-context call; afterwards ids count up from 0 -- confirm
    # against the Hadoop RPC protocol.
    if self.call_id == -3:
        self.call_id = 0
    else:
        self.call_id += 1
    # Serialize delimited
    s_rpcHeader = rpcheader.SerializeToString()
    log_protobuf_message("RpcRequestHeaderProto (len: %d)" % (len(s_rpcHeader)), rpcheader)
    return s_rpcHeader
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def send_rpc_message(self, method, request):
    '''Sends a Hadoop RPC request to the NameNode.
    The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
    should already be serialized in the right way (delimited or not) before
    they are passed in this method.
    The Hadoop RPC protocol looks like this for sending requests:
    When sending requests
    +---------------------------------------------------------------------+
    |  Length of the next three parts (4 bytes/32 bit int)                |
    +---------------------------------------------------------------------+
    |  Delimited serialized RpcRequestHeaderProto (varint len + header)   |
    +---------------------------------------------------------------------+
    |  Delimited serialized RequestHeaderProto (varint len + header)      |
    +---------------------------------------------------------------------+
    |  Delimited serialized Request (varint len + request)                |
    +---------------------------------------------------------------------+
    '''
    log.debug("############## SENDING ##############")
    #0. RpcRequestHeaderProto
    rpc_request_header = self.create_rpc_request_header()
    #1. RequestHeaderProto
    request_header = self.create_request_header(method)
    #2. Param
    param = request.SerializeToString()
    if log.getEffectiveLevel() == logging.DEBUG:
        log_protobuf_message("Request", request)
    # Total frame length: each of the three parts is varint-delimited on
    # the wire, so each part contributes its payload plus its varint size.
    rpc_message_length = len(rpc_request_header) + encoder._VarintSize(len(rpc_request_header)) + \
                         len(request_header) + encoder._VarintSize(len(request_header)) + \
                         len(param) + encoder._VarintSize(len(param))
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("RPC message length: %s (%s)" % (rpc_message_length, format_bytes(struct.pack('!I', rpc_message_length))))
    # 4-byte big-endian frame length, then the three delimited parts.
    self.write(struct.pack('!I', rpc_message_length))
    self.write_delimited(rpc_request_header)
    self.write_delimited(request_header)
    self.write_delimited(param)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_response(self, byte_stream, response_class):
    '''Read and decode one Hadoop RPC response from ``byte_stream``.

    The stream carries a 4-byte big-endian total length, then a
    varint-delimited RpcResponseHeaderProto and, on success, a
    varint-delimited response message of type ``response_class``:

        +-----------------------------------------------------------+
        |  Length of the RPC resonse (4 bytes/32 bit int)           |
        +-----------------------------------------------------------+
        |  Delimited serialized RpcResponseHeaderProto              |
        +-----------------------------------------------------------+
        |  Serialized delimited RPC response                        |
        +-----------------------------------------------------------+

    A header status other than SUCCESS (0) is passed to
    ``self.handle_error``.
    '''
    log.debug("############## PARSING ##############")
    log.debug("Payload class: %s" % response_class)

    # First 4 bytes: total length of header plus response body.
    total_length = struct.unpack("!I", byte_stream.read(4))[0]
    log.debug("Total response length: %s" % total_length)

    header = RpcResponseHeaderProto()
    (header_len, header_bytes) = get_delimited_message_bytes(byte_stream)
    log.debug("Header read %d" % header_len)
    header.ParseFromString(header_bytes)
    log_protobuf_message("RpcResponseHeaderProto", header)

    if header.status != 0:
        # Error path: header status marks ERROR; dispatch and bail out.
        self.handle_error(header)
        return

    log.debug("header: %s, total: %s" % (header_len, total_length))
    if header_len >= total_length:
        # The header consumed the entire message: no response body follows.
        return

    response = response_class()
    response_bytes = get_delimited_message_bytes(byte_stream, total_length - header_len)[1]
    if len(response_bytes) > 0:
        response.ParseFromString(response_bytes)
        if log.getEffectiveLevel() == logging.DEBUG:
            log_protobuf_message("Response", response)
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close_socket(self):
    '''Close the socket (if any) and reset the channel state.

    Closing is best-effort: errors raised by ``close()`` are ignored, and
    ``self.sock`` is set to None so the next call re-establishes the
    connection.
    '''
    log.debug("Closing socket")
    if self.sock:
        try:
            self.sock.close()
        except Exception:
            # Best-effort cleanup. The original used a bare ``except:``,
            # which also swallowed SystemExit/KeyboardInterrupt.
            pass
        self.sock = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CallMethod(self, method, controller, request, response_class, done):
    '''Invoke an RPC method and return the parsed response.

    The CamelCase name does not conform to PEP8 because protobuf's
    service machinery calls this method by name.
    '''
    try:
        self.validate_request(request)
        if not self.sock:
            self.get_connection(self.host, self.port)
        self.send_rpc_message(method, request)
        return self.parse_response(self.recv_rpc_message(), response_class)
    except RequestError:
        # Request-level failures keep the connection open for reuse.
        raise
    except Exception:
        # Anything else leaves the channel in an unknown state: close it.
        self.close_socket()
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLogger(name):
    '''Create and return a logger with the specified name.

    A _NullHandler is attached so the library never emits "no handlers
    could be found" warnings when the application does not configure
    logging itself.
    '''
    log = logging.getLogger(name)
    # logging.getLogger returns the same object for the same name, so only
    # attach the null handler once; the original unconditionally added a
    # new handler on every call, accumulating duplicates.
    if not log.handlers:
        log.addHandler(_NullHandler())
    return log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, src, dst):
    '''Upload a file to HDFS.

    The source file is taken from the ``testfiles_path`` supplied in the
    constructor.
    '''
    local_path = self._testfiles_path + src
    command = [self._hadoop_cmd, 'fs', '-put', local_path, self._full_hdfs_path(dst)]
    return self._getStdOutCmd(command, True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ls(self, src, extra_args=None):
    '''List files in a directory.

    :param src: list of paths (made absolute via ``_full_hdfs_path``)
    :param extra_args: optional extra command line flags for ``-ls``
    :returns: transformed ``hadoop fs -ls`` output
    '''
    # The original default was a mutable list ([]), which is shared across
    # calls; use the None sentinel idiom instead.
    if extra_args is None:
        extra_args = []
    paths = [self._full_hdfs_path(x) for x in src]
    output = self._getStdOutCmd([self._hadoop_cmd, 'fs', '-ls'] + extra_args + paths, True)
    return self._transform_ls_output(output, self.hdfs_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def df(self, src):
    '''Perform ``df`` on a path and return the command output.'''
    command = [self._hadoop_cmd, 'fs', '-df', self._full_hdfs_path(src)]
    return self._getStdOutCmd(command, True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def du(self, src, extra_args=None):
    '''Perform ``du`` on a path.

    :param src: list of paths (made absolute via ``_full_hdfs_path``)
    :param extra_args: optional extra command line flags for ``-du``
    :returns: transformed ``hadoop fs -du`` output
    '''
    # The original default was a mutable list ([]), which is shared across
    # calls; use the None sentinel idiom instead.
    if extra_args is None:
        extra_args = []
    paths = [self._full_hdfs_path(x) for x in src]
    output = self._getStdOutCmd([self._hadoop_cmd, 'fs', '-du'] + extra_args + paths, True)
    return self._transform_du_output(output, self.hdfs_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(self, src):
    '''Perform ``count`` on a path and return the transformed output.'''
    paths = [self._full_hdfs_path(x) for x in src]
    output = self._getStdOutCmd([self._hadoop_cmd, 'fs', '-count'] + paths, True)
    return self._transform_count_output(output, self.hdfs_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handleError(self, error_code, message):
    '''Record an RPC failure on this controller.

    Stores the error code and message and raises the failure flag so
    callers can inspect the controller state afterwards.
    '''
    self.reason = error_code
    self._error = message
    self._fail = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ls(self, paths, recurse=False, include_toplevel=False, include_children=True):
    '''Issue an 'ls' request and yield a fileinfo dictionary per node.

    :param paths: Paths to list
    :type paths: list
    :param recurse: Recursive listing
    :type recurse: boolean
    :param include_toplevel: Include the given path in the listing. If the
        path is a file, include_toplevel is always True.
    :type include_toplevel: boolean
    :param include_children: Include child nodes in the listing.
    :type include_children: boolean
    :returns: a generator that yields dictionaries

    Each yielded dictionary contains keys such as path, file_type, length,
    permission, owner, group, blocksize, block_replication, access_time and
    modification_time.
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")

    listing = self._find_items(paths, self._handle_ls,
                               include_toplevel=include_toplevel,
                               include_children=include_children,
                               recurse=recurse)
    for entry in listing:
        if entry:
            yield entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _handle_ls(self, path, node):
''' Handle every node received for an ls request'''
entry = {}
entry["file_type"] = self.FILETYPES[node.fileType]
entry["permission"] = node.permission.perm
entry["path"] = path
for attribute in self.LISTING_ATTRIBUTES:
entry[attribute] = node.__getattribute__(attribute)
return entry |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chmod(self, paths, mode, recurse=False):
    '''Change the mode for paths, yielding one result map per node.

    :param paths: List of paths to chmod
    :type paths: list
    :param mode: Octal mode (e.g. 0o755)
    :type mode: int
    :param recurse: Recursive chmod
    :type recurse: boolean
    :returns: a generator that yields dictionaries

    .. note:: The top level directory is always included when `recurse=True`
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("chmod: no path given")
    if not mode:
        raise InvalidInputException("chmod: no mode given")

    def apply_chmod(path, node):
        # Closure over ``mode`` so _find_items sees a two-argument processor.
        return self._handle_chmod(path, node, mode)

    for result in self._find_items(paths, apply_chmod, include_toplevel=True,
                                   include_children=False, recurse=recurse):
        if result:
            yield result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(self, paths):
    '''Count files, directories and bytes under the given paths.

    :param paths: List of paths to count
    :type paths: list
    :returns: a generator that yields dictionaries with keys such as
        fileCount, directoryCount, length, quota, spaceConsumed and
        spaceQuota
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("count: no path given")

    results = self._find_items(paths, self._handle_count, include_toplevel=True,
                               include_children=False, recurse=False)
    for result in results:
        if result:
            yield result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def df(self):
    '''Return filesystem usage information.

    :returns: a single dictionary with keys such as filesystem, capacity,
        used, remaining, under_replicated, corrupt_blocks and missing_blocks
    '''
    # The bound method is a valid processor directly — no lambda needed.
    results = self._find_items(['/'], self._handle_df, include_toplevel=True,
                               include_children=False, recurse=False)
    return list(results)[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def du(self, paths, include_toplevel=False, include_children=True):
    '''Yield size information for the given paths.

    :param paths: Paths to du
    :type paths: list
    :param include_toplevel: Include the given path in the result. If the
        path is a file, include_toplevel is always True.
    :type include_toplevel: boolean
    :param include_children: Include child nodes in the result.
    :type include_children: boolean
    :returns: a generator that yields dictionaries with 'path' and 'length'
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("du: no path given")

    # The bound method is a valid processor directly — no lambda needed.
    for result in self._find_items(paths, self._handle_du,
                                   include_toplevel=include_toplevel,
                                   include_children=include_children,
                                   recurse=False):
        if result:
            yield result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rmdir(self, paths):
    '''Delete directories, yielding one result map per path.

    :param paths: Paths to delete
    :type paths: list
    :returns: a generator that yields dictionaries

    .. note: directories have to be empty.
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("rmdir: no path given")

    for result in self._find_items(paths, self._handle_rmdir, include_toplevel=True):
        if result:
            yield result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def touchz(self, paths, replication=None, blocksize=None):
    '''Create zero length files, or update the timestamp on existing
    zero length files.

    :param paths: Paths
    :type paths: list
    :param replication: Replication factor; taken from the namenode's
        server defaults when not given
    :type replication: int
    :param blocksize: Block size (in bytes) of the newly created file;
        taken from the namenode's server defaults when not given
    :type blocksize: int
    :returns: a generator that yields dictionaries
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("touchz: no path given")

    # Fill in missing (falsy) parameters from the namenode server defaults.
    if not replication or not blocksize:
        defaults = self.serverdefaults()
        replication = replication or defaults['replication']
        blocksize = blocksize or defaults['blockSize']

    def create_file(path, node):
        # Closure over the (now resolved) replication and blocksize values.
        return self._handle_touchz(path, node, replication, blocksize)

    for result in self._find_items(paths, create_file, include_toplevel=True,
                                   check_nonexistence=True, include_children=False):
        if result:
            yield result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setrep(self, paths, replication, recurse=False):
    '''Set the replication factor for paths, yielding one result map per node.

    :param paths: Paths
    :type paths: list
    :param replication: Replication factor
    :type replication: int
    :param recurse: Apply replication factor recursively
    :type recurse: boolean
    :returns: a generator that yields dictionaries
    '''
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("setrep: no path given")
    if not replication:
        raise InvalidInputException("setrep: no replication given")

    def apply_setrep(path, node):
        # Closure over ``replication`` so _find_items sees a two-argument processor.
        return self._handle_setrep(path, node, replication)

    for result in self._find_items(paths, apply_setrep, include_toplevel=True,
                                   include_children=False, recurse=recurse):
        if result:
            yield result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.