text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _initializeDimensions(self, inputData):
""" Stores the training images' dimensions, for convenience. """ |
if len(inputData.shape) == 2:
self.imageHeight, self.numImages = inputData.shape
self.imageWidth, self.numChannels = None, None
elif len(inputData.shape) == 3:
self.imageHeight, \
self.imageWidth, \
self.numImages = inputData.shape
self.numChannels = None
elif len(inputData.shape) == 4:
self.imageHeight, \
self.imageWidth, \
self.numChannels, \
self.numImages = inputData.shape
else:
raise ValueError("The provided image set has more than 4 dimensions.") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mmGetPermanencesPlot(self, title=None):
    """
    Build a 2D plot of the permanence values of every column.

    @param title Optional title for the figure.
    @return (Plot) The populated plot object.
    """
    permanencePlot = Plot(self, title)
    numColumns = self.getNumColumns()
    permanences = numpy.zeros((numColumns, self.getNumInputs()))
    for columnIndex in xrange(numColumns):
        # getPermanence fills the given row in place.
        self.getPermanence(columnIndex, permanences[columnIndex])
    permanencePlot.add2DArray(permanences, xlabel="Permanences", ylabel="Column")
    return permanencePlot
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finalize(self, params, rep):
    """
    Save the full model once we are done.

    Saving is skipped when params["saveNet"] is False.

    @param params (dict) Experiment parameters; uses "saveNet", "path"
           and "name".
    @param rep    (int)  Repetition index, used in the output file name.
    """
    if not params.get("saveNet", True):
        return
    modelPath = os.path.join(params["path"], params["name"],
                             "model_{}.pt".format(rep))
    torch.save(self.model, modelPath)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadDatasets(self, params):
    """
    Load the train, validation, test and background-noise datasets.

    The GSC dataset specifies specific files to be used as training, test,
    and validation. We assume the data has already been processed
    according to those files into separate train, test, and valid
    directories. For our experiment we use a subset of the data (10
    categories out of 30), just like the Kaggle competition.

    Side effects: sets self.use_preprocessed_dataset, self.train_loader,
    self.validation_loader, self.test_loader and self.bg_noise_loader.

    @param params (dict) Experiment parameters; reads "batch_size" and,
           when using raw data, "background_noise_dir".
    """
    # Number of mel-frequency bands used for the spectrogram features.
    n_mels = 32

    # Check if using pre-processed data or raw data
    self.use_preprocessed_dataset = PreprocessedSpeechDataset.isValid(self.dataDir)
    if self.use_preprocessed_dataset:
        trainDataset = PreprocessedSpeechDataset(self.dataDir, subset="train")
        validationDataset = PreprocessedSpeechDataset(self.dataDir, subset="valid",
                                                      silence_percentage=0)
        testDataset = PreprocessedSpeechDataset(self.dataDir, subset="test",
                                                silence_percentage=0)
        bgNoiseDataset = PreprocessedSpeechDataset(self.dataDir, subset="noise",
                                                   silence_percentage=0)
    else:
        trainDataDir = os.path.join(self.dataDir, "train")
        testDataDir = os.path.join(self.dataDir, "test")
        validationDataDir = os.path.join(self.dataDir, "valid")
        backgroundNoiseDir = os.path.join(self.dataDir, params["background_noise_dir"])

        # Random audio perturbations, applied only to the training set.
        dataAugmentationTransform = transforms.Compose([
            ChangeAmplitude(),
            ChangeSpeedAndPitchAudio(),
            FixAudioLength(),
            ToSTFT(),
            StretchAudioOnSTFT(),
            TimeshiftAudioOnSTFT(),
            FixSTFTDimension(),
        ])
        featureTransform = transforms.Compose(
            [
                ToMelSpectrogramFromSTFT(n_mels=n_mels),
                DeleteSTFT(),
                ToTensor('mel_spectrogram', 'input')
            ])
        trainDataset = SpeechCommandsDataset(
            trainDataDir,
            transforms.Compose([
                dataAugmentationTransform,
                # add_bg_noise, # Uncomment to allow adding BG noise
                # during training
                featureTransform
            ]))

        # Deterministic features for evaluation (no augmentation).
        testFeatureTransform = transforms.Compose([
            FixAudioLength(),
            ToMelSpectrogram(n_mels=n_mels),
            ToTensor('mel_spectrogram', 'input')
        ])
        validationDataset = SpeechCommandsDataset(
            validationDataDir,
            testFeatureTransform,
            silence_percentage=0,
        )
        testDataset = SpeechCommandsDataset(
            testDataDir,
            testFeatureTransform,
            silence_percentage=0,
        )

        # Noise recordings, mixed into a copy of the test set below.
        bg_dataset = BackgroundNoiseDataset(
            backgroundNoiseDir,
            transforms.Compose([FixAudioLength(), ToSTFT()]),
        )
        bgNoiseTransform = transforms.Compose([
            FixAudioLength(),
            ToSTFT(),
            AddBackgroundNoiseOnSTFT(bg_dataset),
            ToMelSpectrogramFromSTFT(n_mels=n_mels),
            DeleteSTFT(),
            ToTensor('mel_spectrogram', 'input')
        ])
        # Test set with background noise mixed in.
        bgNoiseDataset = SpeechCommandsDataset(
            testDataDir,
            bgNoiseTransform,
            silence_percentage=0,
        )

    # Oversample under-represented classes during training.
    weights = trainDataset.make_weights_for_balanced_classes()
    sampler = WeightedRandomSampler(weights, len(weights))
    # print("Number of training samples=",len(trainDataset))
    # print("Number of validation samples=",len(validationDataset))
    # print("Number of test samples=",len(testDataset))
    self.train_loader = DataLoader(trainDataset,
                                   batch_size=params["batch_size"],
                                   sampler=sampler
                                   )
    self.validation_loader = DataLoader(validationDataset,
                                        batch_size=params["batch_size"],
                                        shuffle=False
                                        )
    self.test_loader = DataLoader(testDataset,
                                  batch_size=params["batch_size"],
                                  sampler=None,
                                  shuffle=False
                                  )
    self.bg_noise_loader = DataLoader(bgNoiseDataset,
                                      batch_size=params["batch_size"],
                                      sampler=None,
                                      shuffle=False
                                      )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains(self, location):
    """
    Return True when the given point lies on the surface of the sphere,
    up to the model's numerical tolerance.
    """
    squaredDistance = sum(coord ** 2 for coord in location)
    return self.almostEqual(squaredDistance, self.radius ** 2)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains(self, location):
    """
    Checks that the provided point is on the cylinder.

    A point is on the cylinder if it lies on the lateral surface (at
    radius self.radius, strictly between the two discs) or on the top or
    bottom disc (at z == +/- height / 2, strictly inside the radius).
    """
    x, y, z = location[0], location[1], location[2]
    if self.almostEqual(x ** 2 + y ** 2, self.radius ** 2):
        return abs(z) < self.height / 2.
    # Use abs(z) so points on the BOTTOM disc (z == -height / 2) are
    # accepted too; _sampleLocationOnDisc generates both signs, but the
    # original check only matched the top disc.
    if self.almostEqual(abs(z), self.height / 2.):
        return x ** 2 + y ** 2 < self.radius ** 2
    # NOTE(review): edge points (on both the radius and a disc plane)
    # fail both strict inequalities, as in the original — confirm whether
    # edges should count as "on the cylinder".
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sampleLocation(self):
    """
    Sample a point uniformly from the surface of the cylinder.

    The combined disc area relative to the total surface area reduces to
    radius / (radius + height), which decides which surface to draw from.
    """
    discProbability = self.radius / (self.radius + self.height)
    onDisc = random.random() < discProbability
    return self._sampleLocationOnDisc() if onDisc else self._sampleLocationOnSide()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sampleLocationFromFeature(self, feature):
    """
    Sample a location lying on the requested feature of the cylinder.

    Supported features: "topDisc", "topEdge", "bottomDisc", "bottomEdge",
    "side" and "random".

    @raise NameError for an unrecognized feature.
    """
    samplers = {
        "topDisc": lambda: self._sampleLocationOnDisc(top=True),
        "topEdge": lambda: self._sampleLocationOnEdge(top=True),
        "bottomDisc": lambda: self._sampleLocationOnDisc(top=False),
        "bottomEdge": lambda: self._sampleLocationOnEdge(top=False),
        "side": lambda: self._sampleLocationOnSide(),
        "random": lambda: self.sampleLocation(),
    }
    if feature not in samplers:
        raise NameError("No such feature in {}: {}".format(self, feature))
    return samplers[feature]()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sampleLocationOnDisc(self, top=None):
""" Helper method to sample from the top and bottom discs of a cylinder. If top is set to True, samples only from top disc. If top is set to False, samples only from bottom disc. If not set (defaults to None), samples from both discs. """ |
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
sampledRadius = self.radius * sqrt(random.random())
x, y = sampledRadius * cos(sampledAngle), sampledRadius * sin(sampledAngle)
return [x, y, z] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sampleLocationOnEdge(self, top=None):
""" Helper method to sample from the top and bottom edges of a cylinder. If top is set to True, samples only from top edge. If top is set to False, samples only from bottom edge. If not set (defaults to None), samples from both edges. """ |
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sampleLocationOnSide(self):
""" Helper method to sample from the lateral surface of a cylinder. """ |
z = random.uniform(-1, 1) * self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains(self, location):
    """
    A point is on the box's surface when at least one of its coordinates
    is "saturated", i.e. sits on a face at +/- dimension / 2.
    """
    return any(
        self.almostEqual(abs(coord), self.dimensions[i] / 2.)
        for i, coord in enumerate(location)
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sampleLocationFromFeature(self, feature):
    """
    Sample a location from one specific feature of the box ("face",
    "edge", "vertex" or "random"). This is only supported with three
    dimensions.

    @raise NameError for an unrecognized feature.
    """
    samplers = {
        "face": lambda: self._sampleFromFaces(),
        "edge": lambda: self._sampleFromEdges(),
        "vertex": lambda: self._sampleFromVertices(),
        "random": lambda: self.sampleLocation(),
    }
    if feature not in samplers:
        raise NameError("No such feature in {}: {}".format(self, feature))
    return samplers[feature]()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sampleFromFaces(self):
""" We start by sampling a dimension to "max out", then sample the sign and the other dimensions' values. """ |
coordinates = [random.uniform(-1, 1) * dim / 2. for dim in self.dimensions]
dim = random.choice(range(self.dimension))
coordinates[dim] = self.dimensions[dim] / 2. * random.choice([-1, 1])
return coordinates |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(self, numPoints=100):
    """
    Specific plotting method for boxes, drawing the six faces as
    translucent surfaces. Only supports 3-dimensional objects.

    @param numPoints (int) Resolution of the coordinate grid per axis.
    @return (tuple) The matplotlib figure and its 3D axes.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Coordinate ranges of the box along each axis.
    x = np.linspace(- self.dimensions[0]/2., self.dimensions[0]/2., numPoints)
    y = np.linspace(- self.dimensions[1]/2., self.dimensions[1]/2., numPoints)
    z = np.linspace(- self.dimensions[2]/2., self.dimensions[2]/2., numPoints)
    # Plot the pair of faces perpendicular to each axis in turn.
    Xc, Yc = np.meshgrid(x, y)
    ax.plot_surface(Xc, Yc, -self.dimensions[2]/2,
                    alpha=0.2, rstride=20, cstride=10)
    ax.plot_surface(Xc, Yc, self.dimensions[2]/2,
                    alpha=0.2, rstride=20, cstride=10)
    Yc, Zc = np.meshgrid(y, z)
    ax.plot_surface(-self.dimensions[0]/2, Yc, Zc,
                    alpha=0.2, rstride=20, cstride=10)
    ax.plot_surface(self.dimensions[0]/2, Yc, Zc,
                    alpha=0.2, rstride=20, cstride=10)
    Xc, Zc = np.meshgrid(x, z)
    ax.plot_surface(Xc, -self.dimensions[1]/2, Zc,
                    alpha=0.2, rstride=20, cstride=10)
    ax.plot_surface(Xc, self.dimensions[1]/2, Zc,
                    alpha=0.2, rstride=20, cstride=10)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("{}".format(self))
    return fig, ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visualize(self, numPoints=100):
    """
    Visualization utility for models. Helps to debug the math and logic.
    Helps to monitor complex objects with difficult to define boundaries.
    Only supports 3-dimensional objects.

    Requires PyQtGraph; returns a tuple of five Nones if it cannot be
    imported.

    TODO: center the objects using scale, rotate and translate operations
    on mesh objects.

    @param numPoints (int) Number of scatter points pre-allocated for the
           caller to update.
    @return (tuple) (scatter, mesh, pos, size, color): the GL scatter and
            mesh items plus the numpy arrays backing the scatter, or
            (None, None, None, None, None) when PyQtGraph is unavailable.
    """
    try:
        import pyqtgraph as pg
        import pyqtgraph.multiprocess as mp
        import pyqtgraph.opengl as gl
    except ImportError as e:
        print("PyQtGraph needs to be installed.")
        return (None, None, None, None, None)

    class PlyVisWindow:
        """
        The pyqtgraph visualization utility window class
        Creates a remote process with viewbox frame for visualizations
        Provided access to mesh and scatter for realtime update to view.
        """
        def __init__(self):
            # Run the GL view in a remote process so the caller's process
            # is not blocked by the Qt event loop.
            self.proc = mp.QtProcess()
            self.rpg = self.proc._import('pyqtgraph')
            self.rgl = self.proc._import('pyqtgraph.opengl')
            self.rview = self.rgl.GLViewWidget()
            self.rview.setBackgroundColor('k')
            self.rview.setCameraPosition(distance=10)
            self.grid = self.rgl.GLGridItem()
            self.rview.addItem(self.grid)
            self.rpg.setConfigOption('background', 'w')
            self.rpg.setConfigOption('foreground', 'k')

        def snapshot(self, name=""):
            """
            utility to grabframe of the visualization window.
            @param name (string) helps to avoid overwriting grabbed images
                   programmatically.
            """
            self.rview.grabFrameBuffer().save("{}.png".format(name))

    # We might need this for future purposes Dont Delete
    # class MeshUpdate:
    #   def __init__(self, proc):
    #     self.data_x = proc.transfer([])
    #     self.data_y = proc.transfer([])
    #     self._t = None
    #   @property
    #   def t(self):
    #     return self._t
    #   def update(self,x):
    #     self.data_y.extend([x], _callSync='async')
    #     self.data_x.extend([self.t], _callSync='async',)
    #     self.curve.setData(y=self.data_y, _callSync='async')

    pg.mkQApp()
    self.graphicsWindow = PlyVisWindow()
    self.graphicsWindow.rview.setWindowTitle(self.file)
    # Convert the PLY vertex/face records into plain numpy arrays.
    vertices = self.vertices.data
    vertices = np.array(vertices.tolist())
    faces = np.array([self.faces[i]['vertex_indices'] for i in range(self.faces.count)])
    self.mesh = self.graphicsWindow.rgl.GLMeshItem(vertexes=vertices, faces=faces,
                                                   shader='normalColor', drawEdges=True,
                                                   drawFaces=True, computeNormals=False,
                                                   smooth=False)
    self.graphicsWindow.rview.addItem(self.mesh)
    self.graphicsWindow.rview.show()
    # Pre-allocate the scatter buffers; the caller mutates these arrays
    # (and the returned scatter item) to animate points over the mesh.
    pos = np.empty((numPoints, 3))
    size = np.ones((numPoints,))
    color = np.ones((numPoints, 4))
    self.scatter = self.graphicsWindow.rgl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=True)
    self.graphicsWindow.rview.addItem(self.scatter)
    return self.scatter, self.mesh, pos, size, color
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createRandomSequences(self, numSequences, sequenceLength):
    """
    Create numSequences random sequences of sequenceLength feature
    indices each, registering every sequence with the machine.
    """
    for _ in xrange(numSequences):
        sequence = [numpy.random.randint(0, self.numFeatures)
                    for _ in xrange(sequenceLength)]
        self.addObject(sequence)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _addNoise(self, pattern, noiseLevel, inputSize):
""" Adds noise to the given pattern and returns the new one. A noiseLevel of 0.1 means that 10% of the ON bits will be replaced by other randomly chosen ON bits. The returned SDR will still contain the same number of bits. """ |
if pattern is None:
return None
# Bits that could be noise. These can't be from the original set.
candidateBits = list(set(range(inputSize)) - set(pattern))
random.shuffle(candidateBits)
newBits = set()
for bit in pattern:
if random.random() < noiseLevel:
newBits.add(candidateBits.pop())
else:
newBits.add(bit)
return newBits |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generateFeatures(self):
    """
    Generate the pool of feature SDR's used by the experiments.

    For each feature index, one SDR is generated per column, since the
    same feature should have a different representation in each column.
    """
    numBits = self.numInputBits
    inputSize = self.sensorInputSize
    self.features = [
        [self._generatePattern(numBits, inputSize)
         for _ in xrange(self.numFeatures)]
        for _ in xrange(self.numColumns)
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createThreeObjects():
    """
    Helper function that creates a set of three objects used for basic
    experiments.

    :return: (list(list(tuple))) List of lists of (feature, location)
             pairs.
    """
    # list() keeps objectA a concrete list on both Python 2 and 3
    # (a bare zip() is a one-shot lazy iterator on Python 3).
    objectA = list(zip(range(10), range(10)))
    objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
    objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]
    return [objectA, objectB, objectC]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runSharedFeatures(noiseLevel=None, profile=False):
    """
    Runs a simple experiment where three objects share a number of
    (location, feature) pairs.

    Parameters:
    @param noiseLevel (float) Noise level to add to the locations and
           features during inference
    @param profile    (bool)  If True, the network will be profiled after
           learning and inference
    """
    exp = L4L2Experiment(
        "shared_features",
        enableLateralSP=True,
        enableFeedForwardSP=True
    )
    pairs = createThreeObjects()
    objects = createObjectMachine(
        machineType="simple",
        numInputBits=20,
        sensorInputSize=1024,
        externalInputSize=1024
    )
    # "obj" rather than "object" so the builtin is not shadowed.
    for obj in pairs:
        objects.addObject(obj)
    exp.learnObjects(objects.provideObjectsToLearn())
    if profile:
        exp.printProfile()
    inferConfig = {
        "numSteps": 10,
        "noiseLevel": noiseLevel,
        # list() keeps this a concrete (re-iterable) list on Python 2
        # and 3 alike.
        "pairs": {
            0: list(zip(range(10), range(10)))
        }
    }
    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
    if profile:
        exp.printProfile()
    exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runStretchExperiment(numObjects=25):
    """
    Generate many random objects to profile the network.

    Parameters:
    @param numObjects (int) Number of objects to create and learn.
    """
    experiment = L4L2Experiment(
        "profiling_experiment",
        enableLateralSP=True,
        enableFeedForwardSP=True
    )
    machine = createObjectMachine(
        machineType="simple",
        numInputBits=20,
        sensorInputSize=1024,
        externalInputSize=1024
    )
    machine.createRandomObjects(numObjects=numObjects, numPoints=10)
    experiment.learnObjects(machine.provideObjectsToLearn())
    experiment.printProfile()
    # Infer on the first object and profile that pass as well.
    inferConfig = {
        "numSteps": len(machine[0]),
        "pairs": {0: machine[0]}
    }
    experiment.infer(machine.provideObjectToInfer(inferConfig), objectName=0)
    experiment.printProfile()
    experiment.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"]
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(self, numPoints=100):
    """
    Plot the object in a 3D scatter.

    Default behavior, meant to be overridden when possible: sample
    numPoints points from every feature of the object and scatter them.

    @param numPoints (int) Number of points sampled per feature.
    @return (tuple) The matplotlib figure and its 3D axes.
    """
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    for feature in self._FEATURES:
        for _ in xrange(numPoints):
            pointX, pointY, pointZ = tuple(self.sampleLocationFromFeature(feature))
            axes.scatter(pointX, pointY, pointZ, marker=".")
    axes.set_xlabel('X')
    axes.set_ylabel('Y')
    axes.set_zlabel('Z')
    plt.title("{}".format(self))
    return fig, axes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printSequence(x, formatString="%d"):
""" Compact print a list or numpy array. """ |
numElements = len(x)
s = ""
for j in range(numElements):
s += formatString % x[j]
print s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printSequences(x, formatString="%d"):
""" Print a bunch of sequences stored in a 2D numpy array. """ |
[seqLen, numElements] = x.shape
for i in range(seqLen):
s = ""
for j in range(numElements):
s += formatString % x[i][j]
print s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize(self, useRandomEncoder):
    """
    Initialize the various data structures: the world map and the
    sensory/motor encoders.

    @param useRandomEncoder (bool) If True, sensory inputs are encoded
           with a random SDRCategoryEncoder; otherwise a deterministic
           CategoryEncoder is used.
    """
    self.setRandomSeed(self.seed)
    # Dimensionality of the world: last axis of the spatial configuration.
    self.dim = numpy.shape(self.spatialConfig)[-1]
    # Map from a world coordinate (as a tuple) to its sensory element.
    self.spatialMap = dict(zip(map(tuple, list(self.spatialConfig)),
                               self.sensoryInputElements))
    # One group of active bits per possible 1-D displacement value in
    # [-maxDisplacement, maxDisplacement].
    self.lengthMotorInput1D = (2*self.maxDisplacement + 1) * \
        self.numActiveBitsMotorInput
    uniqueSensoryElements = list(set(self.sensoryInputElementsPool))
    if useRandomEncoder:
        self.sensoryEncoder = SDRCategoryEncoder(n=1024,
                                                 w=self.numActiveBitsSensoryInput,
                                                 categoryList=uniqueSensoryElements,
                                                 forced=True)
        self.lengthSensoryInput = self.sensoryEncoder.getWidth()
    else:
        # NOTE(review): the +1 presumably reserves space for the
        # CategoryEncoder's extra "unknown" category — verify against the
        # encoder's implementation.
        self.lengthSensoryInput = (len(self.sensoryInputElementsPool)+1) * \
            self.numActiveBitsSensoryInput
        self.sensoryEncoder = CategoryEncoder(w=self.numActiveBitsSensoryInput,
                                              categoryList=uniqueSensoryElements,
                                              forced=True)
    motorEncoder1D = ScalarEncoder(n=self.lengthMotorInput1D,
                                   w=self.numActiveBitsMotorInput,
                                   minval=-self.maxDisplacement,
                                   maxval=self.maxDisplacement,
                                   clipInput=True,
                                   forced=True)
    # Motor commands are vectors: one scalar encoder per dimension.
    self.motorEncoder = VectorEncoder(length=self.dim, encoder=motorEncoder1D)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateSensorimotorSequence(self, sequenceLength):
    """
    Generate sensorimotor sequences of length sequenceLength.

    @param sequenceLength (int) Length of the sensorimotor sequence.

    @return (tuple) Contains:
            sensorySequence      (list) Encoded sensory input for whole
                                 sequence.
            motorSequence        (list) Encoded motor input for whole
                                 sequence.
            sensorimotorSequence (list) Encoded sensorimotor input for
                                 whole sequence. This is useful when you
                                 want to give external input to temporal
                                 memory.
    """
    motorSequence = []
    sensorySequence = []
    sensorimotorSequence = []
    # Start the "eye" at a random location of the spatial configuration.
    currentEyeLoc = self.nupicRandomChoice(self.spatialConfig)
    for i in xrange(sequenceLength):
        currentSensoryInput = self.spatialMap[tuple(currentEyeLoc)]
        # Pick the next fixation point and the motor command to reach it.
        nextEyeLoc, currentEyeV = self.getNextEyeLocation(currentEyeLoc)
        if self.verbosity:
            print "sensory input = ", currentSensoryInput, \
                "eye location = ", currentEyeLoc, \
                " motor command = ", currentEyeV
        sensoryInput = self.encodeSensoryInput(currentSensoryInput)
        motorInput = self.encodeMotorInput(list(currentEyeV))
        # The sensorimotor SDR is the two encodings concatenated.
        sensorimotorInput = numpy.concatenate((sensoryInput, motorInput))
        sensorySequence.append(sensoryInput)
        motorSequence.append(motorInput)
        sensorimotorSequence.append(sensorimotorInput)
        currentEyeLoc = nextEyeLoc
    return (sensorySequence, motorSequence, sensorimotorSequence)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getNextEyeLocation(self, currentEyeLoc):
    """
    Generate next eye location based on current eye location.

    Candidate locations are those whose displacement magnitude from the
    current location falls within [minDisplacement, maxDisplacement]; one
    is then picked at random.

    @param currentEyeLoc (numpy.array) Current coordinate describing the
           eye location in the world.

    @return (tuple) Contains:
            nextEyeLoc (numpy.array) Coordinate of the next eye location.
            eyeDiff    (numpy.array) Vector describing change from
                       currentEyeLoc to nextEyeLoc.
    """
    possibleEyeLocs = []
    for loc in self.spatialConfig:
        # NOTE(review): abs(max(...)) differs from max(abs(...)) when the
        # largest-magnitude component of the displacement is negative; if
        # "shift" is meant to be the Chebyshev distance this looks like it
        # should be max(abs(loc - currentEyeLoc)) — confirm the intended
        # semantics before changing.
        shift = abs(max(loc - currentEyeLoc))
        if self.minDisplacement <= shift <= self.maxDisplacement:
            possibleEyeLocs.append(loc)
    nextEyeLoc = self.nupicRandomChoice(possibleEyeLocs)
    eyeDiff = nextEyeLoc - currentEyeLoc
    return nextEyeLoc, eyeDiff
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setRandomSeed(self, seed):
    """
    Reset the nupic random generator with a new seed.

    This is necessary to reset the random state when generating new
    sequences.

    @param seed (int) Seed for nupic.bindings.Random.
    """
    self.seed = seed
    generator = Random()
    generator.setSeed(seed)
    self._random = generator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encodeMotorInput(self, motorInput):
    """
    Encode a motor command into a bit vector.

    Scalar commands are wrapped into a single-element list before being
    handed to the vector encoder.

    @param motorInput (1D numpy.array or scalar) Motor command to encode.
    @return (1D numpy.array) Encoded motor command.
    """
    if hasattr(motorInput, "__iter__"):
        command = motorInput
    else:
        command = [motorInput]
    return self.motorEncoder.encode(command)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decodeMotorInput(self, motorInputPattern):
    """
    Decode a motor command from its encoded bit vector.

    @param motorInputPattern (1D numpy.array) Encoded motor command.
    @return (1D numpy.array) Decoded motor command.
    """
    # Decode once and reuse the result (the original decoded the same
    # pattern twice); list(...) keeps the first-key lookup working on
    # both Python 2 and Python 3, where keys() is a view.
    fieldsDict = self.motorEncoder.decode(motorInputPattern)[0]
    fieldName = list(fieldsDict.keys())[0]
    motorCommand = fieldsDict[fieldName][1][0]
    return motorCommand
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printSensoryCodingScheme(self):
    """
    Print each sensory input along with its encoded SDR.

    Python 2 print statements; the trailing comma keeps the label and the
    encoded sequence on the same line.
    """
    print "\nsensory coding scheme: "
    for loc in self.spatialConfig:
        sensoryElement = self.spatialMap[tuple(loc)]
        print sensoryElement, "%s : " % loc,
        printSequence(self.encodeSensoryInput(sensoryElement))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(self, n, vec):
    """
    Recursive helper to print the motor coding scheme: enumerates every
    displacement vector of dimension n and prints its encoding.

    @param n   (int)  Number of remaining dimensions to enumerate.
    @param vec (list) Prefix of displacement components chosen so far.
    """
    for i in range(-self.maxDisplacement, self.maxDisplacement+1):
        next = vec + [i]
        if n == 1:
            # Base case: full vector assembled; the trailing comma keeps
            # the encoded sequence on the same line (Python 2 print).
            print '{:>5}\t'.format(next), " = ",
            printSequence(self.encodeMotorInput(next))
        else:
            # Recurse to fill in the remaining dimensions.
            self.build(n-1, next)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDefaultTMParams(self, inputSize, numInputBits):
    """
    Return a good default set of parameters to use in the TM region.

    Thresholds are tuned for the common 20- and 10-bit input cases and
    derived from the input sparsity otherwise.

    @param inputSize    (int) Number of columns (bits) in the input.
    @param numInputBits (int) Number of ON bits in the input SDR.
    @return (dict) Keyword parameters for the TM region.
    """
    if numInputBits == 20:
        activationThreshold = minThreshold = 18
    elif numInputBits == 10:
        activationThreshold = minThreshold = 8
    else:
        activationThreshold = minThreshold = int(numInputBits * .6)
    return {
        "columnCount": inputSize,
        "cellsPerColumn": 16,
        "learn": True,
        "learnOnOneCell": False,
        "initialPermanence": 0.41,
        "connectedPermanence": 0.6,
        "permanenceIncrement": 0.1,
        "permanenceDecrement": 0.03,
        "minThreshold": minThreshold,
        "basalPredictedSegmentDecrement": 0.003,
        "apicalPredictedSegmentDecrement": 0.0,
        "reducedBasalThreshold": int(activationThreshold * 0.6),
        "activationThreshold": activationThreshold,
        "sampleSize": int(1.5 * numInputBits),
        "implementation": "ApicalTiebreak",
        "seed": self.seed,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_movie(fig, update_figure, filename, title, fps=15, dpi=100):
    """
    Write a movie of the given figure.

    @param fig           Matplotlib figure to record.
    @param update_figure Callable taking a frame index; redraws the figure
           and returns True to grab the frame, False to stop.
    @param filename      Output movie path.
    @param title         Title stored in the movie metadata.
    @param fps           Frames per second.
    @param dpi           Output resolution.
    """
    writerClass = manimation.writers['ffmpeg']
    writer = writerClass(fps=fps, metadata={"title": title})
    with writer.saving(fig, filename, dpi):
        frame = 0
        # Keep grabbing frames until the update callback says stop.
        while update_figure(frame):
            writer.grab_frame()
            frame += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createExperimentArgs():
    """
    Build the argument tuples for the basic probability-of-false-positives
    experiment.

    @return (list of tuples) Command-line argument tuples for
            ./sdr_calculations2, one per (n, a) combination worth running.
    """
    args = []
    for n in [1500, 1700, 1900, 2100]:
        for a in [128]:
            # Some parameter combinations are just not worth running!
            worthRunning = ((a == 64 and n <= 1500) or
                            (a == 128 and n <= 1900) or
                            a == 256)
            if worthRunning:
                outFile = "results_errorbars/temp_" + str(n) + "_" + str(a) + ".csv"
                args.append(("./sdr_calculations2", outFile,
                             "200000", str(n), str(a), "0"))
    return args
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createNoiseExperimentArgs():
    """
    Build the argument tuples for the probability-of-false-negatives-with-
    noise experiment.

    @return (list of tuples) Command-line argument tuples for
            ./sdr_calculations2, one per noise level (75%, 80% and 85%).
    """
    experimentArguments = []
    n = 6000
    for a in [128]:
        # Iterate over integer percentages so the loop covers 75%, 80%
        # and 85% exactly. (Accumulating "noisePct += 0.05" in floating
        # point drifts to 0.8500000000000001 > 0.85 and silently dropped
        # the last noise level.)
        for noisePctInt in range(75, 90, 5):
            noisePct = noisePctInt / 100.0
            noise = int(round(noisePct*a, 0))
            experimentArguments.append(
                ("./sdr_calculations2",
                 "results_noise_10m/temp_"+str(n)+"_"+str(a)+"_"+str(noise)+"_30.csv",
                 "200000", str(n), str(a), str(noise))
            )
    return experimentArguments
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateMinicolumnSDRs(n, w, threshold):
    """
    Wraps enumerateDistantSDRsBruteForce, caching its result on the
    filesystem under sdrs/<n>_<w>_<threshold>.json.

    @param n         (int) Total number of bits per SDR.
    @param w         (int) Number of ON bits per SDR.
    @param threshold (int) Distance threshold for the enumeration.
    @return (list of numpy.array) The enumerated SDRs.
    """
    import numpy as np  # local import keeps this function self-contained

    if not os.path.exists("sdrs"):
        os.makedirs("sdrs")
    filename = "sdrs/{}_{}_{}.json".format(n, w, threshold)
    # os.path.exists is simpler and safer than glob-matching the literal
    # filename (glob treats [, ], * and ? as wildcards).
    if os.path.exists(filename):
        with open(filename, "r") as fIn:
            # Convert back to arrays so cache hits return the same type
            # as a fresh enumeration (previously they returned plain
            # lists while the compute path returned numpy arrays).
            sdrs = [np.array(sdr) for sdr in json.load(fIn)]
    else:
        begin = time.time()
        sdrs = enumerateDistantSDRsBruteForce(n, w, threshold)
        end = time.time()
        with open(filename, "w") as fOut:
            json.dump([sdr.tolist() for sdr in sdrs], fOut)
        print("Saved", filename)
        print("Elapsed time: {:.2f} seconds".format(end - begin))
    return sdrs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createEvenlySpreadSDRs(numSDRs, n, w):
    """
    Return a set of ~random SDRs that use every available bit an equal
    number of times, +- 1.

    Bits are drawn without replacement from a shuffled pool of all n bits.
    When the pool runs dry part-way through an SDR, the remainder is drawn
    from the bits not already in that SDR, and the pool is rebuilt
    excluding only that remainder — so across SDRs no bit gets ahead of
    the others by more than one use.

    @param numSDRs (int) Number of SDRs to create.
    @param n       (int) Total number of bits.
    @param w       (int) Number of ON bits per SDR.
    @return (list of numpy.array) Sorted ON-bit indices for each SDR.
    """
    assert w <= n
    available = np.arange(n)
    np.random.shuffle(available)
    SDRs = []
    for _ in xrange(numSDRs):
        # Take the next w bits from the pool (fewer if it runs out).
        selected = available[:w]
        available = available[w:]
        if available.size == 0:
            # Pool exhausted: top up this SDR from bits it doesn't already
            # contain, then rebuild the pool without the topped-up bits.
            remainderSelected = np.random.choice(
                np.setdiff1d(np.arange(n), selected),
                size=(w - selected.size),
                replace=False)
            selected = np.append(selected, remainderSelected)
            available = np.setdiff1d(np.arange(n), remainderSelected)
            np.random.shuffle(available)
        selected.sort()
        SDRs.append(selected)
    return SDRs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def carefullyCollideContexts(numContexts, numCells, numMinicolumns):
    """
    Use a greedy algorithm to choose how each minicolumn should distribute
    contexts between its cells.

    @return (list of lists of lists of ints) iContext integers for each
    cell, grouped by minicolumn. For example, [[[1, 3], [2,4]], [[1, 2]]]
    would specify that cell 0 connects to location 1 and location 3, while
    cell 1 connects to locations 2 and 4, and cell 2 (in the second
    minicolumn) connects to locations 1 and 2.
    """
    minicolumns = []
    for _ in xrange(numMinicolumns):
        # contextsForCell[cell] is the set of contexts assigned to that cell.
        contextsForCell = [set() for _ in xrange(numCells)]
        # Python 2: range() returns a list, which shuffle/remove require.
        contexts = range(numContexts)
        random.shuffle(contexts)
        while len(contexts) > 0:
            # Each pass assigns at most one new context to each cell.
            eligibleCells = range(len(contextsForCell))
            while len(contexts) > 0 and len(eligibleCells) > 0:
                candidateAdditions = [(context, cell)
                                      for context in contexts
                                      for cell in eligibleCells]
                # How many new duplicate collisions will come from this addition?
                #
                # For every other context on this cell, check how many times this
                # pair occurs elsewhere.
                badness = [sum(sum(1 if (context in otherCellContexts and
                                         otherContext in otherCellContexts) else 0
                                   for minicolumn in minicolumns
                                   for otherCellContexts in minicolumn)
                               for otherContext in contextsForCell[cell])
                           for context, cell in candidateAdditions]
                # Greedily pick the (context, cell) pair causing the fewest
                # repeated pairings across minicolumns.
                selectedContext, selectedCell = candidateAdditions[
                    badness.index(min(badness))]
                contextsForCell[selectedCell].add(selectedContext)
                eligibleCells.remove(selectedCell)
                contexts.remove(selectedContext)
        minicolumns.append(contextsForCell)
    return minicolumns
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printSegmentForCell(tm, cell):
"""Print segment information for this cell""" |
# Dump every basal segment of `cell` as "presynapticCell:permanence" pairs,
# one segment per line.
# NOTE(review): reaches into private members (_cells, _segments, _synapses) of
# the temporal memory's connections object — debugging aid only. Python 2
# print statements.
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
  print "  ",
  synapses = seg._synapses
  for s in synapses:
    # %g keeps the permanence compact (trailing zeros trimmed).
    print "%d:%g" %(s.presynapticCell,s.permanence),
  print
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, inputs, outputs):
  """
  Dequeue the next record and copy it into the region's output arrays.

  Raises an Exception when the queue is empty.

  :param inputs: unused (required by the region compute interface)
  :param outputs: dict-like with "resetOut" and "dataOut" array slots
  """
  if not self.queue:
    raise Exception("RawValues: No data: queue is empty ")
  # Records were appended on the left, so pop() yields FIFO order.
  record = self.queue.pop()
  outputs["resetOut"][0] = record["reset"]
  outputs["dataOut"][:] = record["dataOut"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addDataToQueue(self, displacement, reset=False):
  """
  Add the given displacement to the region's internal queue.

  Calls to compute will dequeue items in FIFO order.

  :param displacement: two floats, the translation vector [dx, dy] passed to
      linked regions via 'dataOut'
  :type displacement: list
  :param reset: reset flag passed to linked regions via 'resetOut'
  :type reset: bool
  """
  record = {"dataOut": list(displacement), "reset": bool(reset)}
  self.queue.appendleft(record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True, upLearn=None, classifierLearn=False, verbosity=0, progressInterval=None):
""" Runs Union Temporal Pooler network on the specified sequences. @param inputSequences One or more sequences of input patterns. Each should be terminated with None. @param inputCategories A sequence of category representations, one for each element in inputSequences. Each should be terminated with None. @param tmLearn: (bool) Temporal Memory learning mode @param upLearn: (None, bool) Union Temporal Pooler learning mode. If None, Union Temporal Pooler will not be run. @param classifierLearn: (bool) Classifier learning mode @param progressInterval: (int) Interval of console progress updates in terms of timesteps. """ |
# Feed each sequence element through the TM (+ optional UP / classifier),
# printing periodic progress and optional diagnostic traces.
# NOTE(review): Python 2 print statements throughout.
currentTime = time.time()
for i in xrange(len(inputSequences)):
  sensorPattern = inputSequences[i]
  inputCategory = inputCategories[i]
  self.runNetworkOnPattern(sensorPattern,
                           tmLearn=tmLearn,
                           upLearn=upLearn,
                           sequenceLabel=inputCategory)
  # None marks the end of a sequence; don't train the classifier on it.
  if classifierLearn and sensorPattern is not None:
    unionSDR = self.up.getUnionSDR()
    upCellCount = self.up.getColumnDimensions()
    self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
    if verbosity > 1:
      pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
  # Periodic console progress (elapsed time restarts per interval).
  if progressInterval is not None and i > 0 and i % progressInterval == 0:
    elapsed = (time.time() - currentTime) / 60.0
    print ("Ran {0} / {1} elements of sequence in "
           "{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
    currentTime = time.time()
    print MonitorMixinBase.mmPrettyPrintMetrics(
      self.tm.mmGetDefaultMetrics())
# Diagnostic traces after the full run, TM always, UP only if it was run.
if verbosity >= 2:
  traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
  print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                             breakOnResets=
                                             self.tm.mmGetTraceResets())
  if upLearn is not None:
    traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                               breakOnResets=
                                               self.up.mmGetTraceResets())
  print
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getUnionTemporalPoolerInput(self):
  """
  Build the Union Temporal Pooler input from the Temporal Memory state.

  @return (tuple) three dense realDType vectors:
      active cells, correctly-predicted active cells, bursting columns
  """
  numCells = self.tm.numberOfCells()
  numColumns = self.tm.numberOfColumns()

  activeCells = numpy.zeros(numCells).astype(realDType)
  activeCells[list(self.tm.activeCellsIndices())] = 1

  predictedActiveCells = numpy.zeros(numCells).astype(realDType)
  predictedActiveCells[list(self.tm.predictedActiveCellsIndices())] = 1

  burstingColumns = numpy.zeros(numColumns).astype(realDType)
  burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1

  return activeCells, predictedActiveCells, burstingColumns
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, inputVector, learn, activeArray):
  """
  SpatialPooler-style entry point: encode one input vector and report the
  indices of the active units. When `learn` is True, also update the
  activity statistics and the weights.

  @param inputVector input array to encode
  @param learn (bool) whether to perform learning updates
  @param activeArray output array; active positions are set to 1.
  @return indices of the active units
  """
  encoded = self.encode(inputVector)
  winners = np.where(encoded == 1.)[0]
  if learn:
    self.update_statistics([encoded])
    self.update_weights([inputVector], [encoded])
  activeArray[winners] = 1.
  return winners
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_batch(self, inputBatch):
  """Encode a whole batch of input arrays without learning; returns a
  numpy array with one encoded row per input."""
  return np.array([self.encode(sample) for sample in inputBatch])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learn(self, x):
  """Encode a single input array, update activity statistics and weights,
  and return the encoding."""
  encoded = self.encode(x)
  self.update_statistics([encoded])
  self.update_weights([x], [encoded])
  return encoded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learn_batch(self, inputBatch):
  """Encode a whole batch of input arrays, update activity statistics and
  weights from the batch, and return the encodings."""
  encodedBatch = self.encode_batch(inputBatch)
  self.update_statistics(encodedBatch)
  self.update_weights(inputBatch, encodedBatch)
  return encodedBatch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_statistics(self, activityVectors):
  """
  Update the exponential moving averages of individual and pairwise unit
  activity.

  For each activity vector y, every pair (i, j) of active units (including
  i == j, i.e. the diagonal holds individual activity) is counted once; the
  batch mean of these counts is folded into self.average_activity via
  self.exponential_moving_average.

  @param activityVectors iterable of binary activity vectors (0/1 entries)
  """
  n = self.output_size
  A = np.zeros((n, n))
  batchSize = len(activityVectors)
  for y in activityVectors:
    active = np.where(y == 1)[0]
    # Vectorized equivalent of the double loop over active-unit pairs:
    # adds 1 at every (i, j) with both i and j active (diagonal included).
    # The original pure-Python O(k^2) loop was needlessly slow.
    A[np.ix_(active, active)] += 1.
  A = A / batchSize
  self.average_activity = self.exponential_moving_average(
      self.average_activity, A, self.smoothing_period)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getSparseWeights(weightSparsity, inputSize, outputSize):
  """
  Return a randomly initialized weight matrix of shape outputSize x inputSize
  in which only a weightSparsity fraction of each row is non-zero.

  @param weightSparsity (float) fraction of weights to keep non-zero per row
  @param inputSize (int) number of columns
  @param outputSize (int) number of rows
  @return torch.Tensor of shape (outputSize, inputSize)
  """
  # Standard uniform initialization (same scheme as torch.nn.Linear).
  w = torch.Tensor(outputSize, inputSize)
  stdv = 1. / math.sqrt(w.size(1))
  w.data.uniform_(-stdv, stdv)

  # Zero out a random (1 - weightSparsity) fraction of each row.
  # BUG FIX: the original built the index tensor with dtype=np.long, an alias
  # removed in NumPy 1.24+ (AttributeError on modern NumPy). Row-wise fancy
  # indexing achieves the same result without the alias.
  numZeros = int(round((1.0 - weightSparsity) * inputSize))
  for row in range(outputSize):
    zeroCols = np.random.permutation(inputSize)[:numZeros]
    w.data[row, torch.from_numpy(zeroCols)] = 0.0

  return w
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotOverlapHistogram(v, w, title, base="random"):
""" Given a vector v, compute the overlap with the weight matrix w and save the histogram of overlaps. """ |
# Overlap of v with every unit's weight vector (rows of w).
overlaps = v.matmul(w.t())
# Plot histogram of overlaps
bins = np.linspace(float(overlaps.min()), float(overlaps.max()), 28)
plt.hist(overlaps, bins, alpha=0.5, label='All cols')
plt.legend(loc='upper right')
plt.xlabel("Overlap scores")
plt.ylabel("Frequency")
plt.title(title)
# Figure is written to disk (suffix "_1") rather than shown interactively.
plt.savefig(base+"_1")
plt.close()
return overlaps
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotOverlaps(vList, w, base="random", k=20):
""" Given a list of vectors v, compute the overlap of each with the weight matrix w and plot the overlap curves. """ |
# First vector is drawn in magenta as the random baseline; the rest in cyan,
# with only the first of them labeled so the legend stays compact.
for i,v in enumerate(vList):
  if i==0:
    col = "m"
    label = "Random vector"
  else:
    col="c"
    label = ""
    if i==1: label="Test images"
  # Get a sorted list of overlap values, in decreasing order
  overlaps = v.matmul(w.t())
  # NOTE(review): assumes v is a 1 x n row tensor — tolist()[0] takes the
  # single row; sort() ascending, then [::-1] reverses to descending.
  sortedOverlaps = overlaps.sort()[0].tolist()[0][::-1]
  plt.plot(sortedOverlaps,col,label=label)
# Shade the region corresponding to the k winning (active) units.
plt.axvspan(0, k, facecolor="g", alpha=0.3, label="Active units")
plt.legend(loc="upper right")
plt.xlabel("Units")
plt.ylabel("Overlap scores")
plt.title("Sorted unit overlaps of a sparse net.")
# Figure is written to disk (suffix "_2") rather than shown interactively.
plt.savefig(base+"_2")
plt.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def random_mini_batches(X, Y, minibatch_size, seed=None):
  """
  Split inputs X and targets Y into a list of shuffled minibatches.

  Each datapoint is a *column* of X and Y. The last minibatch may be
  smaller when the number of datapoints is not a multiple of
  minibatch_size. When Y is None, a single row of zeros stands in for it.

  @param X (ndarray) inputs, one datapoint per column
  @param Y (ndarray or None) targets, one datapoint per column
  @param minibatch_size (int) datapoints per minibatch
  @param seed (int or None) seed for the shuffling permutation
  @return list of (X_batch, Y_batch) tuples
  """
  numPoints = X.shape[1]
  if Y is None:
    Y = np.zeros((1, numPoints))
  np.random.seed(seed)
  order = np.random.permutation(numPoints)
  return [(X[:, order[start:start + minibatch_size]],
           Y[:, order[start:start + minibatch_size]])
          for start in range(0, numPoints, minibatch_size)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
  """
  Generate a set of random SDRs.

  @param numSDR (int) number of SDRs to generate
  @param numDims (int) dimensionality of each SDR
  @param numActiveInputBits (int) number of active (1) bits per SDR
  @param seed (int) numpy random seed, for reproducibility
  @return (numSDR x numDims) uintType array of 0/1 values
  """
  np.random.seed(seed)
  sdrs = np.zeros((numSDR, numDims), dtype=uintType)
  allBits = np.array(range(numDims))
  for row in range(numSDR):
    chosen = np.random.permutation(allBits)[:numActiveInputBits]
    sdrs[row, chosen] = 1
  return sdrs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def percentOverlap(x1, x2):
  """
  Compute the fraction of overlap between binary vectors x1 and x2,
  normalized by the smaller of the two populations of on-bits.

  @param x1 (array) binary vector
  @param x2 (array) binary vector
  @return (float) overlap fraction; 0 when either vector is all zeros
  """
  smallerNorm = min(np.count_nonzero(x1), np.count_nonzero(x2))
  if smallerNorm == 0:
    return 0
  return float(np.dot(x1.T, x2)) / smallerNorm
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addNoiseToVector(inputVector, noiseLevel, vectorType):
  """
  Corrupt an SDR in place with the corruption routine matching its type.

  @param inputVector (array) binary vector to be corrupted (modified in place)
  @param noiseLevel (float) amount of noise to apply
  @param vectorType (string) "sparse" or "dense"
  @raises ValueError for any other vectorType
  """
  corruptors = {
      'sparse': corruptSparseVector,
      'dense': corruptDenseVector,
  }
  if vectorType not in corruptors:
    raise ValueError("vectorType must be 'sparse' or 'dense' ")
  corruptors[vectorType](inputVector, noiseLevel)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def corruptDenseVector(vector, noiseLevel):
  """
  Corrupt a binary vector in place by inverting each bit independently
  with probability noiseLevel.

  @param vector (array) binary vector to be corrupted (modified in place)
  @param noiseLevel (float) per-bit flip probability
  """
  for idx in range(len(vector)):
    if random.random() < noiseLevel:
      # Flip 0 <-> 1.
      vector[idx] = 1 - vector[idx]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def corruptSparseVector(sdr, noiseLevel):
  """
  Add noise to an SDR in place, turning off numNoiseBits of its active bits
  and turning on the same number of inactive bits, chosen at random.

  @param sdr (array) numpy array holding the SDR (modified in place)
  @param noiseLevel (float) fraction of the active bits to swap
  """
  numNoiseBits = int(noiseLevel * np.sum(sdr))
  if numNoiseBits <= 0:
    # Nothing to swap.
    return sdr
  onBits = np.where(sdr > 0)[0]
  offBits = np.where(sdr == 0)[0]
  flipOff = np.random.permutation(onBits)[:numNoiseBits]
  flipOn = np.random.permutation(offBits)[:numNoiseBits]
  sdr[flipOff] = 0
  sdr[flipOn] = 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculateOverlapCurve(sp, inputVectors):
  """
  Evaluate the noise robustness of a spatial pooler over a set of SDRs.

  For each input and each of 21 noise levels in [0, 1], corrupts the input,
  runs both versions through the SP, and records input-side and output-side
  overlap with the uncorrupted case.

  @param sp a spatial pooler instance
  @param inputVectors (2D array) one input SDR per row
  @return (noiseLevelList, inputOverlapScore, outputOverlapScore)
  """
  numColumns = np.prod(sp.getColumnDimensions())
  numInputVector, inputSize = inputVectors.shape

  outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
  outputColumnsCorrupted = np.zeros((numInputVector, numColumns),
                                    dtype=uintType)

  noiseLevelList = np.linspace(0, 1.0, 21)
  inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
  outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))

  for vecIdx in range(numInputVector):
    for noiseIdx, noiseLevel in enumerate(noiseLevelList):
      corrupted = copy.deepcopy(inputVectors[vecIdx][:])
      corruptSparseVector(corrupted, noiseLevel)

      # SP output for the clean and the corrupted input (learning off).
      sp.compute(inputVectors[vecIdx][:], False, outputColumns[vecIdx][:])
      sp.compute(corrupted, False, outputColumnsCorrupted[vecIdx][:])

      inputOverlapScore[vecIdx][noiseIdx] = percentOverlap(
        inputVectors[vecIdx][:], corrupted)
      outputOverlapScore[vecIdx][noiseIdx] = percentOverlap(
        outputColumns[vecIdx][:], outputColumnsCorrupted[vecIdx][:])

  return noiseLevelList, inputOverlapScore, outputOverlapScore
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def classifySPoutput(targetOutputColumns, outputColumns):
  """
  Classify an SP output by nearest overlap with the class templates.

  @param targetOutputColumns (2D array) one target output per row, one row
      per class
  @param outputColumns (array) the current output
  @return (int) index of the best-matching class (first one on ties)
  """
  overlaps = [percentOverlap(outputColumns, target)
              for target in targetOutputColumns]
  return np.argmax(overlaps)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
""" Evaluate whether the SP output is classifiable, with varying amount of noise @param sp a spatial pooler instance @param inputVectors (list) list of input SDRs @param noiseLevelList (list) list of noise levels :return: """ |
# Build the per-class target outputs from the clean inputs, then measure, at
# every noise level, how often a corrupted input is still classified as its
# own class. sp=None bypasses the SP and classifies raw vectors directly.
numInputVector, inputSize = inputVectors.shape
if sp is None:
  targetOutputColumns = copy.deepcopy(inputVectors)
else:
  columnNumber = np.prod(sp.getColumnDimensions())
  # calculate target output given the uncorrupted input vectors
  targetOutputColumns = np.zeros((numInputVector, columnNumber),
                                 dtype=uintType)
  for i in range(numInputVector):
    sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
# outcomes[i][j] == 1 iff input j was correctly classified at noise level i.
outcomes = np.zeros((len(noiseLevelList), numInputVector))
for i in range(len(noiseLevelList)):
  for j in range(numInputVector):
    corruptedInputVector = copy.deepcopy(inputVectors[j][:])
    corruptSparseVector(corruptedInputVector, noiseLevelList[i])
    if sp is None:
      outputColumns = copy.deepcopy(corruptedInputVector)
    else:
      outputColumns = np.zeros((columnNumber, ), dtype=uintType)
      sp.compute(corruptedInputVector, False, outputColumns)
    # Input j's class label is its own index j.
    predictedClassLabel = classifySPoutput(targetOutputColumns, outputColumns)
    outcomes[i][j] = predictedClassLabel == j
# Mean accuracy per noise level.
predictionAccuracy = np.mean(outcomes, 1)
return predictionAccuracy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
""" Plot example input & output @param sp: an spatial pooler instance @param inputVectors: a set of input vectors """ |
# Run every input through the SP (learning off) and visualize input vs output
# patterns plus their densities. Figures are saved under figures/ when
# saveFigPrefix is given.
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())

outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)

connectedCounts = np.zeros((numColumns,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)

winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
  sp.compute(inputVectors[i][:], False, outputColumns[i][:])
  inputOverlap[i][:] = sp.getOverlaps()
  activeColumns = np.where(outputColumns[i][:] > 0)[0]
  if len(activeColumns) > 0:
    # Mean input overlap of the winning columns only.
    winnerInputOverlap[i] = np.mean(
      inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])

# Side-by-side raster of the first 200 input bits / output columns.
fig, axs = plt.subplots(2, 1)
axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
axs[0].set_ylabel('input #')
axs[0].set_title('input vectors')
axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
axs[1].set_ylabel('input #')
axs[1].set_title('output vectors')
if saveFigPrefix is not None:
  plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))

# Per-input sparsity (fraction of on-bits) before and after the SP.
inputDensity = np.sum(inputVectors, 1) / float(inputSize)
outputDensity = np.sum(outputColumns, 1) / float(numColumns)
fig, axs = plt.subplots(2, 1)
axs[0].plot(inputDensity)
axs[0].set_xlabel('input #')
axs[0].set_ylim([0, 0.2])
axs[1].plot(outputDensity)
axs[1].set_xlabel('input #')
axs[1].set_ylim([0, 0.05])
if saveFigPrefix is not None:
  plt.savefig('figures/{}_example_input_output_density.pdf'.format(saveFigPrefix))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
""" Inspect the statistics of a spatial pooler given a set of input vectors @param sp: an spatial pooler instance @param inputVectors: a set of input vectors """ |
# Gather per-column statistics (connected synapse counts, winner overlaps,
# activation frequencies, entropy) over all inputs and render them in a
# 2x2 summary figure, which is also returned.
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)

connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)

winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
  sp.compute(inputVectors[i][:], False, outputColumns[i][:])
  inputOverlap[i][:] = sp.getOverlaps()
  activeColumns = np.where(outputColumns[i][:] > 0)[0]
  if len(activeColumns) > 0:
    # Mean input overlap restricted to the winning columns.
    winnerInputOverlap[i] = np.mean(
      inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)

entropy = calculateEntropy(outputColumns)

# Column duty cycle distribution, binned over [-0.005, 0.095].
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
                                      bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2

fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')

axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')

axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))

axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()

if saveFigPrefix is not None:
  plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculateEntropy(activeColumns, type='binary'):
  """
  Calculate the mean per-column entropy of an activation history.

  @param activeColumns (array) 2D array, one timestep per row
  @param type (str) 'binary' or 'renyi' — which entropy measure to use
      (the name shadows the builtin, but is kept for keyword compatibility)
  @return (float) total entropy averaged over columns
  @raises ValueError for an unknown entropy type
  """
  activationProb = np.mean(activeColumns, 0)
  entropyFns = {
      'binary': binaryEntropyVectorized,
      'renyi': renyiEntropyVectorized,
  }
  if type not in entropyFns:
    raise ValueError('unknown entropy type')
  totalEntropy = np.sum(entropyFns[type](activationProb))
  # Normalize by the number of columns.
  return totalEntropy / activeColumns.shape[1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meanMutualInformation(sp, activeColumnsCurrentEpoch, columnsUnderInvestigation = []):
""" Computes the mean of the mutual information of pairs taken from a list of columns. """ |
# Default to all SP columns when no subset is specified.
if len(columnsUnderInvestigation) == 0:
  columns = range(np.prod(sp.getColumnDimensions()))
else:
  columns = columnsUnderInvestigation
numCols = len(columns)
sumMutualInfo = 0
# Number of unordered pairs, used to normalize the sum.
# NOTE(review): `/` here suggests Python 2 integer division; under Python 3
# this becomes a float — confirm the intended interpreter.
normalizingConst = numCols*(numCols - 1)/2
# Sum mutual information over every unordered pair of columns.
for i in range(numCols):
  for j in range(i+1, numCols):
    sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch, columns[i], columns[j])
return sumMutualInfo/normalizingConst
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learnL6Pattern(self, l6Pattern, cellsToLearnOn):
  """
  Learn the given L6 pattern on TRN cell dendrites.

  Each TRN cell in cellsToLearnOn grows one new dendritic segment storing
  the full pattern.

  :param l6Pattern: SDR from L6 — list of L6 cell indices
  :param cellsToLearnOn: iterable of (x, y) TRN cell coordinates
  """
  trnIndices = []
  for cellCoord in cellsToLearnOn:
    trnIndices.append(self.trnCellIndex(cellCoord))
  # One new segment per TRN cell, fully connected to the pattern.
  segments = self.trnConnections.createSegments(trnIndices)
  self.trnConnections.growSynapses(segments, l6Pattern, 1.0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def computeFeedForwardActivity(self, feedForwardInput):
  """
  Compute the relay-cell response to the given feed-forward input.

  A relay cell responds whenever any of its presynaptic feed-forward inputs
  is active. Responding cells produce tonic output (0.4); cells that are
  also burst-ready add a burst bonus on top.

  :param feedForwardInput: numpy matrix of shape relayCellShape with 0/1
      entries
  :return: numpy matrix of the same shape with 0, 0.4, or tonic-plus-burst
      values (feedForwardInput itself is not modified)
  """
  ff = feedForwardInput.copy()
  # For each relay cell, see whether any of its FF inputs is active.
  for x in range(self.relayWidth):
    for y in range(self.relayHeight):
      for idx in self._preSynapticFFCells(x, y):
        if feedForwardInput[idx] != 0:
          ff[x, y] = 1.0
          # BUG FIX: the original used `continue`, which only skipped to the
          # next presynaptic cell. Once one active input is found the result
          # for this relay cell is final, so stop scanning its inputs.
          break
  # Tonic response (0.4) everywhere a cell responded, plus the burst bonus
  # for burst-ready cells.
  ff2 = ff * 0.4 + self.burstReadyCells * ff
  return ff2
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
  """Set all transient state back to empty lists / zeros."""
  for attr in ("trnOverlaps", "activeTRNSegments", "activeTRNCellIndices",
               "relayOverlaps", "activeRelaySegments",
               "burstReadyCellIndices"):
    setattr(self, attr, [])
  # Burst-ready map is a dense zero matrix over the relay cell grid.
  self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _initializeTRNToRelayCellConnections(self):
""" Initialize TRN to relay cell connectivity. For each relay cell, create a dendritic segment for each TRN cell it connects to. """ |
for x in range(self.relayWidth):
for y in range(self.relayHeight):
# Create one dendrite for each trn cell that projects to this relay cell
# This dendrite contains one synapse corresponding to this TRN->relay
# connection.
relayCellIndex = self.relayCellIndex((x,y))
trnCells = self._preSynapticTRNCells(x, y)
for trnCell in trnCells:
newSegment = self.relayConnections.createSegments([relayCellIndex])
self.relayConnections.growSynapses(newSegment,
[self.trnCellIndex(trnCell)], 1.0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def countWordOverlapFrequencies(filename="goodOverlapPairs.pkl"):
""" Count how many high overlaps each word has, and print it out """ |
# Load precomputed (word1, word2, overlap) triples and per-word bitmaps,
# tally how many high-overlap partners each word has, and print a table
# sorted by that count. Python 2 code (iterkeys, print statement).
with open(filename,"rb") as f:
  goodOverlapPairs = pickle.load(f)
with open("word_bitmaps_40_bits_minimum.pkl","rb") as f:
  bitmaps = pickle.load(f)

# Count how often each word has a highly overlapping match with other words
# NOTE(review): only the first word of each pair is counted; presumably the
# pair list already contains both orderings — confirm upstream.
wordFrequencies = {}
for w1, w2, overlap in goodOverlapPairs:
  wordFrequencies[w1] = wordFrequencies.get(w1, 0) + 1

printTemplate = PrettyTable(["Num High Overlaps", "Word", "On Bits"],
                            sortby="Num High Overlaps", reversesort=True)
for word in wordFrequencies.iterkeys():
  printTemplate.add_row([wordFrequencies[word], word, len(bitmaps[word])])
print printTemplate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def activateCells(self, activeColumns, basalReinforceCandidates, apicalReinforceCandidates, basalGrowthCandidates, apicalGrowthCandidates, learn=True):
""" Activate cells in the specified columns, using the result of the previous 'depolarizeCells' as predictions. Then learn. @param activeColumns (numpy array) List of active columns @param basalReinforceCandidates (numpy array) List of bits that the active cells may reinforce basal synapses to. @param apicalReinforceCandidates (numpy array) List of bits that the active cells may reinforce apical synapses to. @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to @param learn (bool) Whether to grow / reinforce / punish synapses """ |
# Calculate active cells: columns whose prediction came true activate only
# their predicted cells; unpredicted (bursting) columns activate all cells.
# NOTE(review): `self.predictedCells / self.cellsPerColumn` maps cells to
# columns — relies on integer division (Python 2 ints / numpy int arrays).
(correctPredictedCells,
 burstingColumns) = np2.setCompare(self.predictedCells, activeColumns,
                                   self.predictedCells / self.cellsPerColumn,
                                   rightMinusLeft=True)
newActiveCells = np.concatenate((correctPredictedCells,
                                 np2.getAllCellsInColumns(
                                   burstingColumns, self.cellsPerColumn)))
# Calculate learning: which existing segments to reinforce, which to punish,
# and which cells should grow brand-new segment pairs.
(learningActiveBasalSegments,
 learningActiveApicalSegments,
 learningMatchingBasalSegments,
 learningMatchingApicalSegments,
 basalSegmentsToPunish,
 apicalSegmentsToPunish,
 newSegmentCells,
 learningCells) = self._calculateLearning(activeColumns,
                                          burstingColumns,
                                          correctPredictedCells,
                                          self.activeBasalSegments,
                                          self.activeApicalSegments,
                                          self.matchingBasalSegments,
                                          self.matchingApicalSegments,
                                          self.basalPotentialOverlaps,
                                          self.apicalPotentialOverlaps)
if learn:
  # Learn on existing segments
  for learningSegments in (learningActiveBasalSegments,
                           learningMatchingBasalSegments):
    self._learn(self.basalConnections, self.rng, learningSegments,
                basalReinforceCandidates, basalGrowthCandidates,
                self.basalPotentialOverlaps,
                self.initialPermanence, self.sampleSize,
                self.permanenceIncrement, self.permanenceDecrement,
                self.maxSynapsesPerSegment)
  for learningSegments in (learningActiveApicalSegments,
                           learningMatchingApicalSegments):
    self._learn(self.apicalConnections, self.rng, learningSegments,
                apicalReinforceCandidates, apicalGrowthCandidates,
                self.apicalPotentialOverlaps, self.initialPermanence,
                self.sampleSize, self.permanenceIncrement,
                self.permanenceDecrement, self.maxSynapsesPerSegment)
  # Punish incorrect predictions: decrement active synapses on segments
  # that predicted a column which did not become active.
  if self.basalPredictedSegmentDecrement != 0.0:
    self.basalConnections.adjustActiveSynapses(
      basalSegmentsToPunish, basalReinforceCandidates,
      -self.basalPredictedSegmentDecrement)
  if self.apicalPredictedSegmentDecrement != 0.0:
    self.apicalConnections.adjustActiveSynapses(
      apicalSegmentsToPunish, apicalReinforceCandidates,
      -self.apicalPredictedSegmentDecrement)
  # Only grow segments if there is basal *and* apical input.
  if len(basalGrowthCandidates) > 0 and len(apicalGrowthCandidates) > 0:
    self._learnOnNewSegments(self.basalConnections, self.rng,
                             newSegmentCells, basalGrowthCandidates,
                             self.initialPermanence, self.sampleSize,
                             self.maxSynapsesPerSegment)
    self._learnOnNewSegments(self.apicalConnections, self.rng,
                             newSegmentCells, apicalGrowthCandidates,
                             self.initialPermanence, self.sampleSize,
                             self.maxSynapsesPerSegment)
# Save the results (sorted, as downstream consumers expect ordered indices).
newActiveCells.sort()
learningCells.sort()
self.activeCells = newActiveCells
self.winnerCells = learningCells
self.predictedActiveCells = correctPredictedCells
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calculateLearning(self, activeColumns, burstingColumns, correctPredictedCells, activeBasalSegments, activeApicalSegments, matchingBasalSegments, matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps):
""" Learning occurs on pairs of segments. Correctly predicted cells always have active basal and apical segments, and we learn on these segments. In bursting columns, we either learn on an existing segment pair, or we grow a new pair of segments. @param activeColumns (numpy array) @param burstingColumns (numpy array) @param correctPredictedCells (numpy array) @param activeBasalSegments (numpy array) @param activeApicalSegments (numpy array) @param matchingBasalSegments (numpy array) @param matchingApicalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @param apicalPotentialOverlaps (numpy array) @return (tuple) - learningActiveBasalSegments (numpy array) Active basal segments on correct predicted cells - learningActiveApicalSegments (numpy array) Active apical segments on correct predicted cells - learningMatchingBasalSegments (numpy array) Matching basal segments selected for learning in bursting columns - learningMatchingApicalSegments (numpy array) Matching apical segments selected for learning in bursting columns - basalSegmentsToPunish (numpy array) Basal segments that should be punished for predicting an inactive column - apicalSegmentsToPunish (numpy array) Apical segments that should be punished for predicting an inactive column - newSegmentCells (numpy array) Cells in bursting columns that were selected to grow new segments - learningCells (numpy array) Every cell that has a learning segment or was selected to grow a segment """ |
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
activeApicalSegments, correctPredictedCells)
# Bursting columns
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
matchingApicalSegments)
matchingCells = np.intersect1d(
cellsForMatchingBasal, cellsForMatchingApical)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
(learningMatchingBasalSegments,
learningMatchingApicalSegments) = self._chooseBestSegmentPairPerColumn(
matchingCellsInBurstingColumns, matchingBasalSegments,
matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps)
newSegmentCells = self._getCellsWithFewestSegments(
burstingColumnsWithNoMatch)
# Incorrectly predicted columns
if self.basalPredictedSegmentDecrement > 0.0:
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
else:
basalSegmentsToPunish = ()
if self.apicalPredictedSegmentDecrement > 0.0:
correctMatchingApicalMask = np.in1d(
cellsForMatchingApical / self.cellsPerColumn, activeColumns)
apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
else:
apicalSegmentsToPunish = ()
# Make a list of every cell that is learning
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newSegmentCells))
return (learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates, initialPermanence, sampleSize, maxSynapsesPerSegment):
""" Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array) """ |
# Cap the number of new synapses by the sample size and the per-segment
# synapse limit; -1 means "no limit" for either setting.
synapseCount = len(growthCandidates)
for limit in (sampleSize, maxSynapsesPerSegment):
  if limit != -1:
    synapseCount = min(synapseCount, limit)

# Create one new segment per cell, then grow synapses on each of them
# from a random sample of the growth candidates.
createdSegments = connections.createSegments(newSegmentCells)
connections.growSynapsesToSample(createdSegments, growthCandidates,
                                 synapseCount, initialPermanence,
                                 rng)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chooseBestSegmentPairPerColumn(self, matchingCellsInBurstingColumns, matchingBasalSegments, matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps):
""" Choose the best pair of matching segments - one basal and one apical - for each column. Pairs are ranked by the sum of their potential overlaps. When there's a tie, the first pair wins. @param matchingCellsInBurstingColumns (numpy array) Cells in bursting columns that have at least one matching basal segment and at least one matching apical segment @param matchingBasalSegments (numpy array) @param matchingApicalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @param apicalPotentialOverlaps (numpy array) @return (tuple) - learningBasalSegments (numpy array) The selected basal segments - learningApicalSegments (numpy array) The selected apical segments """ |
basalCandidateSegments = self.basalConnections.filterSegmentsByCell(
matchingBasalSegments, matchingCellsInBurstingColumns)
apicalCandidateSegments = self.apicalConnections.filterSegmentsByCell(
matchingApicalSegments, matchingCellsInBurstingColumns)
# Sort everything once rather than inside of each call to argmaxMulti.
self.basalConnections.sortSegmentsByCell(basalCandidateSegments)
self.apicalConnections.sortSegmentsByCell(apicalCandidateSegments)
# Narrow it down to one pair per cell.
oneBasalPerCellFilter = np2.argmaxMulti(
basalPotentialOverlaps[basalCandidateSegments],
self.basalConnections.mapSegmentsToCells(basalCandidateSegments),
assumeSorted=True)
basalCandidateSegments = basalCandidateSegments[oneBasalPerCellFilter]
oneApicalPerCellFilter = np2.argmaxMulti(
apicalPotentialOverlaps[apicalCandidateSegments],
self.apicalConnections.mapSegmentsToCells(apicalCandidateSegments),
assumeSorted=True)
apicalCandidateSegments = apicalCandidateSegments[oneApicalPerCellFilter]
# Narrow it down to one pair per column.
cellScores = (basalPotentialOverlaps[basalCandidateSegments] +
apicalPotentialOverlaps[apicalCandidateSegments])
columnsForCandidates = (
self.basalConnections.mapSegmentsToCells(basalCandidateSegments) /
self.cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates,
assumeSorted=True)
learningBasalSegments = basalCandidateSegments[onePerColumnFilter]
learningApicalSegments = apicalCandidateSegments[onePerColumnFilter]
return (learningBasalSegments,
learningApicalSegments) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, activeColumns, basalInput, apicalInput=(), basalGrowthCandidates=None, apicalGrowthCandidates=None, learn=True):
""" Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn. @param activeColumns (numpy array) List of active columns @param basalInput (numpy array) List of active input bits for the basal dendrite segments @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. If None, the basalInput is assumed to be growth candidates. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses """ |
# Normalize all inputs to numpy arrays.
activeColumns = np.asarray(activeColumns)
basalInput = np.asarray(basalInput)
apicalInput = np.asarray(apicalInput)

# When no explicit growth candidates are supplied, the inputs themselves
# serve as the pools to grow new synapses from.
basalGrowthCandidates = np.asarray(
  basalInput if basalGrowthCandidates is None else basalGrowthCandidates)
apicalGrowthCandidates = np.asarray(
  apicalInput if apicalGrowthCandidates is None else apicalGrowthCandidates)

# Form predictions first, then activate columns against those predictions.
self.depolarizeCells(basalInput, apicalInput, learn)
self.activateCells(activeColumns, basalInput, apicalInput,
                   basalGrowthCandidates, apicalGrowthCandidates, learn)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self, activeColumns, apicalInput=(), apicalGrowthCandidates=None, learn=True):
""" Perform one timestep. Activate the specified columns, using the predictions from the previous timestep, then learn. Then form a new set of predictions using the new active cells and the apicalInput. @param activeColumns (numpy array) List of active columns @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses """ |
# Normalize inputs; the apical input doubles as the growth-candidate pool
# when no explicit candidates are supplied.
activeColumns = np.asarray(activeColumns)
apicalInput = np.asarray(apicalInput)
if apicalGrowthCandidates is None:
  apicalGrowthCandidates = apicalInput
apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)

# Keep last timestep's predictions available to callers.
self.prevPredictedCells = self.predictedCells

# Activate columns using the *previous* timestep's predictions and
# apical input, then form new predictions from the resulting active
# cells and this timestep's apical input. Order is significant.
self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
                   self.winnerCells, self.prevApicalGrowthCandidates, learn)
self.depolarizeCells(self.activeCells, apicalInput, learn)

# Stash this timestep's apical input for use on the next timestep.
self.prevApicalInput = apicalInput.copy()
self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createLocationEncoder(t, w=15):
""" A default coordinate encoder for encoding locations into sparse distributed representations. """ |
encoder = CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
return encoder |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getUnionLocations(encoder, x, y, r, step=1):
""" Return a union of location encodings that correspond to the union of all locations within the specified circle. """ |
output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
locations = set()
for dx in range(-r, r+1, step):
for dy in range(-r, r+1, step):
if dx*dx + dy*dy <= r*r:
e = encodeLocation(encoder, x+dx, y+dy, output)
locations = locations.union(set(e))
return locations |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calculateBasalLearning(self, activeColumns, burstingColumns, correctPredictedCells, activeBasalSegments, matchingBasalSegments, basalPotentialOverlaps):
""" Basic Temporal Memory learning. Correctly predicted cells always have active basal segments, and we learn on these segments. In bursting columns, we either learn on an existing basal segment, or we grow a new one. The only influence apical dendrites have on basal learning is: the apical dendrites influence which cells are considered "predicted". So an active apical dendrite can prevent some basal segments in active columns from learning. @param correctPredictedCells (numpy array) @param burstingColumns (numpy array) @param activeBasalSegments (numpy array) @param matchingBasalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @return (tuple) - learningActiveBasalSegments (numpy array) Active basal segments on correct predicted cells - learningMatchingBasalSegments (numpy array) Matching basal segments selected for learning in bursting columns - basalSegmentsToPunish (numpy array) Basal segments that should be punished for predicting an inactive column - newBasalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new basal segments - learningCells (numpy array) Cells that have learning basal segments or are selected to grow a basal segment """ |
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
matchingCells = np.unique(cellsForMatchingBasal)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
learningMatchingBasalSegments = self._chooseBestSegmentPerColumn(
self.basalConnections, matchingCellsInBurstingColumns,
matchingBasalSegments, basalPotentialOverlaps, self.cellsPerColumn)
newBasalSegmentCells = self._getCellsWithFewestSegments(
self.basalConnections, self.rng, burstingColumnsWithNoMatch,
self.cellsPerColumn)
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newBasalSegmentCells))
# Incorrectly predicted columns
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
return (learningActiveBasalSegments,
learningMatchingBasalSegments,
basalSegmentsToPunish,
newBasalSegmentCells,
learningCells) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calculateApicalLearning(self, learningCells, activeColumns, activeApicalSegments, matchingApicalSegments, apicalPotentialOverlaps):
""" Calculate apical learning for each learning cell. The set of learning cells was determined completely from basal segments. Do all apical learning on the same cells. Learn on any active segments on learning cells. For cells without active segments, learn on the best matching segment. For cells without a matching segment, grow a new segment. @param learningCells (numpy array) @param correctPredictedCells (numpy array) @param activeApicalSegments (numpy array) @param matchingApicalSegments (numpy array) @param apicalPotentialOverlaps (numpy array) @return (tuple) - learningActiveApicalSegments (numpy array) Active apical segments on correct predicted cells - learningMatchingApicalSegments (numpy array) Matching apical segments selected for learning in bursting columns - apicalSegmentsToPunish (numpy array) Apical segments that should be punished for predicting an inactive column - newApicalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new apical segments """ |
# Cells with active apical segments
learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
activeApicalSegments, learningCells)
# Cells with matching apical segments
learningCellsWithoutActiveApical = np.setdiff1d(
learningCells,
self.apicalConnections.mapSegmentsToCells(learningActiveApicalSegments))
cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
matchingApicalSegments)
learningCellsWithMatchingApical = np.intersect1d(
learningCellsWithoutActiveApical, cellsForMatchingApical)
learningMatchingApicalSegments = self._chooseBestSegmentPerCell(
self.apicalConnections, learningCellsWithMatchingApical,
matchingApicalSegments, apicalPotentialOverlaps)
# Cells that need to grow an apical segment
newApicalSegmentCells = np.setdiff1d(learningCellsWithoutActiveApical,
learningCellsWithMatchingApical)
# Incorrectly predicted columns
correctMatchingApicalMask = np.in1d(
cellsForMatchingApical / self.cellsPerColumn, activeColumns)
apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
return (learningActiveApicalSegments,
learningMatchingApicalSegments,
apicalSegmentsToPunish,
newApicalSegmentCells) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calculateApicalSegmentActivity(connections, activeInput, connectedPermanence, activationThreshold, minThreshold):
""" Calculate the active and matching apical segments for this timestep. @param connections (SparseMatrixConnections) @param activeInput (numpy array) @return (tuple) - activeSegments (numpy array) Dendrite segments with enough active connected synapses to cause a dendritic spike - matchingSegments (numpy array) Dendrite segments with enough active potential synapses to be selected for learning in a bursting column - potentialOverlaps (numpy array) The number of active potential synapses for each segment. Includes counts for active, matching, and nonmatching segments. """ |
# Active
overlaps = connections.computeActivity(activeInput, connectedPermanence)
activeSegments = np.flatnonzero(overlaps >= activationThreshold)
# Matching
potentialOverlaps = connections.computeActivity(activeInput)
matchingSegments = np.flatnonzero(potentialOverlaps >= minThreshold)
return (activeSegments,
matchingSegments,
potentialOverlaps) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calculatePredictedCells(self, activeBasalSegments, activeApicalSegments):
""" Calculate the predicted cells, given the set of active segments. An active basal segment is enough to predict a cell. An active apical segment is *not* enough to predict a cell. When a cell has both types of segments active, other cells in its minicolumn must also have both types of segments to be considered predictive. @param activeBasalSegments (numpy array) @param activeApicalSegments (numpy array) @return (numpy array) """ |
cellsForBasalSegments = self.basalConnections.mapSegmentsToCells(
activeBasalSegments)
cellsForApicalSegments = self.apicalConnections.mapSegmentsToCells(
activeApicalSegments)
fullyDepolarizedCells = np.intersect1d(cellsForBasalSegments,
cellsForApicalSegments)
partlyDepolarizedCells = np.setdiff1d(cellsForBasalSegments,
fullyDepolarizedCells)
inhibitedMask = np.in1d(partlyDepolarizedCells / self.cellsPerColumn,
fullyDepolarizedCells / self.cellsPerColumn)
predictedCells = np.append(fullyDepolarizedCells,
partlyDepolarizedCells[~inhibitedMask])
if self.useApicalTiebreak == False:
predictedCells = cellsForBasalSegments
return predictedCells |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chooseBestSegmentPerCell(cls, connections, cells, allMatchingSegments, potentialOverlaps):
""" For each specified cell, choose its matching segment with largest number of active potential synapses. When there's a tie, the first segment wins. @param connections (SparseMatrixConnections) @param cells (numpy array) @param allMatchingSegments (numpy array) @param potentialOverlaps (numpy array) @return (numpy array) One segment per cell """ |
candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
cells)
# Narrow it down to one pair per cell.
onePerCellFilter = np2.argmaxMulti(potentialOverlaps[candidateSegments],
connections.mapSegmentsToCells(
candidateSegments))
learningSegments = candidateSegments[onePerCellFilter]
return learningSegments |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chooseBestSegmentPerColumn(cls, connections, matchingCells, allMatchingSegments, potentialOverlaps, cellsPerColumn):
""" For all the columns covered by 'matchingCells', choose the column's matching segment with largest number of active potential synapses. When there's a tie, the first segment wins. @param connections (SparseMatrixConnections) @param matchingCells (numpy array) @param allMatchingSegments (numpy array) @param potentialOverlaps (numpy array) """ |
candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
matchingCells)
# Narrow it down to one segment per column.
cellScores = potentialOverlaps[candidateSegments]
columnsForCandidates = (connections.mapSegmentsToCells(candidateSegments) /
cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates)
learningSegments = candidateSegments[onePerColumnFilter]
return learningSegments |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def infer(self, sensationList, reset=True, objectName=None):
""" Infer on given sensations. The provided sensationList is a list of sensations, and each sensation is a mapping from cortical column to a tuple of two SDR's respectively corresponding to the location in object space and the feature. For example, the input can look as follows, if we are inferring a simple object with two sensations (with very few active bits for simplicity):
sensationList = [ { 0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0 1: (set([6, 2, 15]), set([64, 1, 5]), # location, feature for CC1 }, { 0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0 1: (set([1, 6, 45]), set([12, 17, 23]), # location, feature for CC1 }, ] In many uses cases, this object can be created by implementations of ObjectMachines (cf htm.research.object_machine_factory), through their method providedObjectsToInfer. If the object is known by the caller, an object name can be specified as an optional argument, and must match the objects given while learning. Parameters: @param sensationList (list) List of sensations, in the canonical format specified above @param reset (bool) If set to True (which is the default value), the network will be reset after learning. @param objectName (str) Name of the objects (must match the names given during learning). """ |
self._unsetLearningMode()
statistics = collections.defaultdict(list)

for sensations in sensationList:
  # Feed each cortical column its (location, feature) pair for this
  # sensation, then run the network one step and record stats.
  # NOTE(review): the two trailing 0 args to addDataToQueue look like
  # reset/sequence-id flags -- confirm against the sensor region API.
  for col in xrange(self.numColumns):
    location, feature = sensations[col]
    self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
    self.externalInputs[col].addDataToQueue(list(location), 0, 0)
  self.network.run(1)
  self._updateInferenceStats(statistics, objectName)

if reset:
  # send reset signal
  self._sendReset()

# save statistics
statistics["numSteps"] = len(sensationList)
statistics["object"] = objectName if objectName is not None else "Unknown"
self.statistics.append(statistics)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _saveL2Representation(self, objectName):
""" Record the current active L2 cells as the representation for 'objectName'. """ |
# Store the set-based representation (one set of active L2 cells per
# column) keyed by object name.
self.objectL2Representations[objectName] = self.getL2Representations()

try:
  objectIndex = self.objectNameToIndex[objectName]
except KeyError:
  # First time seeing this object: assign it a row index and grow the
  # per-column sparse matrices to fit.
  if self.objectNamesAreIndices:
    # Object names are themselves integer row indices.
    objectIndex = objectName
    if objectIndex >= self.objectL2RepresentationsMatrices[0].nRows():
      for matrix in self.objectL2RepresentationsMatrices:
        matrix.resize(objectIndex + 1, matrix.nCols())
  else:
    # Append a new row at the end for this object.
    objectIndex = self.objectL2RepresentationsMatrices[0].nRows()
    for matrix in self.objectL2RepresentationsMatrices:
      matrix.resize(matrix.nRows() + 1, matrix.nCols())
  self.objectNameToIndex[objectName] = objectIndex

# Write each column's active cells into its matrix row as a binary
# (0/1 float32) vector.
for colIdx, matrix in enumerate(self.objectL2RepresentationsMatrices):
  activeCells = self.L2Columns[colIdx]._pooler.getActiveCells()
  matrix.setRowFromSparse(objectIndex, activeCells,
                          np.ones(len(activeCells), dtype="float32"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plotInferenceStats(self, fields, plotDir="plots", experimentID=0, onePlot=True):
""" Plots and saves the desired inference statistics. Parameters: @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot. """ |
# Ensure the output directory exists before saving any figures.
if not os.path.exists(plotDir):
  os.makedirs(plotDir)
plt.figure()
stats = self.statistics[experimentID]
objectName = stats["object"]

for i in xrange(self.numColumns):
  if not onePlot:
    # One separate figure per cortical column.
    plt.figure()

  # plot request stats -- stats keys are "<field> C<column>".
  for field in fields:
    fieldKey = field + " C" + str(i)
    plt.plot(stats[fieldKey], marker='+', label=fieldKey)

  # format
  plt.legend(loc="upper right")
  plt.xlabel("Sensation #")
  plt.xticks(range(stats["numSteps"]))
  plt.ylabel("Number of active bits")
  # Pad the y-axis so markers near the edges stay visible.
  plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5)
  plt.title("Object inference for object {}".format(objectName))

  # save (per-column figures)
  if not onePlot:
    relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i)
    path = os.path.join(plotDir, relPath)
    plt.savefig(path)
    plt.close()

if onePlot:
  # All columns were drawn on one figure; save it once after the loop.
  relPath = "{}_exp_{}.png".format(self.name, experimentID)
  path = os.path.join(plotDir, relPath)
  plt.savefig(path)
  plt.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def averageConvergencePoint(self, prefix, minOverlap, maxOverlap, settlingTime=1, firstStat=0, lastStat=None):
""" For each object, compute the convergence time - the first point when all L2 columns have converged. Return the average convergence time and accuracy across all objects. Using inference statistics for a bunch of runs, locate all traces with the given prefix. For each trace locate the iteration where it finally settles on targetValue. Return the average settling iteration and accuracy across all runs. :param prefix: Use this prefix to filter relevant stats. :param minOverlap: Min target overlap :param maxOverlap: Max target overlap :param settlingTime: Setting time between iteration. Default 1 :return: Average settling iteration and accuracy across all runs """ |
convergenceSum = 0.0
numCorrect = 0.0
# Sentinel; overwritten with the actual trace length when a matching
# stat key is found below.
inferenceLength = 1000000

# For each object
for stats in self.statistics[firstStat:lastStat]:
  # For each L2 column locate convergence time
  convergencePoint = 0.0
  for key in stats.iterkeys():
    if prefix in key:
      inferenceLength = len(stats[key])
      columnConvergence = L4L2Experiment._locateConvergencePoint(
        stats[key], minOverlap, maxOverlap)
      # The object has converged only once *every* column has converged,
      # so take the max across columns.
      convergencePoint = max(convergencePoint, columnConvergence)

  convergenceSum += ceil(float(convergencePoint) / settlingTime)

  # An object counts as correct if it converged within the trace length.
  if ceil(float(convergencePoint) / settlingTime) <= inferenceLength:
    numCorrect += 1

if len(self.statistics[firstStat:lastStat]) == 0:
  # No objects in the requested range: return sentinel values.
  return 10000.0, 0.0

return (convergenceSum / len(self.statistics[firstStat:lastStat]),
        numCorrect / len(self.statistics[firstStat:lastStat]) )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getAlgorithmInstance(self, layer="L2", column=0):
""" Returns an instance of the underlying algorithm. For example, layer=L2 and column=1 could return the actual instance of ColumnPooler that is responsible for column 1. """ |
assert ( (column>=0) and (column<self.numColumns)), ("Column number not "
"in valid range")
if layer == "L2":
return self.L2Columns[column].getAlgorithmInstance()
elif layer == "L4":
return self.L4Columns[column].getAlgorithmInstance()
else:
raise Exception("Invalid layer. Must be 'L4' or 'L2'") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getCurrentObjectOverlaps(self):
""" Get every L2's current overlap with each L2 object representation that has been learned. :return: 2D numpy array. Each row represents a cortical column. Each column represents an object. Each value represents the cortical column's current L2 overlap with the specified object. """ |
overlaps = np.zeros((self.numColumns,
len(self.objectL2Representations)),
dtype="uint32")
for i, representations in enumerate(self.objectL2RepresentationsMatrices):
activeCells = self.L2Columns[i]._pooler.getActiveCells()
overlaps[i, :] = representations.rightVecSumAtNZSparse(activeCells)
return overlaps |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None):
""" Return True if objectName is currently unambiguously classified by every L2 column. Classification is correct and unambiguous if the current L2 overlap with the true object is greater than minOverlap and if the size of the L2 representation is no more than maxL2Size :param minOverlap: min overlap to consider the object as recognized. Defaults to half of the SDR size :param maxL2Size: max size for the L2 representation Defaults to 1.5 * SDR size :return: True/False """ |
L2Representation = self.getL2Representations()
objectRepresentation = self.objectL2Representations[objectName]
sdrSize = self.config["L2Params"]["sdrSize"]
if minOverlap is None:
minOverlap = sdrSize / 2
if maxL2Size is None:
maxL2Size = 1.5*sdrSize
numCorrectClassifications = 0
for col in xrange(self.numColumns):
overlapWithObject = len(objectRepresentation[col] & L2Representation[col])
if ( overlapWithObject >= minOverlap and
len(L2Representation[col]) <= maxL2Size ):
numCorrectClassifications += 1
return numCorrectClassifications == self.numColumns |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDefaultL4Params(self, inputSize, numInputBits):
""" Returns a good default set of parameters to use in the L4 region. """ |
# Sample size scales with the number of active input bits.
sampleSize = int(1.5 * numInputBits)

# Thresholds appear hand-tuned for the common 20- and 10-bit encodings;
# otherwise fall back to 60% of the active bits.
if numInputBits == 20:
  activationThreshold = 13
  minThreshold = 13
elif numInputBits == 10:
  activationThreshold = 8
  minThreshold = 8
else:
  activationThreshold = int(numInputBits * .6)
  minThreshold = activationThreshold

return {
  "columnCount": inputSize,
  "cellsPerColumn": 16,
  "learn": True,
  "initialPermanence": 0.51,
  "connectedPermanence": 0.6,
  "permanenceIncrement": 0.1,
  "permanenceDecrement": 0.02,
  "minThreshold": minThreshold,
  "basalPredictedSegmentDecrement": 0.0,
  "apicalPredictedSegmentDecrement": 0.0,
  "activationThreshold": activationThreshold,
  # Apical tiebreak implementations accept a lower basal threshold when
  # apical input is also present.
  "reducedBasalThreshold": int(activationThreshold*0.6),
  "sampleSize": sampleSize,
  "implementation": "ApicalTiebreak",
  "seed": self.seed
}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDefaultL2Params(self, inputSize, numInputBits):
""" Returns a good default set of parameters to use in the L2 region. """ |
# Proximal sampling/threshold appear hand-tuned for the common 20- and
# 10-bit encodings; otherwise derived as fractions of the active bits.
if numInputBits == 20:
  sampleSizeProximal = 10
  minThresholdProximal = 5
elif numInputBits == 10:
  sampleSizeProximal = 6
  minThresholdProximal = 3
else:
  sampleSizeProximal = int(numInputBits * .6)
  minThresholdProximal = int(sampleSizeProximal * .6)

return {
  # L2 input comes from L4, which uses 16 cells per column (see
  # getDefaultL4Params), hence the *16 factor.
  "inputWidth": inputSize * 16,
  "cellCount": 4096,
  "sdrSize": 40,
  "synPermProximalInc": 0.1,
  "synPermProximalDec": 0.001,
  "initialProximalPermanence": 0.6,
  "minThresholdProximal": minThresholdProximal,
  "sampleSizeProximal": sampleSizeProximal,
  "connectedPermanenceProximal": 0.5,
  "synPermDistalInc": 0.1,
  "synPermDistalDec": 0.001,
  "initialDistalPermanence": 0.41,
  "activationThresholdDistal": 13,
  "sampleSizeDistal": 20,
  "connectedPermanenceDistal": 0.5,
  "seed": self.seed,
  "learningMode": True,
}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generateFeatures(numFeatures):
  """ Return a list of `numFeatures` distinct string feature names.

  If at most 62 features are requested, each name is a single character
  (A-Z, a-z, 0-9, in that order). Otherwise the names fall back to
  numbered strings "F0", "F1", ...

  @param numFeatures (int) Number of feature names to generate
  @return (list of str) The feature names
  """
  # Capital letters, lowercase letters, numbers.
  # Use range (not py2-only xrange) so this works on both Python 2 and 3.
  candidates = ([chr(i + 65) for i in range(26)] +
                [chr(i + 97) for i in range(26)] +
                [chr(i + 48) for i in range(10)])

  if numFeatures > len(candidates):
    # Too many features for single characters; use numbered names instead.
    return ["F{}".format(i) for i in range(numFeatures)]

  return candidates[:numFeatures]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addMonitor(self, monitor):
  """ Subscribe to SingleLayer2DExperiment events.

  @param monitor (SingleLayer2DExperimentMonitor) An object that implements
         a set of monitor methods
  @return (object) An opaque token that can be used to refer to this
          monitor later (e.g. to unsubscribe it)
  """
  # Tokens are handed out sequentially; each subscription gets a fresh one.
  token = self.nextMonitorToken
  self.nextMonitorToken += 1

  self.monitors[token] = monitor

  return token
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def doTimestep(self, locationSDR, transitionSDR, featureSDR, egocentricLocation, learn):
""" Run one timestep. """ |
# Give monitors a chance to observe the inputs before any layer computes.
for monitor in self.monitors.values():
monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn)
# Stage 1: location layer. It combines the motor delta with the input
# layer's cells from the PREVIOUS timestep (read before inputLayer.compute
# runs below), so stage order matters.
params = {
"newLocation": locationSDR,
"deltaLocation": transitionSDR,
"featureLocationInput": self.inputLayer.getActiveCells(),
"featureLocationGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
"learn": learn,
}
self.locationLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterLocationCompute(**params)
# Stage 2: input (sensory) layer. Basal context comes from the
# just-updated location layer; apical context from the object layer's
# previous state. Note: no explicit learn flag is passed here.
params = {
"activeColumns": featureSDR,
"basalInput": self.locationLayer.getActiveCells(),
"apicalInput": self.objectLayer.getActiveCells(),
}
self.inputLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterInputCompute(**params)
# Stage 3: object layer pools over the input layer's new active cells.
params = {
"feedforwardInput": self.inputLayer.getActiveCells(),
"feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
"learn": learn,
}
self.objectLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterObjectCompute(**params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learnTransitions(self):
""" Train the location layer to do path integration. For every location, teach it each previous-location + motor command pair. """ |
print "Learning transitions"
# For every location on the grid... (note: locationSDR is unbound here; the
# loop body re-reads self.locations[(i, j)] instead)
for (i, j), locationSDR in self.locations.iteritems():
print "i, j", (i, j)
# ...pair it with every motor command whose destination stays on the grid.
for (di, dj), transitionSDR in self.transitions.iteritems():
i2 = i + di
j2 = j + dj
if (0 <= i2 < self.diameter and
0 <= j2 < self.diameter):
# Repeat each (location, motor) -> destination association 5 times.
for _ in xrange(5):
self.locationLayer.reset()
# Timestep 1: anchor the layer on the starting location.
self.locationLayer.compute(newLocation=self.locations[(i,j)])
# Timestep 2: apply the motor delta while teaching the
# destination location as the correct outcome.
self.locationLayer.compute(deltaLocation=transitionSDR,
newLocation=self.locations[(i2, j2)])
self.locationLayer.reset() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def learnObjects(self, objectPlacements):
""" Learn each provided object in egocentric space. Touch every location on each object. This method doesn't try to move the sensor along a path. Instead it just leaps the sensor to each object location, resetting the location layer with each leap. This method simultaneously learns 4 sets of synapses: - location -> input - input -> location - input -> object - object -> input """ |
for monitor in self.monitors.values():
monitor.afterPlaceObjects(objectPlacements)
for objectName, objectDict in self.objects.iteritems():
self.reset()
objectPlacement = objectPlacements[objectName]
for locationName, featureName in objectDict.iteritems():
# Convert the object-relative location to egocentric coordinates.
egocentricLocation = (locationName[0] + objectPlacement[0],
locationName[1] + objectPlacement[1])
locationSDR = self.locations[egocentricLocation]
featureSDR = self.features[featureName]
# The sensor leaps directly to the location: no motor delta is given.
transitionSDR = np.empty(0)
self.locationLayer.reset()
self.inputLayer.reset()
# Let the network settle on this (location, feature) pair while
# learning is enabled.
for _ in xrange(10):
self.doTimestep(locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn=True)
# Record the settled representations for later comparison/inference.
self.inputRepresentations[(featureName, egocentricLocation)] = (
self.inputLayer.getActiveCells())
self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
self.learnedObjectPlacements[objectName] = objectPlacement |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
""" Choose the transition that lands us in the location we've touched the least often. Break ties randomly, i.e. choose the first candidate in a shuffled list. """ |
candidates = list(transition
for transition in self.transitions.keys()
if (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1]) in objectDict)
random.shuffle(candidates)
selectedVisitCount = None
selectedTransition = None
selectedAllocentricLocation = None
for transition in candidates:
candidateLocation = (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1])
if (selectedVisitCount is None or
visitCounts[candidateLocation] < selectedVisitCount):
selectedVisitCount = visitCounts[candidateLocation]
selectedTransition = transition
selectedAllocentricLocation = candidateLocation
return selectedAllocentricLocation, selectedTransition |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.