Dataset columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M).
Create our state object. Parameters: --------------------------------------------------------------------- hsObj: Reference to the HypersearchV2 instance (provides access to the ClientJobsDAO, the logger, and our jobID)
def __init__(self, hsObj): # Save constructor parameters self._hsObj = hsObj # Convenient access to the logger self.logger = self._hsObj.logger # This contains our current state, and local working changes self._state = None # This contains the state we last read from the database ...
108,464
Set our state to that obtained from the engWorkerState field of the job record. Parameters: --------------------------------------------------------------------- (none; the state is read directly from the jobs database)
def readStateFromDB(self): self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID, ['engWorkerState'])[0] # Init if no prior state yet if self._priorStateJSON is None: swarms = dict() # Fast Swarm, first and only sprin...
108,465
Return the field contribution statistics. Parameters: --------------------------------------------------------------------- retval: Dictionary where the keys are the field names and the values are how much each field contributed to the best score.
def getFieldContributions(self): #in the fast swarm, there is only 1 sprint and field contributions are #not defined if self._hsObj._fixedFields is not None: return dict(), dict() # Get the predicted field encoder name predictedEncoderName = self._hsObj._predictedFieldEncoder # ----...
108,467
Return the list of all swarms in the given sprint. Parameters: --------------------------------------------------------------------- retval: list of all swarm Ids in the given sprint
def getAllSwarms(self, sprintIdx): swarmIds = [] for swarmId, info in self._state['swarms'].iteritems(): if info['sprintIdx'] == sprintIdx: swarmIds.append(swarmId) return swarmIds
108,468
Return the list of all completed swarms. Parameters: --------------------------------------------------------------------- retval: list of completed swarm Ids
def getCompletedSwarms(self): swarmIds = [] for swarmId, info in self._state['swarms'].iteritems(): if info['status'] == 'completed': swarmIds.append(swarmId) return swarmIds
108,469
Return the list of all completing swarms. Parameters: --------------------------------------------------------------------- retval: list of completing swarm Ids
def getCompletingSwarms(self): swarmIds = [] for swarmId, info in self._state['swarms'].iteritems(): if info['status'] == 'completing': swarmIds.append(swarmId) return swarmIds
108,470
Return the best model ID and its errScore from the given sprint, which may still be in progress. This returns the best score from all models in the sprint which have matured so far. Parameters: --------------------------------------------------------------------- retval: (modelId, errScore)
def bestModelInSprint(self, sprintIdx): # Get all the swarms in this sprint swarms = self.getAllSwarms(sprintIdx) # Get the best model and score from each swarm bestModelId = None bestErrScore = numpy.inf for swarmId in swarms: (modelId, errScore) = self._hsObj._resultsDB.bestModelId...
108,471
Change the given swarm's status to 'newStatus'. If 'newStatus' is 'completed', then bestModelId and bestErrScore must be provided. Parameters: --------------------------------------------------------------------- swarmId: swarm Id newStatus: new status, either 'active', 'completing', 'complete...
def setSwarmState(self, swarmId, newStatus): assert (newStatus in ['active', 'completing', 'completed', 'killed']) # Set the swarm status swarmInfo = self._state['swarms'][swarmId] if swarmInfo['status'] == newStatus: return # If some other worker noticed it as completed, setting it to ...
108,472
Recursively applies f to the values in dict d. Args: d: The dict to recurse over. f: A function to apply to values in d that takes the value and a list of keys from the root of the dict to the value.
def rApply(d, f): remainingDicts = [(d, ())] while len(remainingDicts) > 0: current, prevKeys = remainingDicts.pop() for k, v in current.iteritems(): keys = prevKeys + (k,) if isinstance(v, dict): remainingDicts.insert(0, (v, keys)) else: f(v, keys)
108,522
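A minimal usage sketch for rApply above (the collect callback and the sample dict are illustrative, not from the source): gather every leaf value together with its key path.

leaves = []
def collect(value, keys):
    # f receives the leaf value and the tuple of keys leading to it
    leaves.append((keys, value))

rApply({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}, collect)
# leaves now contains entries such as (('a',), 1) and (('b', 'd', 'e'), 3);
# the visiting order of sibling keys is not guaranteed.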
Do one iteration of inference and/or learning and return the result Parameters: -------------------------------------------- rfInput: Input vector. Shape is: (1, inputVectorLen). resetSignal: True if reset is asserted
def _doBottomUpCompute(self, rfInput, resetSignal): # Conditional compute break self._conditionalBreak() # Save the rfInput for the spInputNonZeros parameter self._spatialPoolerInput = rfInput.reshape(-1) assert(rfInput.shape[0] == 1) # Run inference using the spatial pooler. We learn on...
108,582
Return (isInt, intValue) for a given floating point number. Parameters: ---------------------------------------------------------------------- x: floating point number to evaluate precision: desired precision retval: (isInt, intValue) isInt: True if x is close enough to an integer value ...
def _isInt(x, precision = 0.0001): xInt = int(round(x)) return (abs(x - xInt) <= precision * abs(x), xInt)
108,644
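A quick usage sketch of _isInt as defined above (the sample inputs are illustrative): values within the relative tolerance count as integers.

print(_isInt(3.00002))  # -> (True, 3): within the relative tolerance
print(_isInt(3.1))      # -> (False, 3): too far from the nearest integer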
Returns the experiment description schema. This implementation loads it in from file experimentDescriptionSchema.json. Parameters: -------------------------------------------------------------------------- Returns: a dict representing the experiment description schema.
def _getExperimentDescriptionSchema(): installPath = os.path.dirname(os.path.abspath(__file__)) schemaFilePath = os.path.join(installPath, "experimentDescriptionSchema.json") with open(schemaFilePath, 'r') as schemaFile: return json.load(schemaFile)
108,654
Generates the Metrics for a given InferenceType Parameters: ------------------------------------------------------------------------- options: ExpGenerator options retval: (metricsList, optimizeMetricLabel) metricsList: list of metric string names optimizeMetricLabel: Name of the metric...
def _generateMetricSpecs(options): inferenceType = options['inferenceType'] inferenceArgs = options['inferenceArgs'] predictionSteps = inferenceArgs['predictionSteps'] metricWindow = options['metricWindow'] if metricWindow is None: metricWindow = int(Configuration.get("nupic.opf.metricWindow")) metr...
108,657
Add noise to the given input. Parameters: ----------------------------------------------- input: the input to add noise to noise: how much noise to add doForeground: If true, turn off some of the 1 bits in the input doBackground: If true, turn on some of the 0 bits in the input
def addNoise(input, noise=0.1, doForeground=True, doBackground=True): if doForeground and doBackground: return numpy.abs(input - (numpy.random.random(input.shape) < noise)) else: if doForeground: return numpy.logical_and(input, numpy.random.random(input.shape) > noise) if doBackground: r...
108,706
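A quick demo of addNoise above: with both foreground and background noise enabled, roughly a 'noise' fraction of all bits gets flipped (the seed and vector size here are illustrative).

import numpy
numpy.random.seed(42)  # repeatable demo only
v = (numpy.random.random(1000) < 0.5).astype('int32')
noisy = addNoise(v, noise=0.1)
print((v != noisy).mean())  # fraction of flipped bits, close to 0.1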
Generate a coincidence matrix. This is used to generate random inputs to the temporal learner and to compare the predicted output against. It generates a matrix of nCoinc rows, each row has length 'length' and has a total of 'activity' bits on. Parameters: ----------------------------------------------- n...
def generateCoincMatrix(nCoinc=10, length=500, activity=50): coincMatrix0 = SM32(int(nCoinc), int(length)) theOnes = numpy.array([1.0] * activity, dtype=numpy.float32) for rowIdx in xrange(nCoinc): coinc = numpy.array(random.sample(xrange(length), activity), dtype=numpy.uint32) coinc.s...
108,707
Generate a non-overlapping coincidence matrix. This is used to generate random inputs to the temporal learner and to compare the predicted output against. It generates a matrix of nCoinc rows, each row has length 'length' and has a total of 'activity' bits on. Parameters: -----------------------------------...
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50): assert nCoinc*activity<=length, "can't generate non-overlapping coincidences" coincMatrix = SM32(0, length) coinc = numpy.zeros(length, dtype='int32') for i in xrange(nCoinc): coinc[:] = 0 coinc[i*activity:(i+1)*activity] = 1 ...
108,712
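The original builds a nupic SM32 sparse matrix; as a rough numpy-only sketch of the same non-overlapping layout (the function name here is hypothetical):

import numpy

def simpleCoincRows(nCoinc=10, length=500, activity=50):
    # Each row gets its own contiguous, non-overlapping block of active bits
    assert nCoinc * activity <= length, "can't generate non-overlapping coincidences"
    rows = numpy.zeros((nCoinc, length), dtype='int32')
    for i in range(nCoinc):
        rows[i, i * activity:(i + 1) * activity] = 1
    return rows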
Convert a list of sequences of pattern indices, and a pattern lookup table, into an array of patterns Parameters: ----------------------------------------------- seq: the sequence, given as indices into the patternMatrix patternMatrix: a SparseMatrix containing the possible patterns used in ...
def vectorsFromSeqList(seqList, patternMatrix): totalLen = 0 for seq in seqList: totalLen += len(seq) vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool') vecOffset = 0 for seq in seqList: seq = numpy.array(seq, dtype='uint32') for idx,coinc in enumerate(seq): vectors...
108,715
Returns 3 things for a vector: * the total on time * the number of runs * a list of the durations of each run. Parameters: ----------------------------------------------- input stream: 11100000001100000000011111100000 return value: (11, 3, [3, 2, 6])
def _listOfOnTimesInVec(vector): # init counters durations = [] numOnTimes = 0 totalOnTime = 0 # Find where the nonzeros are nonzeros = numpy.array(vector).nonzero()[0] # Nothing to do if vector is empty if len(nonzeros) == 0: return (0, 0, []) # Special case of only 1 on bit if len(non...
108,724
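The body above is truncated; here is a compact, behavior-equivalent sketch of the run-length bookkeeping it describes, using itertools.groupby instead of the original's index arithmetic (the function name is hypothetical):

from itertools import groupby

def listOfOnTimes(vector):
    # One duration per run of consecutive on bits
    durations = [len(list(run)) for bit, run in groupby(vector) if bit]
    return (sum(durations), len(durations), durations)

# Matches the docstring example: (11, 3, [3, 2, 6])
print(listOfOnTimes([1,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0]))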
Returns the stability for the population averaged over multiple time steps Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted At each time step, count the fracti...
def populationStability(vectors, numSamples=None): # ---------------------------------------------------------------------- # Calculate the stability numVectors = len(vectors) if numSamples is None: numSamples = numVectors-1 countOn = range(numVectors-1) else: countOn = numpy.random.randint(0...
108,730
Returns the percent of the outputs that remain completely stable over N time steps. Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted For each window of numSample...
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None): # ---------------------------------------------------------------------- # Calculate the stability totalSamples = len(vectors) windowSize = numSamples # Process each window numWindows = 0 pctStable = 0 for wStart in range(0, totalSa...
108,731
Set the random seed and the numpy seed Parameters: -------------------------------------------------------------------- seed: random seed
def setSeed(self, seed): rand.seed(seed) np.random.seed(seed)
108,788
Add multiple fields to the dataset. Parameters: ------------------------------------------------------------------- fieldsInfo: A list of dictionaries, containing a field name, specs for the data classes and encoder params for the corresponding field.
def addMultipleFields(self, fieldsInfo): assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field in fieldsInfo) for spec in fieldsInfo: self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams'))
108,790
Initialize field using relevant encoder parameters. Parameters: ------------------------------------------------------------------- name: Field name encoderParams: Parameters for the encoder. Returns the index of the field
def defineField(self, name, encoderParams=None): self.fields.append(_field(name, encoderParams)) return len(self.fields)-1
108,791
Set flag for field at index. Flags are special characters such as 'S' for sequence or 'T' for timestamp. Parameters: -------------------------------------------------------------------- index: index of field whose flag is being set flag: special character
def setFlag(self, index, flag): assert len(self.fields)>index self.fields[index].flag=flag
108,792
Encode a record as a sparse distributed representation Parameters: -------------------------------------------------------------------- record: Record to be encoded toBeAdded: Whether the encodings corresponding to the record are added to the corresponding fields
def encodeRecord(self, record, toBeAdded=True): encoding = [self.fields[i].encodeValue(record[i], toBeAdded) for i in xrange(len(self.fields))] return encoding
108,797
Encodes a list of records. Parameters: -------------------------------------------------------------------- records: One or more records. (i,j)th element of this 2D array specifies the value at field j of record i. If unspecified, records previously generated and sto...
def encodeAllRecords(self, records=None, toBeAdded=True): if records is None: records = self.getAllRecords() if self.verbosity>0: print 'Encoding', len(records), 'records.' encodings = [self.encodeRecord(record, toBeAdded) for record in records] return encodings
108,798
Add 'value' to field i, generating one from the field's data class if no value is given. Returns the value that was added. Parameters: -------------------------------------------------------------------- i: index of the field that the value is added to value: the value to add; if None, the next value is generated
def addValueToField(self, i, value=None): assert(len(self.fields)>i) if value is None: value = self.fields[i].dataClass.getNext() self.fields[i].addValue(value) return value
108,799
Export all the records into a csv file in numenta format. Example header format (three header rows: names, types, flags): fieldName1, fieldName2, fieldName3 / date, string, float / T, S Parameters: -------------------------------------------------------------------- path: Relative path of the file to...
def saveRecords(self, path='myOutput'): numRecords = self.fields[0].numRecords assert (all(field.numRecords==numRecords for field in self.fields)) import csv with open(path+'.csv', 'wb') as f: writer = csv.writer(f) writer.writerow(self.getAllFieldNames()) writer.writerow(self.ge...
108,807
Instantiate our results database Parameters: -------------------------------------------------------------------- hsObj: Reference to the HypersearchV2 instance
def __init__(self, hsObj): self._hsObj = hsObj # This list holds all the results we have so far on every model. In # addition, we maintain multiple other data structures which provide # faster access into portions of this list self._allResults = [] # Models that completed with errors an...
108,829
Return the modelID of the model with the given paramsHash, or None if not found. Parameters: --------------------------------------------------------------------- paramsHash: paramsHash to look for retval: modelId, or None if not found
def getModelIDFromParamsHash(self, paramsHash): entryIdx = self._paramsHashToIndexes.get(paramsHash, None) if entryIdx is not None: return self._allResults[entryIdx]['modelID'] else: return None
108,831
Return particle info for a specific modelId. Parameters: --------------------------------------------------------------------- modelId: which model Id retval: (particleState, modelId, errScore, completed, matured)
def getParticleInfo(self, modelId): entry = self._allResults[self._modelIDToIdx[modelId]] return (entry['modelParams']['particleState'], modelId, entry['errScore'], entry['completed'], entry['matured'])
108,834
Return a list of swarm generations that have completed and the best (minimal) errScore seen for each of them. Parameters: --------------------------------------------------------------------- retval: list of tuples. Each tuple is of the form: (swarmId, genIdx, bestErrScore)
def getMaturedSwarmGenerations(self): # Return results go in this list result = [] # For each of the swarm generations which have had model result updates # since the last time we were called, see which have completed. modifiedSwarmGens = sorted(self._modifiedSwarmGens) # Walk through th...
108,837
If there are any models that haven't been updated in a while, consider them dead, and mark them as hidden in our resultsDB. We also change the paramsHash and particleHash of orphaned models so that we can re-generate that particle and/or model again if we desire. Parameters: -----------------------...
def _checkForOrphanedModels(self): self.logger.debug("Checking for orphaned models older than %s" % (self._modelOrphanIntervalSecs)) while True: orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID, self._modelOrphanI...
108,844
Back up a file Parameters: ---------------------------------------------------------------------- filePath: path of the file to back up retval: Filepath of the back-up
def _backupFile(filePath): assert os.path.exists(filePath) stampNum = 0 (prefix, suffix) = os.path.splitext(filePath) while True: backupPath = "%s.%d%s" % (prefix, stampNum, suffix) stampNum += 1 if not os.path.exists(backupPath): break shutil.copyfile(filePath, backupPath) return bac...
108,864
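Usage sketch for _backupFile above (the filename is illustrative): the first backup of results.csv lands at results.0.csv, the next at results.1.csv, and so on.

backupPath = _backupFile('results.csv')
print(backupPath)  # e.g. 'results.0.csv'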
Pick up the latest search from a saved jobID and monitor it to completion Parameters: ---------------------------------------------------------------------- retval: nothing
def pickupSearch(self): self.__searchJob = self.loadSavedHyperSearchJob( permWorkDir=self._options["permWorkDir"], outputLabel=self._options["outputLabel"]) self.monitorSearchJob()
108,867
Launch worker processes to execute the given command line Parameters: ----------------------------------------------- cmdLine: The command line for each worker numWorkers: number of workers to launch
def _launchWorkers(self, cmdLine, numWorkers): self._workers = [] for i in range(numWorkers): stdout = tempfile.NamedTemporaryFile(delete=False) stderr = tempfile.NamedTemporaryFile(delete=False) p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True, ...
108,869
Starts HyperSearch as a worker or runs it inline for the "dryRun" action Parameters: ---------------------------------------------------------------------- retval: the new _HyperSearchJob instance representing the HyperSearch job
def __startSearch(self): # This search uses a pre-existing permutations script params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options, forRunning=True) if self._options["action"] == "dryRun": args = [sys.argv[0], "--params=%s" ...
108,870
Instantiates a _HyperSearchJob instance from info saved in file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: _HyperSearchJob...
def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel): jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir, outputLabel=outputLabel) searchJob = _HyperSearchJob(nupicJobID=jobID) return searchJob
108,872
Saves the given _HyperSearchJob instance's jobID to file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID hyperSearchJob: _HyperSearchJob...
def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob): jobID = hyperSearchJob.getJobID() filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir, outputLabel=outputLabel) if os.path.exists(filePath): _backupFile(fi...
108,873
Loads a saved jobID from file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: HyperSearch jobID; raises exception if not fou...
def __loadHyperSearchJobID(cls, permWorkDir, outputLabel): filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir, outputLabel=outputLabel) jobID = None with open(filePath, "r") as jobIdPickleFile: jobInfo = pickle.load(jobIdPickleFi...
108,874
Returns the filepath where the HyperSearch jobID is stored Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: Filepath where to store Hyper...
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel): # Get the base path and figure out the path of the report file. basePath = permWorkDir # Form the name of the output csv file that will contain all the results filename = "%s_HyperSearchJobID.pkl" % (outputLabel,) filepath = os....
108,875
Emit model info to csv file Parameters: ---------------------------------------------------------------------- modelInfo: _NupicModelInfo instance retval: nothing
def emit(self, modelInfo): # Open/init csv file, if needed if self.__csvFileObj is None: # sets up self.__sortedVariableNames and self.__csvFileObj self.__openAndInitCSVFile(modelInfo) csv = self.__csvFileObj # Emit model info row to report.csv print >> csv, "%s, " % (self.__searc...
108,879
Close file and print report/backup csv file paths Parameters: ---------------------------------------------------------------------- retval: nothing
def finalize(self): if self.__csvFileObj is not None: # Done with file self.__csvFileObj.close() self.__csvFileObj = None print "Report csv saved in %s" % (self.__reportCSVPath,) if self.__backupCSVPath: print "Previous report csv file was backed up to %s" % \ ...
108,880
- Backs up old report csv file; - opens the report csv file in append or overwrite mode (per self.__replaceReport); - emits column fields; - sets up self.__sortedVariableNames, self.__csvFileObj, self.__backupCSVPath, and self.__reportCSVPath Parameters: --------------------------------...
def __openAndInitCSVFile(self, modelInfo): # Get the base path and figure out the path of the report file. basePath = self.__outputDirAbsPath # Form the name of the output csv file that will contain all the results reportCSVName = "%s_Report.csv" % (self.__outputLabel,) reportCSVPath = self.__...
108,881
_NupicJob constructor Parameters: ---------------------------------------------------------------------- nupicJobID: Nupic Client JobID of the job
def __init__(self, nupicJobID): self.__nupicJobID = nupicJobID jobInfo = _clientJobsDB().jobInfo(nupicJobID) assert jobInfo is not None, "jobID=%s not found" % nupicJobID assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId, nupicJobID) _emit(Verbosity.DEBUG, "_NupicJob: \n%s" % ppr...
108,882
Queries the DB for model IDs of all currently instantiated models associated with this HyperSearch job. See also: _iterModels() Parameters: ---------------------------------------------------------------------- retval: A sequence of Nupic modelIDs
def queryModelIDs(self): jobID = self.getJobID() modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID) modelIDs = tuple(x[0] for x in modelCounterPairs) return modelIDs
108,885
Unwraps self.__rawInfo.params into the equivalent python dictionary and caches it in self.__cachedParams. Returns the unwrapped params Parameters: ---------------------------------------------------------------------- retval: Model params dictionary as corresponding to the json ...
def __unwrapParams(self): if self.__cachedParams is None: self.__cachedParams = json.loads(self.__rawInfo.params) assert self.__cachedParams is not None, \ "%s resulted in None" % self.__rawInfo.params return self.__cachedParams
108,892
Retrieves a dictionary of metrics that combines all report and optimization metrics Parameters: ---------------------------------------------------------------------- retval: a dictionary of optimization metrics that were collected for the model; an empty dictionary if there ...
def getAllMetrics(self): result = self.getReportMetrics() result.update(self.getOptimizationMetrics()) return result
108,893
Unwraps self.__rawInfo.results and caches it in self.__cachedResults; Returns the unwrapped results Parameters: ---------------------------------------------------------------------- retval: ModelResults namedtuple instance
def __unwrapResults(self): if self.__cachedResults is None: if self.__rawInfo.results is not None: resultList = json.loads(self.__rawInfo.results) assert len(resultList) == 2, \ "Expected 2 elements, but got %s (%s)." % ( len(resultList), resultList) ...
108,894
Returns the periodic checks to see if the model should continue running. Parameters: ----------------------------------------------------------------------- terminationFunc: The function that will be called in the model main loop as a wrapper around this function. Must have a par...
def getTerminationCallbacks(self, terminationFunc): activities = [None] * len(ModelTerminator._MILESTONES) for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES): cb = functools.partial(terminationFunc, index=index) activities[index] = PeriodicActivityRequest(repeating =False, ...
108,904
Tell the writer which metrics should be written Parameters: ----------------------------------------------------------------------- metricNames: A list of metric labels to be written
def setLoggedMetrics(self, metricNames): if metricNames is None: self.__metricNames = set([]) else: self.__metricNames = set(metricNames)
109,057
[virtual method override] Save a checkpoint of the prediction output stream. The checkpoint comprises up to maxRows of the most recent inference records. Parameters: ---------------------------------------------------------------------- checkpointSink: A File-like object where predictions check...
def checkpoint(self, checkpointSink, maxRows): checkpointSink.truncate() if self.__dataset is None: if self.__checkpointCache is not None: self.__checkpointCache.seek(0) shutil.copyfileobj(self.__checkpointCache, checkpointSink) checkpointSink.flush() return el...
109,061
Generates a set of input records Params: numRecords - how many records to generate elemSize - the size of each record (num 0s or 1s) numSet - how many 1s in each record Returns: a list of inputs
def generateRandomInput(numRecords, elemSize = 400, numSet = 42): inputs = [] for _ in xrange(numRecords): input = np.zeros(elemSize, dtype=realDType) for _ in range(0,numSet): ind = np.random.random_integers(0, elemSize-1, 1)[0] input[ind] = 1 while abs(input.sum() - numSet) > 0.1: ...
109,078
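The resampling loop above is truncated; a simpler sketch that achieves the same goal by drawing exactly numSet distinct positions per record (assumes numpy is imported as np; the original's realDType is replaced by an explicit float32 here, and the function name is hypothetical):

import numpy as np

def generateRandomInputSketch(numRecords, elemSize=400, numSet=42):
    inputs = []
    for _ in range(numRecords):
        vec = np.zeros(elemSize, dtype='float32')
        # Sample numSet distinct indices, so no resampling loop is needed
        onBits = np.random.choice(elemSize, size=numSet, replace=False)
        vec[onBits] = 1
        inputs.append(vec)
    return inputs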
Stores the current model results in the manager's internal store Parameters: ----------------------------------------------------------------------- results: A ModelResults object that contains the current timestep's input/inferences
def _addResults(self, results): # ----------------------------------------------------------------------- # If the model potentially has temporal inferences. if self.__isTemporal: shiftedInferences = self.__inferenceShifter.shift(results).inferences self.__currentResult = copy.deepcopy(resu...
109,110
Get the actual value for this field Parameters: ----------------------------------------------------------------------- inferenceElement: The inference element (part of the inference) that is being used for this metric
def _getGroundTruth(self, inferenceElement): sensorInputElement = InferenceElement.getInputElement(inferenceElement) if sensorInputElement is None: return None return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement)
109,111
Creates the required metrics modules Parameters: ----------------------------------------------------------------------- metricSpecs: A sequence of MetricSpec objects that specify which metric modules to instantiate
def __constructMetricsModules(self, metricSpecs): if not metricSpecs: return self.__metricSpecs = metricSpecs for spec in metricSpecs: if not InferenceElement.validate(spec.inferenceElement): raise ValueError("Invalid inference element for metric spec: %r" %spec) self.__metr...
109,112
Generates the ClientJobs database name for the given version of the database Parameters: ---------------------------------------------------------------- dbVersion: ClientJobs database version number retval: the ClientJobs database name for the given DB version
def __getDBNameForVersion(cls, dbVersion): # DB Name prefix for the given version prefix = cls.__getDBNamePrefixForVersion(dbVersion) # DB Name suffix suffix = Configuration.get('nupic.cluster.database.nameSuffix') # Replace dash and dot with underscore (e.g. 'ec2-user' or ec2.user will brea...
109,139
Get the instance of the ClientJobsDAO created for this process (or perhaps at some point in the future, for this thread). Parameters: ---------------------------------------------------------------- retval: instance of ClientJobsDAO
def get(): # Instantiate if needed if ClientJobsDAO._instance is None: cjDAO = ClientJobsDAO() cjDAO.connect() ClientJobsDAO._instance = cjDAO # Return the instance to the caller return ClientJobsDAO._instance
109,140
Instantiate a ClientJobsDAO instance. Parameters: ----------------------------------------------------------------
def __init__(self): self._logger = _LOGGER # Usage error to instantiate more than 1 instance per process assert (ClientJobsDAO._instance is None) # Create the name of the current version database self.dbName = self._getDBName() # NOTE: we set the table names here; the rest of the table ...
109,141
Convert a database internal column name to a public name. This takes something of the form word1_word2_word3 and converts it to: word1Word2Word3. If the db field name starts with '_', it is stripped out so that the name is compatible with collections.namedtuple. for example: _word1_word2_word3 => word1W...
def _columnNameDBToPublic(self, dbName): words = dbName.split('_') if dbName.startswith('_'): words = words[1:] pubWords = [words[0]] for word in words[1:]: pubWords.append(word[0].upper() + word[1:]) return ''.join(pubWords)
109,142
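Usage sketch for the naming rule above, called on a ClientJobsDAO instance (here named dao purely for illustration):

print(dao._columnNameDBToPublic('job_id'))                 # -> 'jobId'
print(dao._columnNameDBToPublic('_eng_last_update_time'))  # -> 'engLastUpdateTime'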
Initialize tables, if needed Parameters: ---------------------------------------------------------------- cursor: SQL cursor deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database ...
def _initTables(self, cursor, deleteOldVersions, recreate): # Delete old versions if they exist if deleteOldVersions: self._logger.info( "Dropping old versions of client_jobs DB; called from: %r", traceback.format_stack()) for i in range(self._DB_VERSION): cursor.execut...
109,144
For use only by Nupic Scheduler (also known as ClientJobManager) Look through the jobs table and see if any new job requests have been queued up. If so, pick one and mark it as starting up and create the model table to hold the results Parameters: ---------------------------------------------------...
def jobStartNext(self): # NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a # connection loss occurs during cursor.execute, then the server-cached # information is lost, and we cannot get the updated job ID; so, we use # this select instead row = self._getOneMatchingRowWit...
109,156
Look through the jobs table and count the running jobs whose cancel field is true. Parameters: ---------------------------------------------------------------- retval: A count of running jobs with the cancel field set to true.
def jobCountCancellingJobs(self,): with ConnectionFactory.get() as conn: query = 'SELECT COUNT(job_id) '\ 'FROM %s ' \ 'WHERE (status<>%%s AND cancel is TRUE)' \ % (self.jobsTableName,) conn.cursor.execute(query, [self.STATUS_COMPLETED]) rows = conn....
109,160
Look through the jobs table and get the list of running jobs whose cancel field is true. Parameters: ---------------------------------------------------------------- retval: A (possibly empty) sequence of running job IDs with cancel field set to true
def jobGetCancellingJobs(self,): with ConnectionFactory.get() as conn: query = 'SELECT job_id '\ 'FROM %s ' \ 'WHERE (status<>%%s AND cancel is TRUE)' \ % (self.jobsTableName,) conn.cursor.execute(query, [self.STATUS_COMPLETED]) rows = conn.cursor.fet...
109,161
Generator to allow iterating slices at dynamic intervals Parameters: ---------------------------------------------------------------- data: Any data structure that supports slicing (e.g. list or tuple) *intervals: Iterable of intervals. The sum of intervals should be less than, o...
def partitionAtIntervals(data, intervals): assert sum(intervals) <= len(data) start = 0 for interval in intervals: end = start + interval yield data[start:end] start = end
109,162
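Usage sketch: split a five-element row into chunks of length 2 and 3, calling the generator as a ClientJobsDAO staticmethod the way _combineResults below does (the data is illustrative).

parts = list(ClientJobsDAO.partitionAtIntervals([10, 20, 30, 40, 50], (2, 3)))
print(parts)  # -> [[10, 20], [30, 40, 50]]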
Return a list of namedtuples from the result of a join query. A single database result is partitioned at intervals corresponding to the fields in namedTuples. The return value is the result of applying namedtuple._make() to each of the partitions, for each of the namedTuples. Parameters: --------...
def _combineResults(result, *namedTuples): results = ClientJobsDAO.partitionAtIntervals( result, [len(nt._fields) for nt in namedTuples]) return [nt._make(part) for nt, part in zip(namedTuples, results)]
109,163
Get all info about a job Parameters: ---------------------------------------------------------------- jobID: jobID of the job to query retval: namedtuple containing the job info.
def jobInfo(self, jobID): row = self._getOneMatchingRowWithRetries( self._jobs, dict(job_id=jobID), [self._jobs.pubToDBNameDict[n] for n in self._jobs.jobInfoNamedTuple._fields]) if row is None: raise RuntimeError("jobID=%s not found within the jobs table" % (jobID)) # Create...
109,165
Change the status on the given job Parameters: ---------------------------------------------------------------- jobID: jobID of the job to change status status: new status string (ClientJobsDAO.STATUS_xxxxx) useConnectionID: True if the connection id of the calling function must be th...
def jobSetStatus(self, jobID, status, useConnectionID=True,): # Get a database connection and cursor with ConnectionFactory.get() as conn: query = 'UPDATE %s SET status=%%s, ' \ ' _eng_last_update_time=UTC_TIMESTAMP() ' \ ' WHERE job_id=%%s' \ ...
109,166
Change the status on the given job to completed Parameters: ---------------------------------------------------------------- jobID: jobID of the job to mark as completed completionReason: completionReason string completionMsg: completionMsg string useConnectionID: True i...
def jobSetCompleted(self, jobID, completionReason, completionMsg, useConnectionID = True): # Get a database connection and cursor with ConnectionFactory.get() as conn: query = 'UPDATE %s SET status=%%s, ' \ ' completion_reason=%%s, ' \ '...
109,167
Cancel the given job. This will update the cancel field in the jobs table and will result in the job being cancelled. Parameters: ---------------------------------------------------------------- jobID: jobID of the job to cancel
def jobCancel(self, jobID): self._logger.info('Canceling jobID=%s', jobID) # NOTE: jobSetFields does retries on transient mysql failures self.jobSetFields(jobID, {"cancel" : True}, useConnectionID=False)
109,168
Update the results string and last-update-time fields of a job. Parameters: ---------------------------------------------------------------- jobID: job ID of the job to modify results: new results (json dict string)
def jobUpdateResults(self, jobID, results): with ConnectionFactory.get() as conn: query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), ' \ ' results=%%s ' \ ' WHERE job_id=%%s' % (self.jobsTableName,) conn.cursor.execute(query, [results, jo...
109,179
Delete all models from the models table Parameters: ----------------------------------------------------------------
def modelsClearAll(self): self._logger.info('Deleting all rows from models table %r', self.modelsTableName) with ConnectionFactory.get() as conn: query = 'DELETE FROM %s' % (self.modelsTableName) conn.cursor.execute(query)
109,180
Get ALL info for a set of models WARNING!!!: The results are NOT necessarily returned in the same order as the model IDs passed in!!! Parameters: ---------------------------------------------------------------- modelIDs: list of model IDs retval: list of namedtuples con...
def modelsInfo(self, modelIDs): assert isinstance(modelIDs, self._SEQUENCE_TYPES), ( "wrong modelIDs type: %s") % (type(modelIDs),) assert modelIDs, "modelIDs is empty" rows = self._getMatchingRowsWithRetries( self._models, dict(model_id=modelIDs), [self._models.pubToDBNameDict[f] ...
109,182
Get the params and paramsHash for a set of models. WARNING!!!: The results are NOT necessarily returned in the same order as the model IDs passed in!!! Parameters: ---------------------------------------------------------------- modelIDs: list of model IDs retval: list...
def modelsGetParams(self, modelIDs): assert isinstance(modelIDs, self._SEQUENCE_TYPES), ( "Wrong modelIDs type: %r") % (type(modelIDs),) assert len(modelIDs) >= 1, "modelIDs is empty" rows = self._getMatchingRowsWithRetries( self._models, {'model_id' : modelIDs}, [self._models.pubToD...
109,187
Update the results string, and/or num_records fields of a model. This will fail if the model does not currently belong to this client (connection_id doesn't match). Parameters: ---------------------------------------------------------------- modelID: model ID of model to modify results: ...
def modelUpdateResults(self, modelID, results=None, metricValue =None, numRecords=None): assignmentExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()', 'update_counter=update_counter+1'] assignmentValues = [] if results is not None: assig...
109,190
Look through the models table for an orphaned model, which is a model that is not completed yet, whose _eng_last_update_time is more than maxUpdateInterval seconds ago. If one is found, change its _eng_worker_conn_id to the current worker's and return the model id. Parameters: ----------------...
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval): @g_retrySQL def findCandidateModelWithRetries(): modelID = None with ConnectionFactory.get() as conn: # TODO: may need a table index on job_id/status for speed query = 'SELECT model_id FROM %s ' \ ' WHE...
109,192
Compare the results and return True if success, False if failure Parameters: -------------------------------------------------------------------- coincs: Which cells are we comparing? comparedTo: The set of 40 cells we are being compared to (they have no overlap with seen) seen: ...
def printOverlaps(comparedTo, coincs, seen): inputOverlap = 0 cellOverlap = 0 for y in comparedTo: closestInputs = [] closestCells = [] if len(seen)>0: inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))]) cellOverlap = max([len(seen[m][0].intersection(y[1]...
109,348
Compute prediction accuracy by checking if the next page in the sequence is within the top N predictions calculated by the model Args: model: HTM model size: Sample size top: top N predictions to use Returns: Probability the next page in the sequence is within the top N predicted pages
def computeAccuracy(model, size, top): accuracy = [] # Load MSNBC web data file filename = os.path.join(os.path.dirname(__file__), "msnbc990928.zip") with zipfile.ZipFile(filename) as archive: with archive.open("msnbc990928.seq") as datafile: # Skip header lines (first 7 lines) for _ in xran...
109,394
Reads the user session record from the file's cursor position Args: datafile: Data file whose cursor points at the beginning of the record Returns: list of pages in the order clicked by the user
def readUserSession(datafile): for line in datafile: pages = line.split() total = len(pages) # Select user sessions with 2 or more pages if total < 2: continue # Exclude outliers by removing extreme long sessions if total > 500: continue return [PAGE_CATEGORIES[int(i) - 1]...
109,395
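A standalone sketch of the session filter described above, returning raw category codes rather than going through the PAGE_CATEGORIES lookup the original relies on (the function name is hypothetical):

def readSessionCodes(datafile):
    for line in datafile:
        pages = line.split()
        # Keep sessions with 2..500 page views, as in the original
        if 2 <= len(pages) <= 500:
            return [int(code) for code in pages]
    return []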
Returns the maximum delay for the InferenceElements in the inference dictionary Parameters: ----------------------------------------------------------------------- inferences: A dictionary where the keys are InferenceElements
def getMaxDelay(inferences): maxDelay = 0 for inferenceElement, inference in inferences.iteritems(): if isinstance(inference, dict): for key in inference.iterkeys(): maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement, ...
109,418
Parse the given XML file and return a dict describing the file. Parameters: ---------------------------------------------------------------- filename: name of XML file to parse (no path) path: path of the XML file. If None, then use the standard configuration search path. re...
def _readConfigFile(cls, filename, path=None): outputProperties = dict() # Get the path to the config files. if path is None: filePath = cls.findConfigFile(filename) else: filePath = os.path.join(path, filename) # ------------------------------------------------------------------...
109,423
Set multiple custom properties and persist them to the custom configuration store. Parameters: ---------------------------------------------------------------- properties: a dict of property name/value pairs to set
def setCustomProperties(cls, properties): _getLogger().info("Setting custom configuration properties=%r; caller=%r", properties, traceback.format_stack()) _CustomConfigurationFileWrapper.edit(properties) for propertyName, value in properties.iteritems(): cls.set(propertyNa...
109,424
If persistent is True, delete the temporary file Parameters: ---------------------------------------------------------------- persistent: if True, custom configuration file is deleted
def clear(cls, persistent=False): if persistent: try: os.unlink(cls.getPath()) except OSError, e: if e.errno != errno.ENOENT: _getLogger().exception("Error %s while trying to remove dynamic " \ "configuration file: %s", e.errno, ...
109,427
Edits the XML configuration file with the parameters specified by properties Parameters: ---------------------------------------------------------------- properties: dict of settings to be applied to the custom configuration store (key is property name, value is value)
def edit(cls, properties): copyOfProperties = copy(properties) configFilePath = cls.getPath() try: with open(configFilePath, 'r') as fp: contents = fp.read() except IOError, e: if e.errno != errno.ENOENT: _getLogger().exception("Error %s reading custom configuration st...
109,429
Copy specific variables from particleState into this particle. Parameters: -------------------------------------------------------------- particleState: dict produced by a particle's getState() method varNames: which variables to copy
def copyVarStatesFrom(self, particleState, varNames): # Set this to false if you don't want the variable to move anymore # after we set the state allowedToMove = True for varName in particleState['varStates']: if varName in varNames: # If this particle doesn't include this field, d...
109,436
Return the position of this particle. This returns a dict() of key value pairs where each key is the name of the flattened permutation variable and the value is its chosen value. Parameters: -------------------------------------------------------------- retval: dict() of flattened permutation c...
def getPosition(self): result = dict() for (varName, value) in self.permuteVars.iteritems(): result[varName] = value.getPosition() return result
109,437
Return the position of a particle given its state dict. Parameters: -------------------------------------------------------------- retval: dict() of particle position, keys are the variable names, values are their positions
def getPositionFromState(pState): result = dict() for (varName, value) in pState['varStates'].iteritems(): result[varName] = value['position'] return result
109,438
Agitate this particle so that it is likely to go to a new position. Every time agitate is called, the particle is jiggled an even greater amount. Parameters: -------------------------------------------------------------- retval: None
def agitate(self): for (varName, var) in self.permuteVars.iteritems(): var.agitate() self.newPosition()
109,439
Choose a new position based on results obtained so far from all other particles. Parameters: -------------------------------------------------------------- whichVars: If not None, only move these variables retval: new position
def newPosition(self, whichVars=None): # TODO: incorporate data from choice variables.... # TODO: make sure we're calling this when appropriate. # Get the global best position for this swarm generation globalBestPosition = None # If speculative particles are enabled, use the global best conside...
109,440
Given the timestamp of a record (a datetime object), compute the record's timestamp index - this is the timestamp divided by the aggregation period. Parameters: ------------------------------------------------------------------------ recordTS: datetime instance retval: record timestamp index, o...
def _computeTimestampRecordIdx(self, recordTS): if self._aggregationPeriod is None: return None # Base record index on number of elapsed months if aggregation is in # months if self._aggregationPeriod['months'] > 0: assert self._aggregationPeriod['seconds'] == 0 result = int( ...
109,578
Given the name of an aggregation function, returns the function pointer and param. Parameters: ------------------------------------------------------------------------ funcName: a string (name of function) or funcPtr retval: (funcPtr, param)
def _getFuncPtrAndParams(self, funcName): params = None if isinstance(funcName, basestring): if funcName == 'sum': fp = _aggr_sum elif funcName == 'first': fp = _aggr_first elif funcName == 'last': fp = _aggr_last elif funcName == 'mean': fp = _aggr_...
109,603
Generate the aggregated output record Parameters: ------------------------------------------------------------------------ retval: outputRecord
def _createAggregateRecord(self): record = [] for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields): if aggFP is None: # this field is not supposed to be aggregated. continue values = self._slice[i] refIndex = None if paramIdx is not None: record.append(ag...
109,604
Parse command line options Args: args: command line arguments (not including sys.argv[0]) Returns: namedtuple ParseCommandLineOptionsResult
def _parseCommandLineOptions(args): usageStr = ( "%prog [options] descriptionPyDirectory\n" "This script runs a single OPF Model described by description.py " "located in the given directory." ) parser = optparse.OptionParser(usage=usageStr) parser.add_option("-c", help="C...
109,629
Creates and runs the experiment Args: options: namedtuple ParseCommandLineOptionsResult model: For testing: may pass in an existing OPF Model instance to use instead of creating a new one. Returns: reference to OPFExperiment instance that was constructed (this is provided to aid with debuggi...
def _runExperimentImpl(options, model=None): json_helpers.validate(options.privateOptions, schemaDict=g_parsedPrivateCommandLineOptionsSchema) # Load the experiment's description.py module experimentDir = options.experimentDir descriptionPyModule = helpers.loadExperimentDescriptionSc...
109,632
Constructor Args: model: The OPF Model instance against which to run the task task: A dictionary conforming to opfTaskSchema.json cmdOptions: ParseCommandLineOptionsResult namedtuple
def __init__(self, model, task, cmdOptions): validateOpfJsonValue(task, "opfTaskSchema.json") # Set up our logger self.__logger = logging.getLogger(".".join( ['com.numenta', self.__class__.__module__, self.__class__.__name__])) #self.__logger.setLevel(logging.DEBUG) self.__logger.debug(...
109,639
The main function of the HypersearchWorker script. This parses the command line arguments, instantiates a HypersearchWorker instance, and then runs it. Parameters: ---------------------------------------------------------------------- retval: jobID of the job we ran. This is used by unit test code ...
def main(argv): parser = OptionParser(helpString) parser.add_option("--jobID", action="store", type="int", default=None, help="jobID of the job within the dbTable [default: %default].") parser.add_option("--modelID", action="store", type="str", default=None, help=("Tell worker to re-run this...
109,738
Instantiate the Hypersearch worker Parameters: --------------------------------------------------------------------- options: The command line options. See the main() method for a description of these options cmdLineArgs: Copy of the command line arguments, so we can place the...
def __init__(self, options, cmdLineArgs): # Save options self._options = options # Instantiate our logger self.logger = logging.getLogger(".".join( ['com.numenta.nupic.swarming', self.__class__.__name__])) # Override log level? if options.logLevel is not None: self.logger....
109,739
Run this worker. Parameters: ---------------------------------------------------------------------- retval: jobID of the job we ran. This is used by unit test code when calling this working using the --params command line option (which tells this worker to insert the...
def run(self): # Easier access to options options = self._options # --------------------------------------------------------------------- # Connect to the jobs database self.logger.info("Connecting to the jobs database") cjDAO = ClientJobsDAO.get() # Get our worker ID self._worker...
109,741
Adds a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify. value: The value ...
def add(reader, writer, column, start, stop, value): for i, row in enumerate(reader): if i >= start and i <= stop: row[column] = type(value)(row[column]) + value writer.appendRecord(row)
109,798
Multiplies a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify. multiple: ...
def scale(reader, writer, column, start, stop, multiple): for i, row in enumerate(reader): if i >= start and i <= stop: row[column] = type(multiple)(row[column]) * multiple writer.appendRecord(row)
109,799
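Usage sketch for add and scale above, with a minimal stand-in for the FileRecordStream writer (only appendRecord is needed); the rows are illustrative.

class ListWriter(object):
    def __init__(self):
        self.rows = []
    def appendRecord(self, row):
        self.rows.append(row)

rows = [[0, 1.0], [1, 2.0], [2, 3.0]]
out = ListWriter()
add(iter(rows), out, column=1, start=0, stop=1, value=10.0)
print(out.rows)  # -> [[0, 11.0], [1, 12.0], [2, 3.0]]
# Note: add() mutates the rows in place as it copies them to the writer.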