text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def int_check(*args, func=None):
    """Validate that every argument is an integral number.

    Args:
        args: Values to check.
        func: Caller name used in the error message; when omitted it is
            looked up from the call stack.

    Raises:
        ComplexError: If any argument is not a ``numbers.Integral``.
    """
    func = func or inspect.stack()[2][3]
    for var in args:
        if isinstance(var, numbers.Integral):
            continue
        name = type(var).__name__
        raise ComplexError(
            f'Function {func} expected integral number, {name} got instead.')
[ "def", "int_check", "(", "*", "args", ",", "func", "=", "None", ")", ":", "func", "=", "func", "or", "inspect", ".", "stack", "(", ")", "[", "2", "]", "[", "3", "]", "for", "var", "in", "args", ":", "if", "not", "isinstance", "(", "var", ",", "numbers", ".", "Integral", ")", ":", "name", "=", "type", "(", "var", ")", ".", "__name__", "raise", "ComplexError", "(", "f'Function {func} expected integral number, {name} got instead.'", ")" ]
41.625
11.25
def calculate(self, where, calcExpression, sqlFormat="standard"):
    """Run the calculate operation on this feature service layer.

    Updates the values of one or more fields in an existing feature
    service layer based on SQL expressions or scalar values.  Only
    available when the supportsCalculate property of the layer is true;
    neither the Shape field nor system fields (ObjectId, GlobalId) can
    be updated this way.

    Inputs:
       where - A where clause limiting the updated records.  Any legal
          SQL where clause operating on the fields in the layer is
          allowed.
       calcExpression - dict, or list of dicts, with the field(s) to
          update and their scalar values or SQL expressions.
          Calculation format: {"field" : "<field name>", "value" : "<value>"}
       sqlFormat - The SQL format for the calcExpression.  Either
          standard SQL92 (standard) or native SQL (native).
          The default is standard.
          Values: standard, native
    Output:
       JSON as string
    Usage:
       >>>sh = arcrest.AGOLTokenSecurityHandler("user", "pw")
       >>>fl = arcrest.agol.FeatureLayer(url="someurl",
                                         securityHandler=sh, initialize=True)
       >>>print fl.calculate(where="OBJECTID < 2",
                             calcExpression={"field": "ZONE", "value" : "R1"})
       {'updatedFeatureCount': 1, 'success': True}
    """
    params = {
        "f": "json",
        "where": where,
    }
    # A single dict is wrapped in a list so the service always receives
    # an array of field/value info objects.
    if isinstance(calcExpression, dict):
        params["calcExpression"] = json.dumps([calcExpression],
                                              default=_date_handler)
    elif isinstance(calcExpression, list):
        params["calcExpression"] = json.dumps(calcExpression,
                                              default=_date_handler)
    # Any unrecognized sqlFormat value silently falls back to "standard".
    requested_format = sqlFormat.lower()
    if requested_format not in ('native', 'standard'):
        requested_format = "standard"
    params['sqlFormat'] = requested_format
    return self._post(url=self._url + "/calculate",
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
[ "def", "calculate", "(", "self", ",", "where", ",", "calcExpression", ",", "sqlFormat", "=", "\"standard\"", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/calculate\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"where\"", ":", "where", ",", "}", "if", "isinstance", "(", "calcExpression", ",", "dict", ")", ":", "params", "[", "\"calcExpression\"", "]", "=", "json", ".", "dumps", "(", "[", "calcExpression", "]", ",", "default", "=", "_date_handler", ")", "elif", "isinstance", "(", "calcExpression", ",", "list", ")", ":", "params", "[", "\"calcExpression\"", "]", "=", "json", ".", "dumps", "(", "calcExpression", ",", "default", "=", "_date_handler", ")", "if", "sqlFormat", ".", "lower", "(", ")", "in", "[", "'native'", ",", "'standard'", "]", ":", "params", "[", "'sqlFormat'", "]", "=", "sqlFormat", ".", "lower", "(", ")", "else", ":", "params", "[", "'sqlFormat'", "]", "=", "\"standard\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")" ]
50.186441
20.864407
def create_bmi_config_file(self, filename: str = "bmi_config.txt") -> None:
    """Write the model's default initial state to a BMI config file.

    Args:
        filename: Path of the config file (CSV) to create.
    """
    initial_state = self.construct_default_initial_state()
    initial_state.to_csv(filename, index_label="variable")
[ "def", "create_bmi_config_file", "(", "self", ",", "filename", ":", "str", "=", "\"bmi_config.txt\"", ")", "->", "None", ":", "s0", "=", "self", ".", "construct_default_initial_state", "(", ")", "s0", ".", "to_csv", "(", "filename", ",", "index_label", "=", "\"variable\"", ")" ]
42.5
20.25
def serialize_operator_match(self, op):
    """Serialize a match operator into a ``<matches>`` element.

    Example::

        <matches>
            <value>text</value>
            <value><attribute>foobar</attribute></value>
        </matches>
    """
    root = etree.Element('matches')
    return self.serialize_value_list(root, op.args)
[ "def", "serialize_operator_match", "(", "self", ",", "op", ")", ":", "elem", "=", "etree", ".", "Element", "(", "'matches'", ")", "return", "self", ".", "serialize_value_list", "(", "elem", ",", "op", ".", "args", ")" ]
28.769231
15.846154
def perform_command(self):
    """
    Perform command and return the appropriate exit code.

    Reads the input audio path and output path from the positional
    arguments, extracts MFCCs from the input file, and saves them in
    one of four formats selected by command-line flags: raw float64
    binary (-b), NumPy .npz (-z), NumPy .npy (-n), or plain text
    (default, format controlled by --format).

    :rtype: int
    """
    # Two positional arguments are required: input path and output path.
    if len(self.actual_arguments) < 2:
        return self.print_help()
    input_file_path = self.actual_arguments[0]
    output_file_path = self.actual_arguments[1]
    # Text output format string, e.g. "%.18e" (the default).
    output_text_format = self.has_option_with_value(u"--format")
    if output_text_format is None:
        output_text_format = u"%.18e"
    # Mutually exclusive output-format flags; checked in order below.
    output_binary = self.has_option([u"-b", u"--binary"])
    output_npz = self.has_option([u"-z", u"--npz"])
    output_npy = self.has_option([u"-n", u"--npy"])
    delete_first = self.has_option([u"-d", u"--delete-first"])
    transpose = self.has_option([u"-t", u"--transpose"])
    # NOTE(review): presumably warns if the "cmfcc" C extension is
    # unavailable rather than aborting — confirm against the base class.
    self.check_c_extensions("cmfcc")
    if not self.check_input_file(input_file_path):
        return self.ERROR_EXIT_CODE
    if not self.check_output_file(output_file_path):
        return self.ERROR_EXIT_CODE
    try:
        mfccs = AudioFileMFCC(input_file_path, rconf=self.rconf, logger=self.logger).all_mfcc
        # Optionally drop the first coefficient row and/or transpose.
        if delete_first:
            mfccs = mfccs[1:, :]
        if transpose:
            mfccs = mfccs.transpose()
        if output_binary:
            # save as a raw C float64 binary file
            mapped = numpy.memmap(output_file_path, dtype="float64", mode="w+", shape=mfccs.shape)
            mapped[:] = mfccs[:]
            mapped.flush()
            del mapped
        elif output_npz:
            # save as a .npz compressed binary file
            with io.open(output_file_path, "wb") as output_file:
                numpy.savez(output_file, mfccs)
        elif output_npy:
            # save as a .npy binary file
            with io.open(output_file_path, "wb") as output_file:
                numpy.save(output_file, mfccs)
        else:
            # save as a text file
            # NOTE: in Python 2, passing the fmt value a Unicode string crashes NumPy
            # hence, converting back to bytes, which works in Python 3 too
            numpy.savetxt(output_file_path, mfccs, fmt=gf.safe_bytes(output_text_format))
        self.print_info(u"MFCCs shape: %d %d" % (mfccs.shape))
        self.print_success(u"MFCCs saved to '%s'" % (output_file_path))
        return self.NO_ERROR_EXIT_CODE
    except AudioFileConverterError:
        self.print_error(u"Unable to call the ffmpeg executable '%s'" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]))
        self.print_error(u"Make sure the path to ffmpeg is correct")
    except (AudioFileUnsupportedFormatError, AudioFileNotInitializedError):
        self.print_error(u"Cannot read file '%s'" % (input_file_path))
        self.print_error(u"Check that its format is supported by ffmpeg")
    except OSError:
        self.print_error(u"Cannot write file '%s'" % (output_file_path))
    # Any handled exception above falls through to the error exit code.
    return self.ERROR_EXIT_CODE
[ "def", "perform_command", "(", "self", ")", ":", "if", "len", "(", "self", ".", "actual_arguments", ")", "<", "2", ":", "return", "self", ".", "print_help", "(", ")", "input_file_path", "=", "self", ".", "actual_arguments", "[", "0", "]", "output_file_path", "=", "self", ".", "actual_arguments", "[", "1", "]", "output_text_format", "=", "self", ".", "has_option_with_value", "(", "u\"--format\"", ")", "if", "output_text_format", "is", "None", ":", "output_text_format", "=", "u\"%.18e\"", "output_binary", "=", "self", ".", "has_option", "(", "[", "u\"-b\"", ",", "u\"--binary\"", "]", ")", "output_npz", "=", "self", ".", "has_option", "(", "[", "u\"-z\"", ",", "u\"--npz\"", "]", ")", "output_npy", "=", "self", ".", "has_option", "(", "[", "u\"-n\"", ",", "u\"--npy\"", "]", ")", "delete_first", "=", "self", ".", "has_option", "(", "[", "u\"-d\"", ",", "u\"--delete-first\"", "]", ")", "transpose", "=", "self", ".", "has_option", "(", "[", "u\"-t\"", ",", "u\"--transpose\"", "]", ")", "self", ".", "check_c_extensions", "(", "\"cmfcc\"", ")", "if", "not", "self", ".", "check_input_file", "(", "input_file_path", ")", ":", "return", "self", ".", "ERROR_EXIT_CODE", "if", "not", "self", ".", "check_output_file", "(", "output_file_path", ")", ":", "return", "self", ".", "ERROR_EXIT_CODE", "try", ":", "mfccs", "=", "AudioFileMFCC", "(", "input_file_path", ",", "rconf", "=", "self", ".", "rconf", ",", "logger", "=", "self", ".", "logger", ")", ".", "all_mfcc", "if", "delete_first", ":", "mfccs", "=", "mfccs", "[", "1", ":", ",", ":", "]", "if", "transpose", ":", "mfccs", "=", "mfccs", ".", "transpose", "(", ")", "if", "output_binary", ":", "# save as a raw C float64 binary file", "mapped", "=", "numpy", ".", "memmap", "(", "output_file_path", ",", "dtype", "=", "\"float64\"", ",", "mode", "=", "\"w+\"", ",", "shape", "=", "mfccs", ".", "shape", ")", "mapped", "[", ":", "]", "=", "mfccs", "[", ":", "]", "mapped", ".", "flush", "(", ")", "del", "mapped", "elif", "output_npz", 
":", "# save as a .npz compressed binary file", "with", "io", ".", "open", "(", "output_file_path", ",", "\"wb\"", ")", "as", "output_file", ":", "numpy", ".", "savez", "(", "output_file", ",", "mfccs", ")", "elif", "output_npy", ":", "# save as a .npy binary file", "with", "io", ".", "open", "(", "output_file_path", ",", "\"wb\"", ")", "as", "output_file", ":", "numpy", ".", "save", "(", "output_file", ",", "mfccs", ")", "else", ":", "# save as a text file", "# NOTE: in Python 2, passing the fmt value a Unicode string crashes NumPy", "# hence, converting back to bytes, which works in Python 3 too", "numpy", ".", "savetxt", "(", "output_file_path", ",", "mfccs", ",", "fmt", "=", "gf", ".", "safe_bytes", "(", "output_text_format", ")", ")", "self", ".", "print_info", "(", "u\"MFCCs shape: %d %d\"", "%", "(", "mfccs", ".", "shape", ")", ")", "self", ".", "print_success", "(", "u\"MFCCs saved to '%s'\"", "%", "(", "output_file_path", ")", ")", "return", "self", ".", "NO_ERROR_EXIT_CODE", "except", "AudioFileConverterError", ":", "self", ".", "print_error", "(", "u\"Unable to call the ffmpeg executable '%s'\"", "%", "(", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "FFMPEG_PATH", "]", ")", ")", "self", ".", "print_error", "(", "u\"Make sure the path to ffmpeg is correct\"", ")", "except", "(", "AudioFileUnsupportedFormatError", ",", "AudioFileNotInitializedError", ")", ":", "self", ".", "print_error", "(", "u\"Cannot read file '%s'\"", "%", "(", "input_file_path", ")", ")", "self", ".", "print_error", "(", "u\"Check that its format is supported by ffmpeg\"", ")", "except", "OSError", ":", "self", ".", "print_error", "(", "u\"Cannot write file '%s'\"", "%", "(", "output_file_path", ")", ")", "return", "self", ".", "ERROR_EXIT_CODE" ]
46.671875
20.515625
def now(cls, tzinfo=None):
    """[tz] -> new datetime with tz's local day and time.

    Args:
        tzinfo: Target timezone; defaults to the local timezone
            returned by ``localtz()``.

    Returns:
        A timezone-aware datetime for the current moment, expressed
        in ``tzinfo``.
    """
    from datetime import timezone
    # BUG FIX: cls.utcnow() returns a *naive* datetime holding UTC wall
    # time.  Calling astimezone() on a naive datetime makes Python treat
    # it as system-local time, shifting the result by the local UTC
    # offset.  Attach the UTC tzinfo explicitly so the conversion is
    # performed from UTC as intended.
    obj = cls.utcnow().replace(tzinfo=timezone.utc)
    if tzinfo is None:
        tzinfo = localtz()
    return obj.astimezone(tzinfo)
[ "def", "now", "(", "cls", ",", "tzinfo", "=", "None", ")", ":", "obj", "=", "cls", ".", "utcnow", "(", ")", "if", "tzinfo", "is", "None", ":", "tzinfo", "=", "localtz", "(", ")", "return", "obj", ".", "astimezone", "(", "tzinfo", ")" ]
31.166667
12.166667
def print_loopy(self, as_url=True):
    """Export the graph as a Loopy (http://ncase.me/loopy/) model.

    Parameters
    ----------
    as_url : Optional[bool]
        If True (default), prefix the JSON model with the Loopy v1
        URL so the result can be opened directly in a browser.

    Returns
    -------
    model : str
        The Loopy model as a compact JSON string, or a full Loopy URL
        when `as_url` is True.
    """
    # FIX: removed unused local `init_str` and corrected the docstring,
    # which documented a nonexistent `out_file` parameter.
    node_list = {}
    node_id = 1
    for node, data in self.graph.nodes(data=True):
        # Random canvas position (0-499) and hue (0-4) for each node.
        nodex = int(500 * numpy.random.rand())
        nodey = int(500 * numpy.random.rand())
        hue = int(5 * numpy.random.rand())
        node_list[node] = [node_id, nodex, nodey, 1, data['name'], hue]
        node_id += 1
    nodes = list(node_list.values())
    edges = []
    for s, t, data in self.graph.edges(data=True):
        s_id = node_list[s][0]
        t_id = node_list[t][0]
        pol = 1 if data['polarity'] == 'positive' else -1
        # 89 is presumably Loopy's arc-curvature control value — TODO confirm
        # against the Loopy v1 data format.
        edges.append([s_id, t_id, 89, pol, 0])
    labels = []
    components = [nodes, edges, labels]
    model = json.dumps(components, separators=(',', ':'))
    if as_url:
        model = 'http://ncase.me/loopy/v1/?data=' + model
    return model
[ "def", "print_loopy", "(", "self", ",", "as_url", "=", "True", ")", ":", "init_str", "=", "''", "node_id", "=", "1", "node_list", "=", "{", "}", "for", "node", ",", "data", "in", "self", ".", "graph", ".", "nodes", "(", "data", "=", "True", ")", ":", "node_name", "=", "data", "[", "'name'", "]", "nodex", "=", "int", "(", "500", "*", "numpy", ".", "random", ".", "rand", "(", ")", ")", "nodey", "=", "int", "(", "500", "*", "numpy", ".", "random", ".", "rand", "(", ")", ")", "hue", "=", "int", "(", "5", "*", "numpy", ".", "random", ".", "rand", "(", ")", ")", "node_attr", "=", "[", "node_id", ",", "nodex", ",", "nodey", ",", "1", ",", "node_name", ",", "hue", "]", "node_list", "[", "node", "]", "=", "node_attr", "node_id", "+=", "1", "nodes", "=", "list", "(", "node_list", ".", "values", "(", ")", ")", "edges", "=", "[", "]", "for", "s", ",", "t", ",", "data", "in", "self", ".", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "s_id", "=", "node_list", "[", "s", "]", "[", "0", "]", "t_id", "=", "node_list", "[", "t", "]", "[", "0", "]", "if", "data", "[", "'polarity'", "]", "==", "'positive'", ":", "pol", "=", "1", "else", ":", "pol", "=", "-", "1", "edge", "=", "[", "s_id", ",", "t_id", ",", "89", ",", "pol", ",", "0", "]", "edges", ".", "append", "(", "edge", ")", "labels", "=", "[", "]", "components", "=", "[", "nodes", ",", "edges", ",", "labels", "]", "model", "=", "json", ".", "dumps", "(", "components", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "if", "as_url", ":", "model", "=", "'http://ncase.me/loopy/v1/?data='", "+", "model", "return", "model" ]
30.325581
16.023256
def _read_bands(self):
    """Read every band listed in ``self.bands`` with rasterio.

    Returns:
        list: One array per band, read from the matching entry in
        ``self.bands_path``.
    """
    bands = []
    try:
        for i, band in enumerate(self.bands):
            bands.append(rasterio.open(self.bands_path[i]).read_band(1))
    except IOError as e:
        # FIX: use str(e) instead of e.message — the .message attribute
        # does not exist on Python 3 exceptions and would raise
        # AttributeError while handling the IOError.
        # NOTE(review): `exit` here is presumably a project helper taking
        # (message, code); the builtin exit takes one argument — confirm.
        exit(str(e), 1)
    return bands
[ "def", "_read_bands", "(", "self", ")", ":", "bands", "=", "[", "]", "try", ":", "for", "i", ",", "band", "in", "enumerate", "(", "self", ".", "bands", ")", ":", "bands", ".", "append", "(", "rasterio", ".", "open", "(", "self", ".", "bands_path", "[", "i", "]", ")", ".", "read_band", "(", "1", ")", ")", "except", "IOError", "as", "e", ":", "exit", "(", "e", ".", "message", ",", "1", ")", "return", "bands" ]
27
21.363636
async def kickban(self, channel, target, reason=None, range=0):
    """Ban a user from a channel, then kick them.

    The ban is applied first so the user cannot immediately rejoin.
    """
    await self.ban(channel, target, range)
    await self.kick(channel, target, reason)
[ "async", "def", "kickban", "(", "self", ",", "channel", ",", "target", ",", "reason", "=", "None", ",", "range", "=", "0", ")", ":", "await", "self", ".", "ban", "(", "channel", ",", "target", ",", "range", ")", "await", "self", ".", "kick", "(", "channel", ",", "target", ",", "reason", ")" ]
36.333333
6.333333
def _set_global_defaults(xmlglobals): """Sets the default attributes on tags that were specified in <global> tags in the XML file.""" for key, val in xmlglobals.items(): if key != "defaults": for name, tag in val.items(): _update_from_globals(tag, xmlglobals, None)
[ "def", "_set_global_defaults", "(", "xmlglobals", ")", ":", "for", "key", ",", "val", "in", "xmlglobals", ".", "items", "(", ")", ":", "if", "key", "!=", "\"defaults\"", ":", "for", "name", ",", "tag", "in", "val", ".", "items", "(", ")", ":", "_update_from_globals", "(", "tag", ",", "xmlglobals", ",", "None", ")" ]
43.857143
5
def _compute(self, inputs, outputs): """ Run one iteration of TMRegion's compute """ #if self.topDownMode and (not 'topDownIn' in inputs): # raise RuntimeError("The input topDownIn must be linked in if " # "topDownMode is True") if self._tfdr is None: raise RuntimeError("TM has not been initialized") # Conditional compute break self._conditionalBreak() self._iterations += 1 # Get our inputs as numpy array buInputVector = inputs['bottomUpIn'] # Handle reset signal resetSignal = False if 'resetIn' in inputs: assert len(inputs['resetIn']) == 1 if inputs['resetIn'][0] != 0: self._tfdr.reset() self._sequencePos = 0 # Position within the current sequence if self.computePredictedActiveCellIndices: prevPredictedState = self._tfdr.getPredictedState().reshape(-1).astype('float32') if self.anomalyMode: prevPredictedColumns = self._tfdr.topDownCompute().copy().nonzero()[0] # Perform inference and/or learning tpOutput = self._tfdr.compute(buInputVector, self.learningMode, self.inferenceMode) self._sequencePos += 1 # OR'ing together the cells in each column? 
if self.orColumnOutputs: tpOutput= tpOutput.reshape(self.columnCount, self.cellsPerColumn).max(axis=1) # Direct logging of non-zero TM outputs if self._fpLogTPOutput: output = tpOutput.reshape(-1) outputNZ = tpOutput.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogTPOutput, output.size, outStr # Write the bottom up out to our node outputs outputs['bottomUpOut'][:] = tpOutput.flat if self.topDownMode: # Top-down compute outputs['topDownOut'][:] = self._tfdr.topDownCompute().copy() # Set output for use with anomaly classification region if in anomalyMode if self.anomalyMode: activeLearnCells = self._tfdr.getLearnActiveStateT() size = activeLearnCells.shape[0] * activeLearnCells.shape[1] outputs['lrnActiveStateT'][:] = activeLearnCells.reshape(size) activeColumns = buInputVector.nonzero()[0] outputs['anomalyScore'][:] = anomaly.computeRawAnomalyScore( activeColumns, prevPredictedColumns) if self.computePredictedActiveCellIndices: # Reshape so we are dealing with 1D arrays activeState = self._tfdr._getActiveState().reshape(-1).astype('float32') activeIndices = numpy.where(activeState != 0)[0] predictedIndices= numpy.where(prevPredictedState != 0)[0] predictedActiveIndices = numpy.intersect1d(activeIndices, predictedIndices) outputs["activeCells"].fill(0) outputs["activeCells"][activeIndices] = 1 outputs["predictedActiveCells"].fill(0) outputs["predictedActiveCells"][predictedActiveIndices] = 1
[ "def", "_compute", "(", "self", ",", "inputs", ",", "outputs", ")", ":", "#if self.topDownMode and (not 'topDownIn' in inputs):", "# raise RuntimeError(\"The input topDownIn must be linked in if \"", "# \"topDownMode is True\")", "if", "self", ".", "_tfdr", "is", "None", ":", "raise", "RuntimeError", "(", "\"TM has not been initialized\"", ")", "# Conditional compute break", "self", ".", "_conditionalBreak", "(", ")", "self", ".", "_iterations", "+=", "1", "# Get our inputs as numpy array", "buInputVector", "=", "inputs", "[", "'bottomUpIn'", "]", "# Handle reset signal", "resetSignal", "=", "False", "if", "'resetIn'", "in", "inputs", ":", "assert", "len", "(", "inputs", "[", "'resetIn'", "]", ")", "==", "1", "if", "inputs", "[", "'resetIn'", "]", "[", "0", "]", "!=", "0", ":", "self", ".", "_tfdr", ".", "reset", "(", ")", "self", ".", "_sequencePos", "=", "0", "# Position within the current sequence", "if", "self", ".", "computePredictedActiveCellIndices", ":", "prevPredictedState", "=", "self", ".", "_tfdr", ".", "getPredictedState", "(", ")", ".", "reshape", "(", "-", "1", ")", ".", "astype", "(", "'float32'", ")", "if", "self", ".", "anomalyMode", ":", "prevPredictedColumns", "=", "self", ".", "_tfdr", ".", "topDownCompute", "(", ")", ".", "copy", "(", ")", ".", "nonzero", "(", ")", "[", "0", "]", "# Perform inference and/or learning", "tpOutput", "=", "self", ".", "_tfdr", ".", "compute", "(", "buInputVector", ",", "self", ".", "learningMode", ",", "self", ".", "inferenceMode", ")", "self", ".", "_sequencePos", "+=", "1", "# OR'ing together the cells in each column?", "if", "self", ".", "orColumnOutputs", ":", "tpOutput", "=", "tpOutput", ".", "reshape", "(", "self", ".", "columnCount", ",", "self", ".", "cellsPerColumn", ")", ".", "max", "(", "axis", "=", "1", ")", "# Direct logging of non-zero TM outputs", "if", "self", ".", "_fpLogTPOutput", ":", "output", "=", "tpOutput", ".", "reshape", "(", "-", "1", ")", "outputNZ", "=", "tpOutput", ".", "nonzero", 
"(", ")", "[", "0", "]", "outStr", "=", "\" \"", ".", "join", "(", "[", "\"%d\"", "%", "int", "(", "token", ")", "for", "token", "in", "outputNZ", "]", ")", "print", ">>", "self", ".", "_fpLogTPOutput", ",", "output", ".", "size", ",", "outStr", "# Write the bottom up out to our node outputs", "outputs", "[", "'bottomUpOut'", "]", "[", ":", "]", "=", "tpOutput", ".", "flat", "if", "self", ".", "topDownMode", ":", "# Top-down compute", "outputs", "[", "'topDownOut'", "]", "[", ":", "]", "=", "self", ".", "_tfdr", ".", "topDownCompute", "(", ")", ".", "copy", "(", ")", "# Set output for use with anomaly classification region if in anomalyMode", "if", "self", ".", "anomalyMode", ":", "activeLearnCells", "=", "self", ".", "_tfdr", ".", "getLearnActiveStateT", "(", ")", "size", "=", "activeLearnCells", ".", "shape", "[", "0", "]", "*", "activeLearnCells", ".", "shape", "[", "1", "]", "outputs", "[", "'lrnActiveStateT'", "]", "[", ":", "]", "=", "activeLearnCells", ".", "reshape", "(", "size", ")", "activeColumns", "=", "buInputVector", ".", "nonzero", "(", ")", "[", "0", "]", "outputs", "[", "'anomalyScore'", "]", "[", ":", "]", "=", "anomaly", ".", "computeRawAnomalyScore", "(", "activeColumns", ",", "prevPredictedColumns", ")", "if", "self", ".", "computePredictedActiveCellIndices", ":", "# Reshape so we are dealing with 1D arrays", "activeState", "=", "self", ".", "_tfdr", ".", "_getActiveState", "(", ")", ".", "reshape", "(", "-", "1", ")", ".", "astype", "(", "'float32'", ")", "activeIndices", "=", "numpy", ".", "where", "(", "activeState", "!=", "0", ")", "[", "0", "]", "predictedIndices", "=", "numpy", ".", "where", "(", "prevPredictedState", "!=", "0", ")", "[", "0", "]", "predictedActiveIndices", "=", "numpy", ".", "intersect1d", "(", "activeIndices", ",", "predictedIndices", ")", "outputs", "[", "\"activeCells\"", "]", ".", "fill", "(", "0", ")", "outputs", "[", "\"activeCells\"", "]", "[", "activeIndices", "]", "=", "1", "outputs", "[", 
"\"predictedActiveCells\"", "]", ".", "fill", "(", "0", ")", "outputs", "[", "\"predictedActiveCells\"", "]", "[", "predictedActiveIndices", "]", "=", "1" ]
36.558442
20.220779
def complete_func(self, findstart, base):
    """Handle omni completion.

    On the first invocation (``findstart == "1"``) this fires a
    completion request and returns the column where completion begins;
    on the second invocation it returns the collected suggestions.
    """
    self.log.debug('complete_func: in %s %s', findstart, base)

    def locate_start():
        # Walk left from the cursor until a separator character is hit.
        row, col = self.editor.cursor()
        begin = col
        text = self.editor.getline()
        while begin > 0 and text[begin - 1] not in " .,([{":
            begin -= 1
        # Start should be 1 when startcol is zero
        return row, col, begin if begin else 1

    if str(findstart) == "1":
        row, col, startcol = locate_start()
        # Fire the request now so the response can arrive before the
        # second invocation.
        self.complete(row, col)
        self.completion_started = True
        # Autocompletion is always allowed, even with an empty seed.
        return startcol

    result = []
    # Only handle the second invocation if the first already happened.
    if self.completion_started:
        # Drain queued messages until suggestions arrive.
        self.unqueue(timeout=self.completion_timeout, should_wait=True)
        suggestions = self.suggestions or []
        self.log.debug('complete_func: suggestions in')
        for item in suggestions:
            result.append(item)
        self.suggestions = None
        self.completion_started = False
    return result
[ "def", "complete_func", "(", "self", ",", "findstart", ",", "base", ")", ":", "self", ".", "log", ".", "debug", "(", "'complete_func: in %s %s'", ",", "findstart", ",", "base", ")", "def", "detect_row_column_start", "(", ")", ":", "row", ",", "col", "=", "self", ".", "editor", ".", "cursor", "(", ")", "start", "=", "col", "line", "=", "self", ".", "editor", ".", "getline", "(", ")", "while", "start", ">", "0", "and", "line", "[", "start", "-", "1", "]", "not", "in", "\" .,([{\"", ":", "start", "-=", "1", "# Start should be 1 when startcol is zero", "return", "row", ",", "col", ",", "start", "if", "start", "else", "1", "if", "str", "(", "findstart", ")", "==", "\"1\"", ":", "row", ",", "col", ",", "startcol", "=", "detect_row_column_start", "(", ")", "# Make request to get response ASAP", "self", ".", "complete", "(", "row", ",", "col", ")", "self", ".", "completion_started", "=", "True", "# We always allow autocompletion, even with empty seeds", "return", "startcol", "else", ":", "result", "=", "[", "]", "# Only handle snd invocation if fst has already been done", "if", "self", ".", "completion_started", ":", "# Unqueing messages until we get suggestions", "self", ".", "unqueue", "(", "timeout", "=", "self", ".", "completion_timeout", ",", "should_wait", "=", "True", ")", "suggestions", "=", "self", ".", "suggestions", "or", "[", "]", "self", ".", "log", ".", "debug", "(", "'complete_func: suggestions in'", ")", "for", "m", "in", "suggestions", ":", "result", ".", "append", "(", "m", ")", "self", ".", "suggestions", "=", "None", "self", ".", "completion_started", "=", "False", "return", "result" ]
39.2
15.628571
def from_unknown_text(text, strict=False):
    """Detect the format of a crs string and parse it with the matching
    parser.

    Arguments:

    - *text*: The crs text representation of unknown type.
    - *strict* (optional): When True, the parser is strict about names
      having to match exactly with upper and lowercases. Default is not
      strict (False).

    Returns:

    - CRS object.
    """
    if text.startswith("+"):
        return from_proj4(text, strict)
    if text.startswith(("PROJCS[", "GEOGCS[")):
        return from_unknown_wkt(text, strict)
    #elif text.startswith("urn:"):
    #    crs = from_ogc_urn(text, strict)
    if text.startswith("EPSG:"):
        return from_epsg_code(text.split(":")[1])
    if text.startswith("ESRI:"):
        return from_esri_code(text.split(":")[1])
    if text.startswith("SR-ORG:"):
        return from_sr_code(text.split(":")[1])
    raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
[ "def", "from_unknown_text", "(", "text", ",", "strict", "=", "False", ")", ":", "if", "text", ".", "startswith", "(", "\"+\"", ")", ":", "crs", "=", "from_proj4", "(", "text", ",", "strict", ")", "elif", "text", ".", "startswith", "(", "(", "\"PROJCS[\"", ",", "\"GEOGCS[\"", ")", ")", ":", "crs", "=", "from_unknown_wkt", "(", "text", ",", "strict", ")", "#elif text.startswith(\"urn:\"):", "# crs = from_ogc_urn(text, strict)", "elif", "text", ".", "startswith", "(", "\"EPSG:\"", ")", ":", "crs", "=", "from_epsg_code", "(", "text", ".", "split", "(", "\":\"", ")", "[", "1", "]", ")", "elif", "text", ".", "startswith", "(", "\"ESRI:\"", ")", ":", "crs", "=", "from_esri_code", "(", "text", ".", "split", "(", "\":\"", ")", "[", "1", "]", ")", "elif", "text", ".", "startswith", "(", "\"SR-ORG:\"", ")", ":", "crs", "=", "from_sr_code", "(", "text", ".", "split", "(", "\":\"", ")", "[", "1", "]", ")", "else", ":", "raise", "FormatError", "(", "\"Could not auto-detect the type of crs format, make sure it is one of the supported formats\"", ")", "return", "crs" ]
28.222222
24.222222
def trace_grad(fn, args):
    """Trace *fn* on *args*; return its output and a VJP function."""
    from tensorflow.python.eager.backprop import make_vjp
    output, vjp_fn = make_vjp(fn)(*args)
    return output, vjp_fn
[ "def", "trace_grad", "(", "fn", ",", "args", ")", ":", "from", "tensorflow", ".", "python", ".", "eager", ".", "backprop", "import", "make_vjp", "result", ",", "vjp", "=", "make_vjp", "(", "fn", ")", "(", "*", "args", ")", "return", "result", ",", "vjp" ]
40.8
11
def get_label_set(self, type_str=None):
    """Get a set of label_str for the tree rooted at this node.

    Args:
        type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only
            include information from nodes of that type.

    Returns:
        set: The labels of the nodes leading up to this node from the root.
    """
    labels = set()
    for node in self.node_gen:
        if type_str is None or node.type_str == type_str:
            labels.add(node.label_str)
    return labels
[ "def", "get_label_set", "(", "self", ",", "type_str", "=", "None", ")", ":", "return", "{", "v", ".", "label_str", "for", "v", "in", "self", ".", "node_gen", "if", "type_str", "in", "(", "None", ",", "v", ".", "type_str", ")", "}" ]
35.384615
24.923077
def index_firstnot(ol, value):
    '''
    Return the index of the first element of ``ol`` that is not equal to
    ``value``, or None when every element equals ``value`` (including an
    empty ``ol``).

    from elist.elist import *
    ol = [1,'a',3,'a',4,'a',5]
    index_firstnot(ol,'a')
    ####index_firstnot, array_indexnot, indexOfnot are the same
    array_indexnot(ol,'a')
    indexOfnot(ol,'a')
    '''
    # enumerate + generator replaces the manual ol.__len__()/range index
    # loop with pass/else branches; next()'s default covers the
    # "all elements equal" case that previously fell through to
    # return(None).
    return next((i for i, elem in enumerate(ol) if elem != value), None)
[ "def", "index_firstnot", "(", "ol", ",", "value", ")", ":", "length", "=", "ol", ".", "__len__", "(", ")", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "if", "(", "value", "==", "ol", "[", "i", "]", ")", ":", "pass", "else", ":", "return", "(", "i", ")", "return", "(", "None", ")" ]
25.6875
17.8125
def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
    '''
    Return a job id and prepare the job id directory.

    This is the function responsible for making sure jids don't collide (unless
    it is passed a jid).
    So do what you have to do to make sure that stays the case

    :param bool nocache: when True, also create an empty ``nocache`` marker
        file inside the jid directory (presumably read by returners — confirm)
    :param passed_jid: reuse this jid instead of generating a fresh one
    :param int recurse_count: internal retry counter for collisions/IO errors
    :return: the jid whose directory was successfully prepared
    :raises salt.exceptions.SaltCacheError: after 5 failed attempts
    '''
    # Bail out instead of recursing forever when storing the jid keeps failing.
    if recurse_count >= 5:
        err = 'prep_jid could not store a jid after {0} tries.'.format(recurse_count)
        log.error(err)
        raise salt.exceptions.SaltCacheError(err)
    if passed_jid is None:  # this can be a None or an empty string.
        jid = salt.utils.jid.gen_jid(__opts__)
    else:
        jid = passed_jid

    jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])

    # Make sure we create the jid dir, otherwise someone else is using it,
    # meaning we need a new jid.
    if not os.path.isdir(jid_dir):
        try:
            os.makedirs(jid_dir)
        except OSError:
            time.sleep(0.1)
            # Only regenerate on collision when the caller did not pin the jid.
            if passed_jid is None:
                return prep_jid(nocache=nocache, recurse_count=recurse_count+1)

    try:
        # Record the jid inside its own directory; bytes mode for portability.
        with salt.utils.files.fopen(os.path.join(jid_dir, 'jid'), 'wb+') as fn_:
            fn_.write(salt.utils.stringutils.to_bytes(jid))
        if nocache:
            with salt.utils.files.fopen(os.path.join(jid_dir, 'nocache'), 'wb+'):
                pass
    except IOError:
        log.warning(
            'Could not write out jid file for job %s. Retrying.', jid)
        time.sleep(0.1)
        # Keep the same jid on IO retry — the directory already exists for it.
        return prep_jid(passed_jid=jid, nocache=nocache,
                        recurse_count=recurse_count+1)

    return jid
[ "def", "prep_jid", "(", "nocache", "=", "False", ",", "passed_jid", "=", "None", ",", "recurse_count", "=", "0", ")", ":", "if", "recurse_count", ">=", "5", ":", "err", "=", "'prep_jid could not store a jid after {0} tries.'", ".", "format", "(", "recurse_count", ")", "log", ".", "error", "(", "err", ")", "raise", "salt", ".", "exceptions", ".", "SaltCacheError", "(", "err", ")", "if", "passed_jid", "is", "None", ":", "# this can be a None or an empty string.", "jid", "=", "salt", ".", "utils", ".", "jid", ".", "gen_jid", "(", "__opts__", ")", "else", ":", "jid", "=", "passed_jid", "jid_dir", "=", "salt", ".", "utils", ".", "jid", ".", "jid_dir", "(", "jid", ",", "_job_dir", "(", ")", ",", "__opts__", "[", "'hash_type'", "]", ")", "# Make sure we create the jid dir, otherwise someone else is using it,", "# meaning we need a new jid.", "if", "not", "os", ".", "path", ".", "isdir", "(", "jid_dir", ")", ":", "try", ":", "os", ".", "makedirs", "(", "jid_dir", ")", "except", "OSError", ":", "time", ".", "sleep", "(", "0.1", ")", "if", "passed_jid", "is", "None", ":", "return", "prep_jid", "(", "nocache", "=", "nocache", ",", "recurse_count", "=", "recurse_count", "+", "1", ")", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "os", ".", "path", ".", "join", "(", "jid_dir", ",", "'jid'", ")", ",", "'wb+'", ")", "as", "fn_", ":", "fn_", ".", "write", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "jid", ")", ")", "if", "nocache", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "os", ".", "path", ".", "join", "(", "jid_dir", ",", "'nocache'", ")", ",", "'wb+'", ")", ":", "pass", "except", "IOError", ":", "log", ".", "warning", "(", "'Could not write out jid file for job %s. Retrying.'", ",", "jid", ")", "time", ".", "sleep", "(", "0.1", ")", "return", "prep_jid", "(", "passed_jid", "=", "jid", ",", "nocache", "=", "nocache", ",", "recurse_count", "=", "recurse_count", "+", "1", ")", "return", "jid" ]
36.488372
24.581395
def getOverlayAutoCurveDistanceRangeInMeters(self, ulOverlayHandle):
    """
    For high-quality curved overlays only: query the distance range (meters)
    used to automatically curve the surface around the viewer. The surface is
    most curved at the min distance and least curved at the max distance.

    Returns (error_code, min_distance_m, max_distance_m).
    """
    get_range = self.function_table.getOverlayAutoCurveDistanceRangeInMeters
    min_dist = c_float()
    max_dist = c_float()
    error = get_range(ulOverlayHandle, byref(min_dist), byref(max_dist))
    return error, min_dist.value, max_dist.value
[ "def", "getOverlayAutoCurveDistanceRangeInMeters", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayAutoCurveDistanceRangeInMeters", "pfMinDistanceInMeters", "=", "c_float", "(", ")", "pfMaxDistanceInMeters", "=", "c_float", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "pfMinDistanceInMeters", ")", ",", "byref", "(", "pfMaxDistanceInMeters", ")", ")", "return", "result", ",", "pfMinDistanceInMeters", ".", "value", ",", "pfMaxDistanceInMeters", ".", "value" ]
60.727273
33.272727
def generate_folder_names(name, project):
    """Build the standard (output, project, batch, raw-data) directory paths."""
    join = os.path.join
    output_dir = prms.Paths.outdatadir
    project_path = join(output_dir, project)
    batch_path = join(project_path, name)
    raw_path = join(batch_path, "raw_data")
    return output_dir, project_path, batch_path, raw_path
[ "def", "generate_folder_names", "(", "name", ",", "project", ")", ":", "out_data_dir", "=", "prms", ".", "Paths", ".", "outdatadir", "project_dir", "=", "os", ".", "path", ".", "join", "(", "out_data_dir", ",", "project", ")", "batch_dir", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "name", ")", "raw_dir", "=", "os", ".", "path", ".", "join", "(", "batch_dir", ",", "\"raw_data\"", ")", "return", "out_data_dir", ",", "project_dir", ",", "batch_dir", ",", "raw_dir" ]
40.75
10.75
def centered(coordinates):
    """
    Shift a coordinate distribution so its per-axis mean is zero.

    This is used as the input to the regression model, so it can be
    converted easily into radial coordinates.
    """
    arr = N.array(coordinates)
    return arr - N.mean(arr, axis=0)
[ "def", "centered", "(", "coordinates", ")", ":", "coordinates", "=", "N", ".", "array", "(", "coordinates", ")", "means", "=", "N", ".", "mean", "(", "coordinates", ",", "axis", "=", "0", ")", "return", "coordinates", "-", "means" ]
34
8.6
def quit(self):
    """Put the saved stdout/stderr streams back, then destroy the window."""
    sys.stdout, sys.stderr = self._oldstdout, self._oldstderr
    self.destroy()
[ "def", "quit", "(", "self", ")", ":", "sys", ".", "stdout", "=", "self", ".", "_oldstdout", "sys", ".", "stderr", "=", "self", ".", "_oldstderr", "self", ".", "destroy", "(", ")" ]
35.4
10.2
def cBurkPot(self, R, Rs, rho0, r_core):
    """
    Potential term of the cored Burkert profile, evaluated via ``self._H``.

    :param R: projected distance
    :param Rs: scale radius
    :param rho0: central core density
    :param r_core: core radius
    """
    # Dimensionless radius and core-to-scale ratio (same arithmetic form as
    # before to keep floating-point results bit-identical).
    x = R * Rs ** -1
    p = Rs * r_core ** -1
    return 2 * rho0 * Rs ** 3 * self._H(x, p)
[ "def", "cBurkPot", "(", "self", ",", "R", ",", "Rs", ",", "rho0", ",", "r_core", ")", ":", "x", "=", "R", "*", "Rs", "**", "-", "1", "p", "=", "Rs", "*", "r_core", "**", "-", "1", "hx", "=", "self", ".", "_H", "(", "x", ",", "p", ")", "return", "2", "*", "rho0", "*", "Rs", "**", "3", "*", "hx" ]
24.692308
11
def on_event(self, evt, is_final):
    """ this is invoked from in response to COM PumpWaitingMessages - different thread """
    # Route every security node (or its error) to the proper handler.
    for message in XmlHelper.message_iter(evt):
        security_data = message.GetElement('securityData')
        for node, error in XmlHelper.security_iter(security_data):
            if error:
                self.security_errors.append(error)
            else:
                self.on_security_node(node)

    # On the final event, collapse the accumulated dict into a DataFrame
    # indexed by security.
    if is_final and self.response_type == 'frame':
        idx = self.response.pop('security')
        result = DataFrame(self.response, columns=self.fields, index=idx)
        result.index.name = 'security'
        self.response = result
[ "def", "on_event", "(", "self", ",", "evt", ",", "is_final", ")", ":", "for", "msg", "in", "XmlHelper", ".", "message_iter", "(", "evt", ")", ":", "for", "node", ",", "error", "in", "XmlHelper", ".", "security_iter", "(", "msg", ".", "GetElement", "(", "'securityData'", ")", ")", ":", "if", "error", ":", "self", ".", "security_errors", ".", "append", "(", "error", ")", "else", ":", "self", ".", "on_security_node", "(", "node", ")", "if", "is_final", "and", "self", ".", "response_type", "==", "'frame'", ":", "index", "=", "self", ".", "response", ".", "pop", "(", "'security'", ")", "frame", "=", "DataFrame", "(", "self", ".", "response", ",", "columns", "=", "self", ".", "fields", ",", "index", "=", "index", ")", "frame", ".", "index", ".", "name", "=", "'security'", "self", ".", "response", "=", "frame" ]
47.428571
16
def _loop_use_cache(self, helper_function, num, fragment):
    """
    Synthesize all fragments using the cache

    :param helper_function: TTS callable invoked as
        ``helper_function(text=..., voice_code=..., output_file_path=...,
        return_audio_data=True)`` and returning ``(succeeded, data)``
    :param int num: fragment index, used only for logging
    :param fragment: fragment exposing ``language`` and ``filtered_text``
    :return: ``(True, data)`` on success, ``(False, None)`` on failure;
        in the synthesis branch ``data`` unpacks as
        ``(duration, sample_rate, encoding, samples)`` — presumably the
        cached branch yields the same shape (confirm in _read_audio_data)
    """
    self.log([u"Examining fragment %d (cache)...", num])
    # (language, text) is the cache key: same text in another language must
    # not collide.
    fragment_info = (fragment.language, fragment.filtered_text)
    if self.cache.is_cached(fragment_info):
        self.log(u"Fragment cached: retrieving audio data from cache")

        # read data from file, whose path is in the cache
        file_handler, file_path = self.cache.get(fragment_info)
        self.log([u"Reading cached fragment at '%s'...", file_path])
        succeeded, data = self._read_audio_data(file_path)
        if not succeeded:
            self.log_crit(u"An unexpected error occurred while reading cached audio file")
            return (False, None)
        self.log([u"Reading cached fragment at '%s'... done", file_path])
    else:
        self.log(u"Fragment not cached: synthesizing and caching")

        # creating destination file
        file_info = gf.tmp_file(suffix=u".cache.wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
        file_handler, file_path = file_info
        self.log([u"Synthesizing fragment to '%s'...", file_path])

        # synthesize and get the duration of the output file
        voice_code = self._language_to_voice_code(fragment.language)
        self.log(u"Calling helper function")
        succeeded, data = helper_function(
            text=fragment.filtered_text,
            voice_code=voice_code,
            output_file_path=file_path,
            return_audio_data=True
        )

        # check output
        if not succeeded:
            self.log_crit(u"An unexpected error occurred in helper_function")
            return (False, None)
        self.log([u"Synthesizing fragment to '%s'... done", file_path])
        duration, sr_nu, enc_nu, samples = data
        # Only audible fragments are worth caching; zero-duration output is
        # still returned to the caller but not stored.
        if duration > 0:
            self.log(u"Fragment has > 0 duration, adding it to cache")
            self.cache.add(fragment_info, file_info)
            self.log(u"Added fragment to cache")
        else:
            self.log(u"Fragment has zero duration, not adding it to cache")

    # Close the handler in both branches so the temp/cached file is released.
    self.log([u"Closing file handler for cached output file path '%s'", file_path])
    gf.close_file_handler(file_handler)
    self.log([u"Examining fragment %d (cache)... done", num])
    return (True, data)
[ "def", "_loop_use_cache", "(", "self", ",", "helper_function", ",", "num", ",", "fragment", ")", ":", "self", ".", "log", "(", "[", "u\"Examining fragment %d (cache)...\"", ",", "num", "]", ")", "fragment_info", "=", "(", "fragment", ".", "language", ",", "fragment", ".", "filtered_text", ")", "if", "self", ".", "cache", ".", "is_cached", "(", "fragment_info", ")", ":", "self", ".", "log", "(", "u\"Fragment cached: retrieving audio data from cache\"", ")", "# read data from file, whose path is in the cache", "file_handler", ",", "file_path", "=", "self", ".", "cache", ".", "get", "(", "fragment_info", ")", "self", ".", "log", "(", "[", "u\"Reading cached fragment at '%s'...\"", ",", "file_path", "]", ")", "succeeded", ",", "data", "=", "self", ".", "_read_audio_data", "(", "file_path", ")", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred while reading cached audio file\"", ")", "return", "(", "False", ",", "None", ")", "self", ".", "log", "(", "[", "u\"Reading cached fragment at '%s'... 
done\"", ",", "file_path", "]", ")", "else", ":", "self", ".", "log", "(", "u\"Fragment not cached: synthesizing and caching\"", ")", "# creating destination file", "file_info", "=", "gf", ".", "tmp_file", "(", "suffix", "=", "u\".cache.wav\"", ",", "root", "=", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "TMP_PATH", "]", ")", "file_handler", ",", "file_path", "=", "file_info", "self", ".", "log", "(", "[", "u\"Synthesizing fragment to '%s'...\"", ",", "file_path", "]", ")", "# synthesize and get the duration of the output file", "voice_code", "=", "self", ".", "_language_to_voice_code", "(", "fragment", ".", "language", ")", "self", ".", "log", "(", "u\"Calling helper function\"", ")", "succeeded", ",", "data", "=", "helper_function", "(", "text", "=", "fragment", ".", "filtered_text", ",", "voice_code", "=", "voice_code", ",", "output_file_path", "=", "file_path", ",", "return_audio_data", "=", "True", ")", "# check output", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred in helper_function\"", ")", "return", "(", "False", ",", "None", ")", "self", ".", "log", "(", "[", "u\"Synthesizing fragment to '%s'... done\"", ",", "file_path", "]", ")", "duration", ",", "sr_nu", ",", "enc_nu", ",", "samples", "=", "data", "if", "duration", ">", "0", ":", "self", ".", "log", "(", "u\"Fragment has > 0 duration, adding it to cache\"", ")", "self", ".", "cache", ".", "add", "(", "fragment_info", ",", "file_info", ")", "self", ".", "log", "(", "u\"Added fragment to cache\"", ")", "else", ":", "self", ".", "log", "(", "u\"Fragment has zero duration, not adding it to cache\"", ")", "self", ".", "log", "(", "[", "u\"Closing file handler for cached output file path '%s'\"", ",", "file_path", "]", ")", "gf", ".", "close_file_handler", "(", "file_handler", ")", "self", ".", "log", "(", "[", "u\"Examining fragment %d (cache)... done\"", ",", "num", "]", ")", "return", "(", "True", ",", "data", ")" ]
50.916667
21.875
def commit_input_confirm_timeout(self, **kwargs):
    """Auto Generated Code

    Build the ``commit`` RPC element with an ``input/confirm-timeout`` leaf
    and pass it to the callback.

    :param confirm_timeout: (required kwarg) text value for the leaf
    :param callback: (optional kwarg) overrides ``self._callback``
    :return: whatever the callback returns for the built element
    """
    # The generated original created a throwaway ET.Element("config") that
    # was immediately overwritten; that dead store is dropped. The local
    # previously named ``input`` shadowed the builtin and is renamed.
    commit = ET.Element("commit")
    input_el = ET.SubElement(commit, "input")
    leaf = ET.SubElement(input_el, "confirm-timeout")
    leaf.text = kwargs.pop('confirm_timeout')

    callback = kwargs.pop('callback', self._callback)
    return callback(commit)
[ "def", "commit_input_confirm_timeout", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "commit", "=", "ET", ".", "Element", "(", "\"commit\"", ")", "config", "=", "commit", "input", "=", "ET", ".", "SubElement", "(", "commit", ",", "\"input\"", ")", "confirm_timeout", "=", "ET", ".", "SubElement", "(", "input", ",", "\"confirm-timeout\"", ")", "confirm_timeout", ".", "text", "=", "kwargs", ".", "pop", "(", "'confirm_timeout'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
37.166667
12.416667
def default_filename_decoder():
    """
    Build a CWR filename decoder that parses both the old and the new
    filename conventions.

    :return: a FileNameDecoder configured with both grammar rules
    """
    factory = default_filename_grammar_factory()
    return FileNameDecoder(
        factory.get_rule('filename_old'),
        factory.get_rule('filename_new'),
    )
[ "def", "default_filename_decoder", "(", ")", ":", "factory", "=", "default_filename_grammar_factory", "(", ")", "grammar_old", "=", "factory", ".", "get_rule", "(", "'filename_old'", ")", "grammar_new", "=", "factory", ".", "get_rule", "(", "'filename_new'", ")", "return", "FileNameDecoder", "(", "grammar_old", ",", "grammar_new", ")" ]
31.384615
20.153846
def _add_plugin_options(self, available_plugins: Set[Type[Plugin]]) -> None: """Recovers the list of command line options implemented by the available plugins and adds them to the command line parser. """ for plugin_class in available_plugins: # Add the current plugin's commands to the parser group = OptionGroup(self._parser, plugin_class.get_title(), plugin_class.get_description()) for option in plugin_class.get_cli_option_group(): group.add_option(option) self._parser.add_option_group(group)
[ "def", "_add_plugin_options", "(", "self", ",", "available_plugins", ":", "Set", "[", "Type", "[", "Plugin", "]", "]", ")", "->", "None", ":", "for", "plugin_class", "in", "available_plugins", ":", "# Add the current plugin's commands to the parser", "group", "=", "OptionGroup", "(", "self", ".", "_parser", ",", "plugin_class", ".", "get_title", "(", ")", ",", "plugin_class", ".", "get_description", "(", ")", ")", "for", "option", "in", "plugin_class", ".", "get_cli_option_group", "(", ")", ":", "group", ".", "add_option", "(", "option", ")", "self", ".", "_parser", ".", "add_option_group", "(", "group", ")" ]
58.5
17.6
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
          parse_float=None, parse_int=None, parse_constant=None,
          object_pairs_hook=None, **kargs):
    """JSON load from string function that defaults the loading
    class to be JSONTreeDecoder.

    Mirrors ``json.loads``. ``encoding`` is kept for backward compatibility
    but ignored: Python 3's ``json.loads`` no longer accepts it (removed in
    3.9) and always decodes bytes input itself.
    """
    # Pass everything by keyword: json.loads() only accepts ``s``
    # positionally on Python 3, so the old positional call raised TypeError.
    return json.loads(s, cls=cls, object_hook=object_hook,
                      parse_float=parse_float, parse_int=parse_int,
                      parse_constant=parse_constant,
                      object_pairs_hook=object_pairs_hook, **kargs)
[ "def", "loads", "(", "s", ",", "encoding", "=", "None", ",", "cls", "=", "JSONTreeDecoder", ",", "object_hook", "=", "None", ",", "parse_float", "=", "None", ",", "parse_int", "=", "None", ",", "parse_constant", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "*", "*", "kargs", ")", ":", "return", "json", ".", "loads", "(", "s", ",", "encoding", ",", "cls", ",", "object_hook", ",", "parse_float", ",", "parse_int", ",", "parse_constant", ",", "object_pairs_hook", ",", "*", "*", "kargs", ")" ]
45.333333
10.666667
def remote_file_size(self, remote_cmd="", remote_file=None):
    """Return the size of the remote file via the Unix implementation."""
    kwargs = {"remote_cmd": remote_cmd, "remote_file": remote_file}
    return self._remote_file_size_unix(**kwargs)
[ "def", "remote_file_size", "(", "self", ",", "remote_cmd", "=", "\"\"", ",", "remote_file", "=", "None", ")", ":", "return", "self", ".", "_remote_file_size_unix", "(", "remote_cmd", "=", "remote_cmd", ",", "remote_file", "=", "remote_file", ")" ]
44.2
14.4
def convert_bb_to_faces(voxel_grid):
    """
    Convert a voxel grid given as (min, max) bounding-corner pairs into a
    voxel grid described by quad faces — six faces per voxel.

    :param voxel_grid: voxels, each ``[min_corner, max_corner]``
    :return: list of voxels, each ``[bottom, side1..side4, top]`` faces,
        each face a list of 4 vertices
    """
    faced_grid = []
    for v in voxel_grid:
        lo, hi = v[0], v[1]
        x0, y0, z0 = lo
        x1, y1, z1 = hi
        # The eight corners of the axis-aligned box.
        p1 = lo
        p2 = [x1, y0, z0]
        p3 = [x1, y1, z0]
        p4 = [x0, y1, z0]
        p5 = [x0, y0, z1]
        p6 = [x1, y0, z1]
        p7 = hi
        p8 = [x0, y1, z1]
        faced_grid.append([
            [p1, p2, p3, p4],  # bottom
            [p1, p2, p6, p5],  # side 1
            [p2, p3, p7, p6],  # side 2
            [p3, p4, p8, p7],  # side 3
            [p1, p4, p8, p5],  # side 4
            [p5, p6, p7, p8],  # top
        ])
    return faced_grid
[ "def", "convert_bb_to_faces", "(", "voxel_grid", ")", ":", "new_vg", "=", "[", "]", "for", "v", "in", "voxel_grid", ":", "# Vertices", "p1", "=", "v", "[", "0", "]", "p2", "=", "[", "v", "[", "1", "]", "[", "0", "]", ",", "v", "[", "0", "]", "[", "1", "]", ",", "v", "[", "0", "]", "[", "2", "]", "]", "p3", "=", "[", "v", "[", "1", "]", "[", "0", "]", ",", "v", "[", "1", "]", "[", "1", "]", ",", "v", "[", "0", "]", "[", "2", "]", "]", "p4", "=", "[", "v", "[", "0", "]", "[", "0", "]", ",", "v", "[", "1", "]", "[", "1", "]", ",", "v", "[", "0", "]", "[", "2", "]", "]", "p5", "=", "[", "v", "[", "0", "]", "[", "0", "]", ",", "v", "[", "0", "]", "[", "1", "]", ",", "v", "[", "1", "]", "[", "2", "]", "]", "p6", "=", "[", "v", "[", "1", "]", "[", "0", "]", ",", "v", "[", "0", "]", "[", "1", "]", ",", "v", "[", "1", "]", "[", "2", "]", "]", "p7", "=", "v", "[", "1", "]", "p8", "=", "[", "v", "[", "0", "]", "[", "0", "]", ",", "v", "[", "1", "]", "[", "1", "]", ",", "v", "[", "1", "]", "[", "2", "]", "]", "# Faces", "fb", "=", "[", "p1", ",", "p2", ",", "p3", ",", "p4", "]", "# bottom face", "ft", "=", "[", "p5", ",", "p6", ",", "p7", ",", "p8", "]", "# top face", "fs1", "=", "[", "p1", ",", "p2", ",", "p6", ",", "p5", "]", "# side face 1", "fs2", "=", "[", "p2", ",", "p3", ",", "p7", ",", "p6", "]", "# side face 2", "fs3", "=", "[", "p3", ",", "p4", ",", "p8", ",", "p7", "]", "# side face 3", "fs4", "=", "[", "p1", ",", "p4", ",", "p8", ",", "p5", "]", "# side face 4", "# Append to return list", "new_vg", ".", "append", "(", "[", "fb", ",", "fs1", ",", "fs2", ",", "fs3", ",", "fs4", ",", "ft", "]", ")", "return", "new_vg" ]
35.703704
10.481481
def decode_and_filter(line: bytes, context: RunContext) -> typing.Optional[str]:
    """
    Decode a captured process-output line and run it through the filters.

    The line is decoded using the context's console encoding (undecodable
    bytes are replaced), then handed to ``filter_line``. A surviving line is
    returned right-stripped; a filtered-out line yields ``None``.

    :param line: raw line captured from the running process
    :param context: run context providing ``console_encoding``
    :return: the stripped line, or None if a filter caught it
    """
    decoded = line.decode(context.console_encoding, errors='replace')
    kept = filter_line(decoded, context)
    return kept.rstrip() if kept else None
[ "def", "decode_and_filter", "(", "line", ":", "bytes", ",", "context", ":", "RunContext", ")", "->", "typing", ".", "Optional", "[", "str", "]", ":", "line_str", ":", "str", "=", "line", ".", "decode", "(", "context", ".", "console_encoding", ",", "errors", "=", "'replace'", ")", "filtered_line", ":", "typing", ".", "Optional", "[", "str", "]", "=", "filter_line", "(", "line_str", ",", "context", ")", "if", "filtered_line", ":", "return", "filtered_line", ".", "rstrip", "(", ")", "return", "None" ]
34.789474
25.315789
def scan(self, func, sequences=None, outputs=None, non_sequences=None, block=None, **kwargs):
    """
    A loop function, the usage is identical with the theano one.

    Any updates produced by the scan are registered on ``block`` when one
    is given.

    :type block: deepy.layers.Block
    """
    scanner = Scanner(func, sequences, outputs, non_sequences,
                      neural_computation=True, **kwargs)
    results, updates = scanner.compute()
    if block and updates:
        # Keep the original exact-type check: only a plain dict is unpacked
        # into (key, value) pairs before registration.
        pairs = updates.items() if type(updates) == dict else updates
        block.register_updates(*pairs)
    return results
[ "def", "scan", "(", "self", ",", "func", ",", "sequences", "=", "None", ",", "outputs", "=", "None", ",", "non_sequences", "=", "None", ",", "block", "=", "None", ",", "*", "*", "kwargs", ")", ":", "results", ",", "updates", "=", "Scanner", "(", "func", ",", "sequences", ",", "outputs", ",", "non_sequences", ",", "neural_computation", "=", "True", ",", "*", "*", "kwargs", ")", ".", "compute", "(", ")", "if", "block", "and", "updates", ":", "if", "type", "(", "updates", ")", "==", "dict", ":", "updates", "=", "updates", ".", "items", "(", ")", "block", ".", "register_updates", "(", "*", "updates", ")", "return", "results" ]
46.818182
18.090909
def digest_auth_user(self, realm, user_name, environ):
    """Compute the digest hash A1 part for ``user_name`` in ``realm``.

    Returns False when the user is unknown; otherwise stores the user's
    roles in the WSGI environ and returns the A1 digest.
    """
    entry = self._get_realm_entry(realm, user_name)
    if entry is None:
        return False
    environ["wsgidav.auth.roles"] = entry.get("roles", [])
    return self._compute_http_digest_a1(realm, user_name, entry.get("password"))
[ "def", "digest_auth_user", "(", "self", ",", "realm", ",", "user_name", ",", "environ", ")", ":", "user", "=", "self", ".", "_get_realm_entry", "(", "realm", ",", "user_name", ")", "if", "user", "is", "None", ":", "return", "False", "password", "=", "user", ".", "get", "(", "\"password\"", ")", "environ", "[", "\"wsgidav.auth.roles\"", "]", "=", "user", ".", "get", "(", "\"roles\"", ",", "[", "]", ")", "return", "self", ".", "_compute_http_digest_a1", "(", "realm", ",", "user_name", ",", "password", ")" ]
46.25
14.125
def _cwl_workflow_template(inputs, top_level=False): """Retrieve CWL inputs shared amongst different workflows. """ ready_inputs = [] for inp in inputs: cur_inp = copy.deepcopy(inp) for attr in ["source", "valueFrom", "wf_duplicate"]: cur_inp.pop(attr, None) if top_level: cur_inp = workflow._flatten_nested_input(cur_inp) cur_inp = _clean_record(cur_inp) ready_inputs.append(cur_inp) return {"class": "Workflow", "cwlVersion": "v1.0", "hints": [], "requirements": [{"class": "EnvVarRequirement", "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]}, {"class": "ScatterFeatureRequirement"}, {"class": "SubworkflowFeatureRequirement"}], "inputs": ready_inputs, "outputs": [], "steps": []}
[ "def", "_cwl_workflow_template", "(", "inputs", ",", "top_level", "=", "False", ")", ":", "ready_inputs", "=", "[", "]", "for", "inp", "in", "inputs", ":", "cur_inp", "=", "copy", ".", "deepcopy", "(", "inp", ")", "for", "attr", "in", "[", "\"source\"", ",", "\"valueFrom\"", ",", "\"wf_duplicate\"", "]", ":", "cur_inp", ".", "pop", "(", "attr", ",", "None", ")", "if", "top_level", ":", "cur_inp", "=", "workflow", ".", "_flatten_nested_input", "(", "cur_inp", ")", "cur_inp", "=", "_clean_record", "(", "cur_inp", ")", "ready_inputs", ".", "append", "(", "cur_inp", ")", "return", "{", "\"class\"", ":", "\"Workflow\"", ",", "\"cwlVersion\"", ":", "\"v1.0\"", ",", "\"hints\"", ":", "[", "]", ",", "\"requirements\"", ":", "[", "{", "\"class\"", ":", "\"EnvVarRequirement\"", ",", "\"envDef\"", ":", "[", "{", "\"envName\"", ":", "\"MPLCONFIGDIR\"", ",", "\"envValue\"", ":", "\".\"", "}", "]", "}", ",", "{", "\"class\"", ":", "\"ScatterFeatureRequirement\"", "}", ",", "{", "\"class\"", ":", "\"SubworkflowFeatureRequirement\"", "}", "]", ",", "\"inputs\"", ":", "ready_inputs", ",", "\"outputs\"", ":", "[", "]", ",", "\"steps\"", ":", "[", "]", "}" ]
41.590909
14.363636
def apply_option(self, cmd, option, active=True):
    """Expand or strip a ``{name:text}`` command-line option placeholder.

    Every ``{option:<text>}`` occurrence in ``cmd`` is replaced with
    ``<text>`` when ``active`` is true, or removed entirely otherwise.

    :param cmd: command template string
    :param option: placeholder name to expand
    :param active: keep (True) or drop (False) the option text
    :return: the rewritten command string
    """
    # The replacement must be a raw string: non-raw '\g<option>' relies on
    # Python passing the unknown '\g' escape through, which is a
    # DeprecationWarning today and a SyntaxError in the future.
    return re.sub(r'{{{}\:(?P<option>[^}}]*)}}'.format(option),
                  r'\g<option>' if active else '', cmd)
[ "def", "apply_option", "(", "self", ",", "cmd", ",", "option", ",", "active", "=", "True", ")", ":", "return", "re", ".", "sub", "(", "r'{{{}\\:(?P<option>[^}}]*)}}'", ".", "format", "(", "option", ")", ",", "'\\g<option>'", "if", "active", "else", "''", ",", "cmd", ")" ]
54
13.5
def compile_dictionary(self, lang, wordlists, encoding, output):
    """Compile user dictionary.

    Merges the given word list files into a unique, sorted set of words and
    feeds it to the spell-checker binary (``create master``) to build a
    compiled dictionary at ``output``.

    :param lang: language code passed to the binary's ``--lang``
    :param wordlists: iterable of word list file paths (one word per line)
    :param encoding: requested encoding, normalized through codecs/filters
    :param output: path of the compiled master dictionary to (re)create
    :raises Exception: re-raises anything that fails, after logging context
    """
    # NOTE(review): ``cmd`` is used only for error reporting below; the
    # actual call later forces --encoding=utf-8 rather than this value —
    # confirm the discrepancy is intentional.
    cmd = [
        self.binary,
        '--lang', lang,
        '--encoding',
        codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(encoding, encoding).lower()).name,
        'create', 'master', output
    ]

    wordlist = ''

    try:
        # Ensure the destination directory exists and remove any stale
        # compiled dictionary before rebuilding.
        output_location = os.path.dirname(output)
        if not os.path.exists(output_location):
            os.makedirs(output_location)
        if os.path.exists(output):
            os.remove(output)

        self.log("Compiling Dictionary...", 1)
        # Read word lists and create a unique set of words
        words = set()
        for wordlist in wordlists:
            with open(wordlist, 'rb') as src:
                for word in src.read().split(b'\n'):
                    # Strip CR so CRLF files don't produce duplicate entries.
                    words.add(word.replace(b'\r', b''))

        # Compile wordlist against language
        util.call(
            [
                self.binary,
                '--lang', lang,
                '--encoding=utf-8',
                'create', 'master', output
            ],
            input_text=b'\n'.join(sorted(words)) + b'\n'
        )
    except Exception:
        # Log the attempted command and the word list being processed when
        # the failure occurred, then propagate.
        self.log(cmd, 0)
        self.log("Current wordlist: '%s'" % wordlist, 0)
        self.log("Problem compiling dictionary. Check the binary path and options.", 0)
        raise
[ "def", "compile_dictionary", "(", "self", ",", "lang", ",", "wordlists", ",", "encoding", ",", "output", ")", ":", "cmd", "=", "[", "self", ".", "binary", ",", "'--lang'", ",", "lang", ",", "'--encoding'", ",", "codecs", ".", "lookup", "(", "filters", ".", "PYTHON_ENCODING_NAMES", ".", "get", "(", "encoding", ",", "encoding", ")", ".", "lower", "(", ")", ")", ".", "name", ",", "'create'", ",", "'master'", ",", "output", "]", "wordlist", "=", "''", "try", ":", "output_location", "=", "os", ".", "path", ".", "dirname", "(", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_location", ")", ":", "os", ".", "makedirs", "(", "output_location", ")", "if", "os", ".", "path", ".", "exists", "(", "output", ")", ":", "os", ".", "remove", "(", "output", ")", "self", ".", "log", "(", "\"Compiling Dictionary...\"", ",", "1", ")", "# Read word lists and create a unique set of words", "words", "=", "set", "(", ")", "for", "wordlist", "in", "wordlists", ":", "with", "open", "(", "wordlist", ",", "'rb'", ")", "as", "src", ":", "for", "word", "in", "src", ".", "read", "(", ")", ".", "split", "(", "b'\\n'", ")", ":", "words", ".", "add", "(", "word", ".", "replace", "(", "b'\\r'", ",", "b''", ")", ")", "# Compile wordlist against language", "util", ".", "call", "(", "[", "self", ".", "binary", ",", "'--lang'", ",", "lang", ",", "'--encoding=utf-8'", ",", "'create'", ",", "'master'", ",", "output", "]", ",", "input_text", "=", "b'\\n'", ".", "join", "(", "sorted", "(", "words", ")", ")", "+", "b'\\n'", ")", "except", "Exception", ":", "self", ".", "log", "(", "cmd", ",", "0", ")", "self", ".", "log", "(", "\"Current wordlist: '%s'\"", "%", "wordlist", ",", "0", ")", "self", ".", "log", "(", "\"Problem compiling dictionary. Check the binary path and options.\"", ",", "0", ")", "raise" ]
33.954545
19.363636
def make_request(url, method='GET', headers=None, timeout=30, **kwargs):
    """A wrapper around requests to set defaults & call raise_for_status()."""
    # NOTE: a caller-supplied headers dict is mutated in place, matching the
    # long-standing behavior of this helper.
    headers = headers or {}
    headers['User-Agent'] = 'treeherder/{}'.format(settings.SITE_HOSTNAME)
    # Work around bug 1305768.
    if 'queue.taskcluster.net' in url:
        headers['x-taskcluster-skip-cache'] = 'true'

    resp = requests.request(method, url, headers=headers,
                            timeout=timeout, **kwargs)

    redirects = resp.history
    if redirects:
        newrelic.agent.record_custom_event(
            'RedirectedRequest',
            params={
                'url': url,
                'redirects': len(redirects),
                'duration': sum(r.elapsed.total_seconds() for r in redirects),
            },
        )

    resp.raise_for_status()
    return resp
[ "def", "make_request", "(", "url", ",", "method", "=", "'GET'", ",", "headers", "=", "None", ",", "timeout", "=", "30", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "headers", "or", "{", "}", "headers", "[", "'User-Agent'", "]", "=", "'treeherder/{}'", ".", "format", "(", "settings", ".", "SITE_HOSTNAME", ")", "# Work around bug 1305768.", "if", "'queue.taskcluster.net'", "in", "url", ":", "headers", "[", "'x-taskcluster-skip-cache'", "]", "=", "'true'", "response", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ",", "*", "*", "kwargs", ")", "if", "response", ".", "history", ":", "params", "=", "{", "'url'", ":", "url", ",", "'redirects'", ":", "len", "(", "response", ".", "history", ")", ",", "'duration'", ":", "sum", "(", "r", ".", "elapsed", ".", "total_seconds", "(", ")", "for", "r", "in", "response", ".", "history", ")", "}", "newrelic", ".", "agent", ".", "record_custom_event", "(", "'RedirectedRequest'", ",", "params", "=", "params", ")", "response", ".", "raise_for_status", "(", ")", "return", "response" ]
39.652174
17.652174
def resolve_absolute_name(self, name):
    '''
    Resolve a field from an absolute name.

    An absolute name is just like a unix absolute path: it starts with '/'
    and each name component is separated by '/'.

    :param name: absolute name, e.g. "/container/subcontainer/field"
    :return: field with this absolute name
    :raises: KittyException if field could not be resolved
    '''
    # Climb the enclosing chain up to the root container.
    node = self
    while node.enclosing:
        node = node.enclosing
    # "/" names the root itself; otherwise descend one component at a time.
    if name != '/':
        for part in name.split('/')[1:]:
            node = node.get_field_by_name(part)
    return node
[ "def", "resolve_absolute_name", "(", "self", ",", "name", ")", ":", "current", "=", "self", "while", "current", ".", "enclosing", ":", "current", "=", "current", ".", "enclosing", "if", "name", "!=", "'/'", ":", "components", "=", "name", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", "for", "component", "in", "components", ":", "current", "=", "current", ".", "get_field_by_name", "(", "component", ")", "return", "current" ]
38.611111
16.611111
def update_preference_communication_channel_id(self, notification, communication_channel_id, notification_preferences_frequency):
    """
    Update a preference.

    Change the preference for a single notification for a single
    communication channel.
    """
    params = {}
    # Required path parameters (both are IDs).
    path = {
        "communication_channel_id": communication_channel_id,
        "notification": notification,
    }
    # Required form data: the desired frequency for this notification.
    data = {
        "notification_preferences[frequency]": notification_preferences_frequency,
    }

    self.logger.debug("PUT /api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification}".format(**path), data=data, params=params, no_data=True)
[ "def", "update_preference_communication_channel_id", "(", "self", ",", "notification", ",", "communication_channel_id", ",", "notification_preferences_frequency", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - communication_channel_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"communication_channel_id\"", "]", "=", "communication_channel_id", "# REQUIRED - PATH - notification\r", "\"\"\"ID\"\"\"", "path", "[", "\"notification\"", "]", "=", "notification", "# REQUIRED - notification_preferences[frequency]\r", "\"\"\"The desired frequency for this notification\"\"\"", "data", "[", "\"notification_preferences[frequency]\"", "]", "=", "notification_preferences_frequency", "self", ".", "logger", ".", "debug", "(", "\"PUT /api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"PUT\"", ",", "\"/api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
50.875
36.875
def get_rendered_transform_path(self):
    """
    Generates a rendered transform path that is calculated from all parents.

    :return: the '/'-joined transform path from the root down to this node
    """
    # Collect this node's path plus every ancestor's, bottom-up ...
    segments = [self.transform_path]
    node = self.parent
    while node is not None:
        segments.append(node.transform_path)
        node = node.parent
    # ... then reverse so the root comes first.
    return "/".join(reversed(segments))
[ "def", "get_rendered_transform_path", "(", "self", ")", ":", "path", "=", "self", ".", "transform_path", "parent", "=", "self", ".", "parent", "while", "parent", "is", "not", "None", ":", "path", "=", "\"{0}/{1}\"", ".", "format", "(", "parent", ".", "transform_path", ",", "path", ")", "parent", "=", "parent", ".", "parent", "return", "path" ]
26.785714
13.5
def yaml_to_param(obj, name):
    """
    Return the top-level element of a document sub-tree containing the
    YAML serialization of a Python object.
    """
    # Serialize first, then wrap under a "yaml:"-prefixed parameter name.
    serialized = unicode(yaml.dump(obj))
    return from_pyvalue(u"yaml:%s" % name, serialized)
[ "def", "yaml_to_param", "(", "obj", ",", "name", ")", ":", "return", "from_pyvalue", "(", "u\"yaml:%s\"", "%", "name", ",", "unicode", "(", "yaml", ".", "dump", "(", "obj", ")", ")", ")" ]
34.5
10.5
def get_athlete(self, athlete_id=None):
    """
    Gets the specified athlete; if athlete_id is None then retrieves a
    detail-level representation of currently authenticated athlete;
    otherwise summary-level representation returned of athlete.

    http://strava.github.io/api/v3/athlete/#get-details
    http://strava.github.io/api/v3/athlete/#get-another-details

    :return: The athlete model object.
    :rtype: :class:`stravalib.model.Athlete`
    """
    # Strava removed the by-id endpoint in January 2018; only the
    # currently-authenticated athlete can be fetched.
    if athlete_id is not None:
        raise NotImplementedError("The /athletes/{id} endpoint was removed by Strava.  "
                                  "See https://developers.strava.com/docs/january-2018-update/")
    raw = self.protocol.get('/athlete')
    return model.Athlete.deserialize(raw, bind_client=self)
[ "def", "get_athlete", "(", "self", ",", "athlete_id", "=", "None", ")", ":", "if", "athlete_id", "is", "None", ":", "raw", "=", "self", ".", "protocol", ".", "get", "(", "'/athlete'", ")", "else", ":", "raise", "NotImplementedError", "(", "\"The /athletes/{id} endpoint was removed by Strava. \"", "\"See https://developers.strava.com/docs/january-2018-update/\"", ")", "# raw = self.protocol.get('/athletes/{athlete_id}', athlete_id=athlete_id)", "return", "model", ".", "Athlete", ".", "deserialize", "(", "raw", ",", "bind_client", "=", "self", ")" ]
41.818182
26.090909
def create_index(self, cardinality):
    """
    Create an index for the table with the given cardinality.

    Parameters
    ----------
    cardinality : int
        The cardinality to create a index for.
    """
    DatabaseConnector.create_index(self, cardinality)
    run = self.execute_sql
    run("CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality))
    if self.lowercase:
        # One LOWER() index per context-word column (word_1 .. word_{n-1}).
        for pos in reversed(range(cardinality)):
            if pos != 0:
                run("CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, pos))
        if self.normalize:
            run("CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality))
        else:
            run("CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality))
    elif self.normalize:
        run("CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality))
[ "def", "create_index", "(", "self", ",", "cardinality", ")", ":", "DatabaseConnector", ".", "create_index", "(", "self", ",", "cardinality", ")", "query", "=", "\"CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);\"", ".", "format", "(", "cardinality", ")", "self", ".", "execute_sql", "(", "query", ")", "if", "self", ".", "lowercase", ":", "for", "i", "in", "reversed", "(", "range", "(", "cardinality", ")", ")", ":", "if", "i", "!=", "0", ":", "query", "=", "\"CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));\"", ".", "format", "(", "cardinality", ",", "i", ")", "self", ".", "execute_sql", "(", "query", ")", "if", "self", ".", "normalize", ":", "query", "=", "\"CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);\"", ".", "format", "(", "cardinality", ")", "self", ".", "execute_sql", "(", "query", ")", "else", ":", "query", "=", "\"CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);\"", ".", "format", "(", "cardinality", ")", "self", ".", "execute_sql", "(", "query", ")", "elif", "self", ".", "normalize", ":", "query", "=", "\"CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);\"", ".", "format", "(", "cardinality", ")", "self", ".", "execute_sql", "(", "query", ")" ]
37.371429
30.571429
def regex(regex):
    """Return strategy that generates strings that match given regex.

    Regex can be either a string or compiled regex (through `re.compile()`).

    You can use regex flags (such as `re.IGNORECASE`, `re.DOTALL` or `re.UNICODE`)
    to control generation. Flags can be passed either in compiled regex (specify
    flags in call to `re.compile()`) or inside pattern with (?iLmsux) group.

    Some tricky regular expressions are partly supported or not supported at all.
    "^" and "$" do not affect generation. Positive lookahead/lookbehind groups
    are considered normal groups. Negative lookahead/lookbehind groups do not
    do anything. Ternary regex groups ('(?(name)yes-pattern|no-pattern)') are
    not supported at all.
    """
    # Accept both a plain pattern string and a pre-compiled pattern object.
    compiled = regex if hasattr(regex, 'pattern') else re.compile(regex)
    parsed = sre.parse(compiled.pattern)
    strategy = _strategy(parsed, Context(flags=compiled.flags))
    # Filtering on match() guards against any imperfection in generation.
    return strategy.filter(compiled.match)
[ "def", "regex", "(", "regex", ")", ":", "if", "not", "hasattr", "(", "regex", ",", "'pattern'", ")", ":", "regex", "=", "re", ".", "compile", "(", "regex", ")", "pattern", "=", "regex", ".", "pattern", "flags", "=", "regex", ".", "flags", "codes", "=", "sre", ".", "parse", "(", "pattern", ")", "return", "_strategy", "(", "codes", ",", "Context", "(", "flags", "=", "flags", ")", ")", ".", "filter", "(", "regex", ".", "match", ")" ]
40.166667
28
def cache(self, value):
    """Enable or disable caching of pages/frames. Clear cache if False."""
    enabled = bool(value)
    # Transitioning from enabled to disabled flushes anything cached so far.
    if not enabled and self._cache:
        self._clear()
    self._cache = enabled
[ "def", "cache", "(", "self", ",", "value", ")", ":", "value", "=", "bool", "(", "value", ")", "if", "self", ".", "_cache", "and", "not", "value", ":", "self", ".", "_clear", "(", ")", "self", ".", "_cache", "=", "value" ]
36.166667
10.166667
def sample(self, signum, frame):  #pylint: disable=unused-argument
    """Samples current stack and adds result in self._stats.

    Args:
        signum: Signal that activates handler.
        frame: Frame on top of the stack when signal is handled.
    """
    # Walk from the interrupted frame up to (but excluding) the base frame,
    # recording (function name, filename, first line) for each level.
    stack = []
    current = frame
    while current and current != self.base_frame:
        code = current.f_code
        stack.append((code.co_name, code.co_filename, code.co_firstlineno))
        current = current.f_back
    self._stats[tuple(stack)] += 1
    # Re-arm the profiling timer so the next sample fires.
    signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
[ "def", "sample", "(", "self", ",", "signum", ",", "frame", ")", ":", "#pylint: disable=unused-argument", "stack", "=", "[", "]", "while", "frame", "and", "frame", "!=", "self", ".", "base_frame", ":", "stack", ".", "append", "(", "(", "frame", ".", "f_code", ".", "co_name", ",", "frame", ".", "f_code", ".", "co_filename", ",", "frame", ".", "f_code", ".", "co_firstlineno", ")", ")", "frame", "=", "frame", ".", "f_back", "self", ".", "_stats", "[", "tuple", "(", "stack", ")", "]", "+=", "1", "signal", ".", "setitimer", "(", "signal", ".", "ITIMER_PROF", ",", "_SAMPLE_INTERVAL", ")" ]
38.75
13.5625
def as_list_data(self):
    """Return an Element to be used in a list.

    Most lists want an element with tag of list_type, and subelements of
    id and name.

    Returns:
        Element: list representation of object.
    """
    root = ElementTree.Element(self.list_type)
    ElementTree.SubElement(root, "id").text = self.id
    ElementTree.SubElement(root, "name").text = self.name
    return root
[ "def", "as_list_data", "(", "self", ")", ":", "element", "=", "ElementTree", ".", "Element", "(", "self", ".", "list_type", ")", "id_", "=", "ElementTree", ".", "SubElement", "(", "element", ",", "\"id\"", ")", "id_", ".", "text", "=", "self", ".", "id", "name", "=", "ElementTree", ".", "SubElement", "(", "element", ",", "\"name\"", ")", "name", ".", "text", "=", "self", ".", "name", "return", "element" ]
32.133333
15.933333
def rotate(self, n):
    '''Rotate Sequence by n bases.

    :param n: Number of bases to rotate.
    :type n: int
    :returns: The current sequence reoriented at `index`.
    :rtype: coral.sequence._sequence.Sequence
    :raises: ValueError if applied to linear sequence or `index` is
             negative.

    '''
    # Rotation is only meaningful on circular sequences (n == 0 is a no-op
    # either way): slice, rejoin, and re-close into a circle.
    if self.circular or n == 0:
        return (self[-n:] + self[:-n]).circularize()
    raise ValueError('Cannot rotate a linear sequence')
[ "def", "rotate", "(", "self", ",", "n", ")", ":", "if", "not", "self", ".", "circular", "and", "n", "!=", "0", ":", "raise", "ValueError", "(", "'Cannot rotate a linear sequence'", ")", "else", ":", "rotated", "=", "self", "[", "-", "n", ":", "]", "+", "self", "[", ":", "-", "n", "]", "return", "rotated", ".", "circularize", "(", ")" ]
33.6875
17.6875
def remove_state(self, state):
    """
    Remove this conversation from the given state, and potentially
    deactivate the state if no more conversations are in it.

    The relation name will be interpolated in the state name, and it is
    recommended that it be included to avoid conflicts with states from
    other relations.  For example::

        conversation.remove_state('{relation_name}.state')

    If called from a converation handling the relation "foo", this will
    remove the conversation from the "foo.state" state, and, if no more
    conversations are in this the state, will deactivate it.
    """
    flag = state.format(relation_name=self.relation_name)
    value = _get_flag_value(flag)
    if not value:
        # Nothing recorded under this state; nothing to remove.
        return
    conversations = value['conversations']
    if self.key in conversations:
        conversations.remove(self.key)
    # Keep the flag while other conversations remain, otherwise drop it.
    if conversations:
        set_flag(flag, value)
    else:
        clear_flag(flag)
[ "def", "remove_state", "(", "self", ",", "state", ")", ":", "state", "=", "state", ".", "format", "(", "relation_name", "=", "self", ".", "relation_name", ")", "value", "=", "_get_flag_value", "(", "state", ")", "if", "not", "value", ":", "return", "if", "self", ".", "key", "in", "value", "[", "'conversations'", "]", ":", "value", "[", "'conversations'", "]", ".", "remove", "(", "self", ".", "key", ")", "if", "value", "[", "'conversations'", "]", ":", "set_flag", "(", "state", ",", "value", ")", "else", ":", "clear_flag", "(", "state", ")" ]
39.88
20.12
def set(self, section, option, value=None):
    """Set an option."""
    if value:
        value = self._interpolation.before_set(self, section, option, value)
    if section and section != self.default_section:
        try:
            target = self._sections[section]
        except KeyError:
            raise from_none(NoSectionError(section))
    else:
        # An empty section name (or the default section) targets the
        # shared defaults dictionary.
        target = self._defaults
    target[self.optionxform(option)] = value
[ "def", "set", "(", "self", ",", "section", ",", "option", ",", "value", "=", "None", ")", ":", "if", "value", ":", "value", "=", "self", ".", "_interpolation", ".", "before_set", "(", "self", ",", "section", ",", "option", ",", "value", ")", "if", "not", "section", "or", "section", "==", "self", ".", "default_section", ":", "sectdict", "=", "self", ".", "_defaults", "else", ":", "try", ":", "sectdict", "=", "self", ".", "_sections", "[", "section", "]", "except", "KeyError", ":", "raise", "from_none", "(", "NoSectionError", "(", "section", ")", ")", "sectdict", "[", "self", ".", "optionxform", "(", "option", ")", "]", "=", "value" ]
40.461538
15.076923
def get_token_data(self):
    """ Get token and data from keystone """
    auth_ref = self._keystone_auth.conn.auth_ref
    token = auth_ref['auth_token']
    self.set_token(token)

    # Best-effort cache write: a cache failure must not break auth.
    if self.cache.is_redis_ok():
        try:
            self.cache.set_cache_token(auth_ref)
        except CacheException:
            self.logger.error('Token not setted in cache.')

    return {
        'expires_at': auth_ref['expires_at'],
        'token': token
    }
[ "def", "get_token_data", "(", "self", ")", ":", "token_data", "=", "self", ".", "_keystone_auth", ".", "conn", ".", "auth_ref", "token", "=", "token_data", "[", "'auth_token'", "]", "self", ".", "set_token", "(", "token", ")", "if", "self", ".", "cache", ".", "is_redis_ok", "(", ")", ":", "try", ":", "self", ".", "cache", ".", "set_cache_token", "(", "token_data", ")", "except", "CacheException", ":", "self", ".", "logger", ".", "error", "(", "'Token not setted in cache.'", ")", "token_data", "=", "{", "'expires_at'", ":", "token_data", "[", "'expires_at'", "]", ",", "'token'", ":", "token", "}", "return", "token_data" ]
28
18.947368
def _fetch(self, key):
    """Helper function to fetch values from owning section.

    Returns a 2-tuple: the value, and the section where it was found.
    """
    def _usable(candidate):
        # A hit only counts when it is a real value, not a subsection.
        return candidate is not None and not isinstance(candidate, Section)

    # Interpolation must be switched off while probing, or lookups would
    # recurse back into this engine.
    main = self.section.main
    saved = main.interpolation
    main.interpolation = False

    # Start at the section that "owns" this InterpolationEngine and walk up.
    section = self.section
    while True:
        # The section's own entry wins over its "DEFAULT" entries.
        val = section.get(key)
        if _usable(val):
            break
        val = section.get('DEFAULT', {}).get(key)
        if _usable(val):
            break
        # The top-level section is its own parent: give up there.
        if section.parent is section:
            break
        section = section.parent

    # Restore interpolation to its previous value before returning.
    main.interpolation = saved
    if val is None:
        raise MissingInterpolationOption(key)
    return val, section
[ "def", "_fetch", "(", "self", ",", "key", ")", ":", "# switch off interpolation before we try and fetch anything !", "save_interp", "=", "self", ".", "section", ".", "main", ".", "interpolation", "self", ".", "section", ".", "main", ".", "interpolation", "=", "False", "# Start at section that \"owns\" this InterpolationEngine", "current_section", "=", "self", ".", "section", "while", "True", ":", "# try the current section first", "val", "=", "current_section", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", "and", "not", "isinstance", "(", "val", ",", "Section", ")", ":", "break", "# try \"DEFAULT\" next", "val", "=", "current_section", ".", "get", "(", "'DEFAULT'", ",", "{", "}", ")", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", "and", "not", "isinstance", "(", "val", ",", "Section", ")", ":", "break", "# move up to parent and try again", "# top-level's parent is itself", "if", "current_section", ".", "parent", "is", "current_section", ":", "# reached top level, time to give up", "break", "current_section", "=", "current_section", ".", "parent", "# restore interpolation to previous value before returning", "self", ".", "section", ".", "main", ".", "interpolation", "=", "save_interp", "if", "val", "is", "None", ":", "raise", "MissingInterpolationOption", "(", "key", ")", "return", "val", ",", "current_section" ]
40.65625
16.34375
def deaccent(text):
    """
    Remove accentuation from the given string.
    """
    # Decompose characters (NFD), drop combining marks ('Mn'), recompose (NFC).
    decomposed = unicodedata.normalize("NFD", text)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return unicodedata.normalize("NFC", "".join(kept))
[ "def", "deaccent", "(", "text", ")", ":", "norm", "=", "unicodedata", ".", "normalize", "(", "\"NFD\"", ",", "text", ")", "result", "=", "\"\"", ".", "join", "(", "ch", "for", "ch", "in", "norm", "if", "unicodedata", ".", "category", "(", "ch", ")", "!=", "'Mn'", ")", "return", "unicodedata", ".", "normalize", "(", "\"NFC\"", ",", "result", ")" ]
35.142857
10.571429
def _maybe_decode(self, value, encoding='utf-8'):
    """If a bytes object is passed in, in the Python 3 environment,
    decode it using the specified encoding to turn it to a str instance.

    :param mixed value: The value to possibly decode
    :param str encoding: The encoding to use
    :rtype: str

    """
    # Non-bytes values (and everything on Python 2) pass through untouched.
    if not (_PYTHON3 and isinstance(value, bytes)):
        return value
    try:
        return value.decode(encoding)
    except Exception as err:
        self.logger.exception('Error decoding value: %s', err)
        raise MessageException(
            str(err), 'decoding-{}'.format(encoding))
[ "def", "_maybe_decode", "(", "self", ",", "value", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "_PYTHON3", "and", "isinstance", "(", "value", ",", "bytes", ")", ":", "try", ":", "return", "value", ".", "decode", "(", "encoding", ")", "except", "Exception", "as", "err", ":", "self", ".", "logger", ".", "exception", "(", "'Error decoding value: %s'", ",", "err", ")", "raise", "MessageException", "(", "str", "(", "err", ")", ",", "'decoding-{}'", ".", "format", "(", "encoding", ")", ")", "return", "value" ]
39.176471
16.705882
def all(
    self,
    count=500,
    offset=0,
    type=None,
    inactive=None,
    emailFilter=None,
    tag=None,
    messageID=None,
    fromdate=None,
    todate=None,
):
    """
    Returns many bounces.

    :param int count: Number of bounces to return per request.
    :param int offset: Number of bounces to skip.
    :param str type: Filter by type of bounce.
    :param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
    :param str emailFilter: Filter by email address.
    :param str tag: Filter by tag.
    :param str messageID: Filter by messageID.
    :param date fromdate: Filter messages starting from the date specified (inclusive).
    :param date todate: Filter messages up to the date specified (inclusive).
    :return: A list of :py:class:`Bounce` instances.
    :rtype: `list`
    """
    # Gather every filter into one mapping, then fan out paged requests.
    filters = dict(
        count=count,
        offset=offset,
        type=type,
        inactive=inactive,
        emailFilter=emailFilter,
        tag=tag,
        messageID=messageID,
        fromdate=fromdate,
        todate=todate,
    )
    responses = self.call_many("GET", "/bounces/", **filters)
    return self.expand_responses(responses, "Bounces")
[ "def", "all", "(", "self", ",", "count", "=", "500", ",", "offset", "=", "0", ",", "type", "=", "None", ",", "inactive", "=", "None", ",", "emailFilter", "=", "None", ",", "tag", "=", "None", ",", "messageID", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ",", ")", ":", "responses", "=", "self", ".", "call_many", "(", "\"GET\"", ",", "\"/bounces/\"", ",", "count", "=", "count", ",", "offset", "=", "offset", ",", "type", "=", "type", ",", "inactive", "=", "inactive", ",", "emailFilter", "=", "emailFilter", ",", "tag", "=", "tag", ",", "messageID", "=", "messageID", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ",", ")", "return", "self", ".", "expand_responses", "(", "responses", ",", "\"Bounces\"", ")" ]
32
19.268293
def set_hostname(hostname=None, **kwargs):
    '''
    Set the device's hostname

    hostname
        The name to be set

    comment
        Provide a comment to the commit

    dev_timeout : 30
        The NETCONF RPC timeout (in seconds)

    confirm
        Provide time in minutes for commit confirmation. If this option is
        specified, the commit will be rolled back in the specified amount of time
        unless the commit is confirmed.

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.set_hostname salt-device

    '''
    # Obtain the device connection held by the junos proxy minion.
    conn = __proxy__['junos.conn']()
    ret = {}
    if hostname is None:
        ret['message'] = 'Please provide the hostname.'
        ret['out'] = False
        return ret

    # Collect commit options (comment, confirm, dev_timeout, ...).  When
    # invoked via the CLI, Salt packs trailing keyword args into the last
    # element of '__pub_arg'; otherwise plain kwargs are used.
    op = dict()
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    # Added to recent versions of JunOs
    # Use text format instead
    set_string = 'set system host-name {0}'.format(hostname)
    # Stage the candidate configuration; any load failure aborts early.
    try:
        conn.cu.load(set_string, format='set')
    except Exception as exception:
        ret['message'] = 'Could not load configuration due to error "{0}"'.format(
            exception)
        ret['out'] = False
        return ret

    # Validate the candidate configuration before committing.
    try:
        commit_ok = conn.cu.commit_check()
    except Exception as exception:
        ret['message'] = 'Could not commit check due to error "{0}"'.format(
            exception)
        ret['out'] = False
        return ret

    if commit_ok:
        try:
            conn.cu.commit(**op)
            ret['message'] = 'Successfully changed hostname.'
            ret['out'] = True
        except Exception as exception:
            ret['out'] = False
            ret['message'] = 'Successfully loaded host-name but commit failed with "{0}"'.format(
                exception)
            return ret
    else:
        # Pre-commit check failed: discard the staged change.
        ret['out'] = False
        ret[
            'message'] = 'Successfully loaded host-name but pre-commit check failed.'
        conn.cu.rollback()
    return ret
[ "def", "set_hostname", "(", "hostname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__proxy__", "[", "'junos.conn'", "]", "(", ")", "ret", "=", "{", "}", "if", "hostname", "is", "None", ":", "ret", "[", "'message'", "]", "=", "'Please provide the hostname.'", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "op", "=", "dict", "(", ")", "if", "'__pub_arg'", "in", "kwargs", ":", "if", "kwargs", "[", "'__pub_arg'", "]", ":", "if", "isinstance", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "-", "1", "]", ",", "dict", ")", ":", "op", ".", "update", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "-", "1", "]", ")", "else", ":", "op", ".", "update", "(", "kwargs", ")", "# Added to recent versions of JunOs", "# Use text format instead", "set_string", "=", "'set system host-name {0}'", ".", "format", "(", "hostname", ")", "try", ":", "conn", ".", "cu", ".", "load", "(", "set_string", ",", "format", "=", "'set'", ")", "except", "Exception", "as", "exception", ":", "ret", "[", "'message'", "]", "=", "'Could not load configuration due to error \"{0}\"'", ".", "format", "(", "exception", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "try", ":", "commit_ok", "=", "conn", ".", "cu", ".", "commit_check", "(", ")", "except", "Exception", "as", "exception", ":", "ret", "[", "'message'", "]", "=", "'Could not commit check due to error \"{0}\"'", ".", "format", "(", "exception", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "if", "commit_ok", ":", "try", ":", "conn", ".", "cu", ".", "commit", "(", "*", "*", "op", ")", "ret", "[", "'message'", "]", "=", "'Successfully changed hostname.'", "ret", "[", "'out'", "]", "=", "True", "except", "Exception", "as", "exception", ":", "ret", "[", "'out'", "]", "=", "False", "ret", "[", "'message'", "]", "=", "'Successfully loaded host-name but commit failed with \"{0}\"'", ".", "format", "(", "exception", ")", "return", "ret", "else", ":", "ret", "[", "'out'", "]", "=", "False", "ret", "[", 
"'message'", "]", "=", "'Successfully loaded host-name but pre-commit check failed.'", "conn", ".", "cu", ".", "rollback", "(", ")", "return", "ret" ]
27.28
22.453333
def read_and_save_data(info_df,
                       raw_dir,
                       sep=";",
                       force_raw=False,
                       force_cellpy=False,
                       export_cycles=False,
                       shifted_cycles=False,
                       export_raw=True,
                       export_ica=False,
                       save=True,
                       use_cellpy_stat_file=False,
                       parent_level="CellpyData",
                       last_cycle=None,
                       ):
    """Reads and saves cell data defined by the info-DataFrame.

    The function iterates through the ``info_df`` and loads data from the runs.
    It saves individual data for each run (if selected), as well as returns a
    list of ``cellpy`` summary DataFrames, a list of the indexes (one for each
    run; same as used as index in the ``info_df``), as well as a list with
    indexes of runs (cells) where an error was encountered during loading.

    Args:
        use_cellpy_stat_file: use the stat file to perform the calculations.
        info_df: pandas.DataFrame with information about the runs.
        raw_dir: path to location where you want to save raw data.
        sep: delimiter to use when exporting to csv.
        force_raw: load raw data even-though cellpy-file is up-to-date.
        force_cellpy: load cellpy files even-though cellpy-file is not
            up-to-date.
        export_cycles: set to True for exporting cycles to csv.
        shifted_cycles: set to True for exporting the cycles with a cumulated
            shift.
        export_raw: set to True for exporting raw data to csv.
        export_ica: set to True for calculating and exporting dQ/dV to csv.
        save: set to False to prevent saving a cellpy-file.
        parent_level: optional, should use "cellpydata" for older hdf5-files
            and default for newer ones.

    Returns:
        frames (list of cellpy summary DataFrames), keys (list of indexes),
        errors (list of indexes that encountered errors).
    """
    no_export = False
    do_export_dqdv = export_ica
    keys = []
    frames = []
    number_of_runs = len(info_df)
    counter = 0
    errors = []

    for indx, row in info_df.iterrows():
        counter += 1
        # Simple text progress bar, e.g. "[|||....]".
        h_txt = "[" + counter * "|" + (number_of_runs - counter) * "." + "]"
        l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
        logger.debug(l_txt)
        print(h_txt)

        # Without raw files (and without force_cellpy) there is nothing to load.
        if not row.raw_file_names and not force_cellpy:
            logger.info("File(s) not found!")
            logger.info(indx)
            logger.debug("File(s) not found for index=%s" % indx)
            errors.append(indx)
            continue
        else:
            logger.info(f"Processing {indx}")

        cell_data = cellreader.CellpyData()
        if not force_cellpy:
            logger.info("setting cycle mode (%s)..." % row.cell_type)
            cell_data.set_cycle_mode(row.cell_type)

        logger.info("loading cell")
        if not force_cellpy:
            # Normal path: load raw and/or cellpy file, whichever is fresher.
            logger.info("not forcing")
            try:
                cell_data.loadcell(raw_files=row.raw_file_names,
                                   cellpy_file=row.cellpy_file_names,
                                   mass=row.masses,
                                   summary_on_raw=True,
                                   force_raw=force_raw,
                                   use_cellpy_stat_file=use_cellpy_stat_file)
            except Exception as e:
                logger.debug('Failed to load: ' + str(e))
                errors.append("loadcell:" + str(indx))
                continue
        else:
            # Forced path: only load the existing cellpy (hdf5) file.
            logger.info("forcing")
            try:
                cell_data.load(row.cellpy_file_names,
                               parent_level=parent_level)
            except Exception as e:
                logger.info(f"Critical exception encountered {type(e)} "
                            "- skipping this file")
                logger.debug('Failed to load. Error-message: ' + str(e))
                errors.append("load:" + str(indx))
                continue

        if not cell_data.check():
            logger.info("...not loaded...")
            logger.debug("Did not pass check(). Could not load cell!")
            errors.append("check:" + str(indx))
            continue

        logger.info("...loaded successfully...")
        keys.append(indx)

        summary_tmp = cell_data.dataset.dfsummary
        logger.info("Trying to get summary_data")
        if summary_tmp is None:
            logger.info("No existing summary made - running make_summary")
            cell_data.make_summary(find_end_voltage=True, find_ir=True)

        # Older files can carry a byte-string index name; normalize it.
        if summary_tmp.index.name == b"Cycle_Index":
            logger.debug("Strange: 'Cycle_Index' is a byte-string")
            summary_tmp.index.name = 'Cycle_Index'

        if not summary_tmp.index.name == "Cycle_Index":
            logger.debug("Setting index to Cycle_Index")
            # check if it is a byte-string
            if b"Cycle_Index" in summary_tmp.columns:
                logger.debug("Seems to be a byte-string in the column-headers")
                summary_tmp.rename(columns={b"Cycle_Index": 'Cycle_Index'},
                                   inplace=True)
            summary_tmp.set_index("Cycle_Index", inplace=True)

        frames.append(summary_tmp)

        if save:
            # Rows flagged 'fixed' in info_df must never be overwritten.
            if not row.fixed:
                logger.info("saving cell to %s" % row.cellpy_file_names)
                cell_data.ensure_step_table = True
                cell_data.save(row.cellpy_file_names)
            else:
                logger.debug("saving cell skipped (set to 'fixed' in info_df)")

        if no_export:
            continue

        if export_raw:
            logger.info("exporting csv")
            cell_data.to_csv(raw_dir, sep=sep, cycles=export_cycles,
                             shifted=shifted_cycles, raw=export_raw,
                             last_cycle=last_cycle)

        if do_export_dqdv:
            # dQ/dV export is best-effort: failures are logged and recorded.
            logger.info("exporting dqdv")
            try:
                export_dqdv(cell_data, savedir=raw_dir, sep=sep,
                            last_cycle=last_cycle)
            except Exception as e:
                logging.error("Could not make/export dq/dv data")
                logger.debug("Failed to make/export "
                             "dq/dv data (%s): %s" % (indx, str(e)))
                errors.append("ica:" + str(indx))

    if len(errors) > 0:
        logger.error("Finished with errors!")
        logger.debug(errors)
    else:
        logger.info("Finished")

    return frames, keys, errors
[ "def", "read_and_save_data", "(", "info_df", ",", "raw_dir", ",", "sep", "=", "\";\"", ",", "force_raw", "=", "False", ",", "force_cellpy", "=", "False", ",", "export_cycles", "=", "False", ",", "shifted_cycles", "=", "False", ",", "export_raw", "=", "True", ",", "export_ica", "=", "False", ",", "save", "=", "True", ",", "use_cellpy_stat_file", "=", "False", ",", "parent_level", "=", "\"CellpyData\"", ",", "last_cycle", "=", "None", ",", ")", ":", "no_export", "=", "False", "do_export_dqdv", "=", "export_ica", "keys", "=", "[", "]", "frames", "=", "[", "]", "number_of_runs", "=", "len", "(", "info_df", ")", "counter", "=", "0", "errors", "=", "[", "]", "for", "indx", ",", "row", "in", "info_df", ".", "iterrows", "(", ")", ":", "counter", "+=", "1", "h_txt", "=", "\"[\"", "+", "counter", "*", "\"|\"", "+", "(", "number_of_runs", "-", "counter", ")", "*", "\".\"", "+", "\"]\"", "l_txt", "=", "\"starting to process file # %i (index=%s)\"", "%", "(", "counter", ",", "indx", ")", "logger", ".", "debug", "(", "l_txt", ")", "print", "(", "h_txt", ")", "if", "not", "row", ".", "raw_file_names", "and", "not", "force_cellpy", ":", "logger", ".", "info", "(", "\"File(s) not found!\"", ")", "logger", ".", "info", "(", "indx", ")", "logger", ".", "debug", "(", "\"File(s) not found for index=%s\"", "%", "indx", ")", "errors", ".", "append", "(", "indx", ")", "continue", "else", ":", "logger", ".", "info", "(", "f\"Processing {indx}\"", ")", "cell_data", "=", "cellreader", ".", "CellpyData", "(", ")", "if", "not", "force_cellpy", ":", "logger", ".", "info", "(", "\"setting cycle mode (%s)...\"", "%", "row", ".", "cell_type", ")", "cell_data", ".", "set_cycle_mode", "(", "row", ".", "cell_type", ")", "logger", ".", "info", "(", "\"loading cell\"", ")", "if", "not", "force_cellpy", ":", "logger", ".", "info", "(", "\"not forcing\"", ")", "try", ":", "cell_data", ".", "loadcell", "(", "raw_files", "=", "row", ".", "raw_file_names", ",", "cellpy_file", "=", "row", ".", 
"cellpy_file_names", ",", "mass", "=", "row", ".", "masses", ",", "summary_on_raw", "=", "True", ",", "force_raw", "=", "force_raw", ",", "use_cellpy_stat_file", "=", "use_cellpy_stat_file", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'Failed to load: '", "+", "str", "(", "e", ")", ")", "errors", ".", "append", "(", "\"loadcell:\"", "+", "str", "(", "indx", ")", ")", "continue", "else", ":", "logger", ".", "info", "(", "\"forcing\"", ")", "try", ":", "cell_data", ".", "load", "(", "row", ".", "cellpy_file_names", ",", "parent_level", "=", "parent_level", ")", "except", "Exception", "as", "e", ":", "logger", ".", "info", "(", "f\"Critical exception encountered {type(e)} \"", "\"- skipping this file\"", ")", "logger", ".", "debug", "(", "'Failed to load. Error-message: '", "+", "str", "(", "e", ")", ")", "errors", ".", "append", "(", "\"load:\"", "+", "str", "(", "indx", ")", ")", "continue", "if", "not", "cell_data", ".", "check", "(", ")", ":", "logger", ".", "info", "(", "\"...not loaded...\"", ")", "logger", ".", "debug", "(", "\"Did not pass check(). 
Could not load cell!\"", ")", "errors", ".", "append", "(", "\"check:\"", "+", "str", "(", "indx", ")", ")", "continue", "logger", ".", "info", "(", "\"...loaded successfully...\"", ")", "keys", ".", "append", "(", "indx", ")", "summary_tmp", "=", "cell_data", ".", "dataset", ".", "dfsummary", "logger", ".", "info", "(", "\"Trying to get summary_data\"", ")", "if", "summary_tmp", "is", "None", ":", "logger", ".", "info", "(", "\"No existing summary made - running make_summary\"", ")", "cell_data", ".", "make_summary", "(", "find_end_voltage", "=", "True", ",", "find_ir", "=", "True", ")", "if", "summary_tmp", ".", "index", ".", "name", "==", "b\"Cycle_Index\"", ":", "logger", ".", "debug", "(", "\"Strange: 'Cycle_Index' is a byte-string\"", ")", "summary_tmp", ".", "index", ".", "name", "=", "'Cycle_Index'", "if", "not", "summary_tmp", ".", "index", ".", "name", "==", "\"Cycle_Index\"", ":", "logger", ".", "debug", "(", "\"Setting index to Cycle_Index\"", ")", "# check if it is a byte-string", "if", "b\"Cycle_Index\"", "in", "summary_tmp", ".", "columns", ":", "logger", ".", "debug", "(", "\"Seems to be a byte-string in the column-headers\"", ")", "summary_tmp", ".", "rename", "(", "columns", "=", "{", "b\"Cycle_Index\"", ":", "'Cycle_Index'", "}", ",", "inplace", "=", "True", ")", "summary_tmp", ".", "set_index", "(", "\"Cycle_Index\"", ",", "inplace", "=", "True", ")", "frames", ".", "append", "(", "summary_tmp", ")", "if", "save", ":", "if", "not", "row", ".", "fixed", ":", "logger", ".", "info", "(", "\"saving cell to %s\"", "%", "row", ".", "cellpy_file_names", ")", "cell_data", ".", "ensure_step_table", "=", "True", "cell_data", ".", "save", "(", "row", ".", "cellpy_file_names", ")", "else", ":", "logger", ".", "debug", "(", "\"saving cell skipped (set to 'fixed' in info_df)\"", ")", "if", "no_export", ":", "continue", "if", "export_raw", ":", "logger", ".", "info", "(", "\"exporting csv\"", ")", "cell_data", ".", "to_csv", "(", "raw_dir", ",", "sep", "=", 
"sep", ",", "cycles", "=", "export_cycles", ",", "shifted", "=", "shifted_cycles", ",", "raw", "=", "export_raw", ",", "last_cycle", "=", "last_cycle", ")", "if", "do_export_dqdv", ":", "logger", ".", "info", "(", "\"exporting dqdv\"", ")", "try", ":", "export_dqdv", "(", "cell_data", ",", "savedir", "=", "raw_dir", ",", "sep", "=", "sep", ",", "last_cycle", "=", "last_cycle", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "\"Could not make/export dq/dv data\"", ")", "logger", ".", "debug", "(", "\"Failed to make/export \"", "\"dq/dv data (%s): %s\"", "%", "(", "indx", ",", "str", "(", "e", ")", ")", ")", "errors", ".", "append", "(", "\"ica:\"", "+", "str", "(", "indx", ")", ")", "if", "len", "(", "errors", ")", ">", "0", ":", "logger", ".", "error", "(", "\"Finished with errors!\"", ")", "logger", ".", "debug", "(", "errors", ")", "else", ":", "logger", ".", "info", "(", "\"Finished\"", ")", "return", "frames", ",", "keys", ",", "errors" ]
41.657895
21.184211
def GetExpirationTime(self): """Computes the timestamp at which this breakpoint will expire.""" # TODO(emrekultursay): Move this to a common method. if '.' not in self.definition['createTime']: fmt = '%Y-%m-%dT%H:%M:%S%Z' else: fmt = '%Y-%m-%dT%H:%M:%S.%f%Z' create_datetime = datetime.strptime( self.definition['createTime'].replace('Z', 'UTC'), fmt) return create_datetime + self.expiration_period
[ "def", "GetExpirationTime", "(", "self", ")", ":", "# TODO(emrekultursay): Move this to a common method.", "if", "'.'", "not", "in", "self", ".", "definition", "[", "'createTime'", "]", ":", "fmt", "=", "'%Y-%m-%dT%H:%M:%S%Z'", "else", ":", "fmt", "=", "'%Y-%m-%dT%H:%M:%S.%f%Z'", "create_datetime", "=", "datetime", ".", "strptime", "(", "self", ".", "definition", "[", "'createTime'", "]", ".", "replace", "(", "'Z'", ",", "'UTC'", ")", ",", "fmt", ")", "return", "create_datetime", "+", "self", ".", "expiration_period" ]
39.454545
13.818182
def tab(self, netloc=None, url=None, extra_id=None, use_tid=False): ''' Get a chromium tab from the pool, optionally one that has an association with a specific netloc/URL. If no url or netloc is specified, the per-thread identifier will be used. If `extra_id` is specified, it's stringified value will be mixed into the pool key If `use_tid` is true, the per-thread identifier will be mixed into the pool key. In all cases, the tab pool is a least-recently-used cache, so the tab that has been accessed the least recently will be automatically closed if a new tab is requested, and there are already `tab_pool_max_size` tabs created. ''' assert self.alive, "Chrome has been shut down! Cannot continue!" if not netloc and url: netloc = urllib.parse.urlparse(url).netloc self.log.debug("Getting tab for netloc: %s (url: %s)", netloc, url) # Coerce to string type so even if it's none, it doesn't hurt anything. key = str(netloc) if extra_id: key += " " + str(extra_id) if use_tid or not key: key += " " + str(threading.get_ident()) if self.__started_pid != os.getpid(): self.log.error("TabPooledChromium instances are not safe to share across multiple processes.") self.log.error("Please create a new in each separate multiprocesssing process.") raise RuntimeError("TabPooledChromium instances are not safe to share across multiple processes.") with self.__counter_lock: self.__active_tabs.setdefault(key, 0) self.__active_tabs[key] += 1 if self.__active_tabs[key] > 1: self.log.warning("Tab with key %s checked out more then once simultaneously") try: lock, tab = self.__tab_cache[key] with lock: yield tab finally: with self.__counter_lock: self.__active_tabs[key] -= 1 if self.__active_tabs[key] == 0: self.__active_tabs.pop(key)
[ "def", "tab", "(", "self", ",", "netloc", "=", "None", ",", "url", "=", "None", ",", "extra_id", "=", "None", ",", "use_tid", "=", "False", ")", ":", "assert", "self", ".", "alive", ",", "\"Chrome has been shut down! Cannot continue!\"", "if", "not", "netloc", "and", "url", ":", "netloc", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", ".", "netloc", "self", ".", "log", ".", "debug", "(", "\"Getting tab for netloc: %s (url: %s)\"", ",", "netloc", ",", "url", ")", "# Coerce to string type so even if it's none, it doesn't hurt anything.", "key", "=", "str", "(", "netloc", ")", "if", "extra_id", ":", "key", "+=", "\" \"", "+", "str", "(", "extra_id", ")", "if", "use_tid", "or", "not", "key", ":", "key", "+=", "\" \"", "+", "str", "(", "threading", ".", "get_ident", "(", ")", ")", "if", "self", ".", "__started_pid", "!=", "os", ".", "getpid", "(", ")", ":", "self", ".", "log", ".", "error", "(", "\"TabPooledChromium instances are not safe to share across multiple processes.\"", ")", "self", ".", "log", ".", "error", "(", "\"Please create a new in each separate multiprocesssing process.\"", ")", "raise", "RuntimeError", "(", "\"TabPooledChromium instances are not safe to share across multiple processes.\"", ")", "with", "self", ".", "__counter_lock", ":", "self", ".", "__active_tabs", ".", "setdefault", "(", "key", ",", "0", ")", "self", ".", "__active_tabs", "[", "key", "]", "+=", "1", "if", "self", ".", "__active_tabs", "[", "key", "]", ">", "1", ":", "self", ".", "log", ".", "warning", "(", "\"Tab with key %s checked out more then once simultaneously\"", ")", "try", ":", "lock", ",", "tab", "=", "self", ".", "__tab_cache", "[", "key", "]", "with", "lock", ":", "yield", "tab", "finally", ":", "with", "self", ".", "__counter_lock", ":", "self", ".", "__active_tabs", "[", "key", "]", "-=", "1", "if", "self", ".", "__active_tabs", "[", "key", "]", "==", "0", ":", "self", ".", "__active_tabs", ".", "pop", "(", "key", ")" ]
39.822222
27.733333
def get(self, nb=0): """Get the history as a dict of list""" return {i: self.stats_history[i].history_raw(nb=nb) for i in self.stats_history}
[ "def", "get", "(", "self", ",", "nb", "=", "0", ")", ":", "return", "{", "i", ":", "self", ".", "stats_history", "[", "i", "]", ".", "history_raw", "(", "nb", "=", "nb", ")", "for", "i", "in", "self", ".", "stats_history", "}" ]
51.666667
22.666667
def case(self, case_id): """Fetch a case from the database.""" case_obj = self.query(Case).filter_by(case_id=case_id).first() return case_obj
[ "def", "case", "(", "self", ",", "case_id", ")", ":", "case_obj", "=", "self", ".", "query", "(", "Case", ")", ".", "filter_by", "(", "case_id", "=", "case_id", ")", ".", "first", "(", ")", "return", "case_obj" ]
40.5
15.75
def allow_migrate(self, db, model): """ Make sure self._apps go to their own db """ if model._meta.app_label in self._apps: return getattr(model, '_db_alias', model._meta.app_label) == db return None
[ "def", "allow_migrate", "(", "self", ",", "db", ",", "model", ")", ":", "if", "model", ".", "_meta", ".", "app_label", "in", "self", ".", "_apps", ":", "return", "getattr", "(", "model", ",", "'_db_alias'", ",", "model", ".", "_meta", ".", "app_label", ")", "==", "db", "return", "None" ]
35
10.714286
def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool: """Determine if a number of Card Actions are supported by a Channel. Args: channel_id (str): The Channel to check if the Card Actions are supported in. button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel. Returns: bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support that number of Card Actions. """ max_actions = { Channels.facebook: 3, Channels.skype: 3, Channels.ms_teams: 3, Channels.line: 99, Channels.slack: 100, Channels.emulator: 100, Channels.direct_line: 100, Channels.webchat: 100, Channels.cortana: 100, } return button_cnt <= max_actions[channel_id] if channel_id in max_actions else False
[ "def", "supports_card_actions", "(", "channel_id", ":", "str", ",", "button_cnt", ":", "int", "=", "100", ")", "->", "bool", ":", "max_actions", "=", "{", "Channels", ".", "facebook", ":", "3", ",", "Channels", ".", "skype", ":", "3", ",", "Channels", ".", "ms_teams", ":", "3", ",", "Channels", ".", "line", ":", "99", ",", "Channels", ".", "slack", ":", "100", ",", "Channels", ".", "emulator", ":", "100", ",", "Channels", ".", "direct_line", ":", "100", ",", "Channels", ".", "webchat", ":", "100", ",", "Channels", ".", "cortana", ":", "100", ",", "}", "return", "button_cnt", "<=", "max_actions", "[", "channel_id", "]", "if", "channel_id", "in", "max_actions", "else", "False" ]
41.652174
25.695652
def load_cov(name): '''Load a datafile with coverage file structure. ''' content = np.genfromtxt(name, skip_header=1, skip_footer=1, usecols=([2])) return content
[ "def", "load_cov", "(", "name", ")", ":", "content", "=", "np", ".", "genfromtxt", "(", "name", ",", "skip_header", "=", "1", ",", "skip_footer", "=", "1", ",", "usecols", "=", "(", "[", "2", "]", ")", ")", "return", "content" ]
29
27.666667
def intercept_(self): """ Intercept (bias) property .. note:: Intercept is defined only for linear learners Intercept (bias) is only defined when the linear model is chosen as base learner (`booster=gblinear`). It is not defined for other base learner types, such as tree learners (`booster=gbtree`). Returns ------- intercept_ : array of shape ``(1,)`` or ``[n_classes]`` """ if getattr(self, 'booster', None) is not None and self.booster != 'gblinear': raise AttributeError('Intercept (bias) is not defined for Booster type {}' .format(self.booster)) b = self.get_booster() return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias'])
[ "def", "intercept_", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'booster'", ",", "None", ")", "is", "not", "None", "and", "self", ".", "booster", "!=", "'gblinear'", ":", "raise", "AttributeError", "(", "'Intercept (bias) is not defined for Booster type {}'", ".", "format", "(", "self", ".", "booster", ")", ")", "b", "=", "self", ".", "get_booster", "(", ")", "return", "np", ".", "array", "(", "json", ".", "loads", "(", "b", ".", "get_dump", "(", "dump_format", "=", "'json'", ")", "[", "0", "]", ")", "[", "'bias'", "]", ")" ]
41.684211
26.421053
def stop(self): """Stop the background emulation loop.""" if self._started is False: raise ArgumentError("EmulationLoop.stop() called without calling start()") self.verify_calling_thread(False, "Cannot call EmulationLoop.stop() from inside the event loop") if self._thread.is_alive(): self._loop.call_soon_threadsafe(self._loop.create_task, self._clean_shutdown()) self._thread.join()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_started", "is", "False", ":", "raise", "ArgumentError", "(", "\"EmulationLoop.stop() called without calling start()\"", ")", "self", ".", "verify_calling_thread", "(", "False", ",", "\"Cannot call EmulationLoop.stop() from inside the event loop\"", ")", "if", "self", ".", "_thread", ".", "is_alive", "(", ")", ":", "self", ".", "_loop", ".", "call_soon_threadsafe", "(", "self", ".", "_loop", ".", "create_task", ",", "self", ".", "_clean_shutdown", "(", ")", ")", "self", ".", "_thread", ".", "join", "(", ")" ]
40.454545
29.636364
def no_intersection(to_validate, constraint, violation_cfg): """ Returns violation message if validated and constraint sets have no intersection :param to_validate: :param constraint: :param violation_cfg: :return: """ if len(constraint) == 0 or len(set(constraint).intersection(to_validate)) > 0: return None else: violation_cfg[Check.CFG_KEY_VIOLATION_MSG] = violation_cfg[Check.CFG_KEY_VIOLATION_MSG].format(constraint) return violation_cfg
[ "def", "no_intersection", "(", "to_validate", ",", "constraint", ",", "violation_cfg", ")", ":", "if", "len", "(", "constraint", ")", "==", "0", "or", "len", "(", "set", "(", "constraint", ")", ".", "intersection", "(", "to_validate", ")", ")", ">", "0", ":", "return", "None", "else", ":", "violation_cfg", "[", "Check", ".", "CFG_KEY_VIOLATION_MSG", "]", "=", "violation_cfg", "[", "Check", ".", "CFG_KEY_VIOLATION_MSG", "]", ".", "format", "(", "constraint", ")", "return", "violation_cfg" ]
37.769231
24.692308
def new_noncomment(self, start_lineno, end_lineno): """ We are transitioning from a noncomment to a comment. """ block = NonComment(start_lineno, end_lineno) self.blocks.append(block) self.current_block = block
[ "def", "new_noncomment", "(", "self", ",", "start_lineno", ",", "end_lineno", ")", ":", "block", "=", "NonComment", "(", "start_lineno", ",", "end_lineno", ")", "self", ".", "blocks", ".", "append", "(", "block", ")", "self", ".", "current_block", "=", "block" ]
40.833333
6
def or_fault(a, b, out, fault): """Returns True if OR(a, b) == out and fault == 0 or OR(a, b) != out and fault == 1.""" if (a or b) == out: return fault == 0 else: return fault == 1
[ "def", "or_fault", "(", "a", ",", "b", ",", "out", ",", "fault", ")", ":", "if", "(", "a", "or", "b", ")", "==", "out", ":", "return", "fault", "==", "0", "else", ":", "return", "fault", "==", "1" ]
34
14.5
def known_author_patterns(self, val): ''' val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}] ''' def create_pat_from_dict(val): '''Helper function used to create an AuthorPatterns from a dictionary ''' if "tag" in val: pat = AuthorPattern(tag=val["tag"]) if "attribute" in val: pat.attr = val["attribute"] pat.value = val["value"] elif "attribute" in val: pat = AuthorPattern(attr=val["attribute"], value=val["value"], content=val["content"]) if "subpattern" in val: pat.subpattern = create_pat_from_dict(val["subpattern"]) return pat if isinstance(val, list): self._known_author_patterns = [ x if isinstance(x, AuthorPattern) else create_pat_from_dict(x) for x in val ] + self.known_author_patterns elif isinstance(val, AuthorPattern): self._known_author_patterns.insert(0, val) elif isinstance(val, dict): self._known_author_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use an AuthorPattern.".format(type(val)))
[ "def", "known_author_patterns", "(", "self", ",", "val", ")", ":", "def", "create_pat_from_dict", "(", "val", ")", ":", "'''Helper function used to create an AuthorPatterns from a dictionary\n '''", "if", "\"tag\"", "in", "val", ":", "pat", "=", "AuthorPattern", "(", "tag", "=", "val", "[", "\"tag\"", "]", ")", "if", "\"attribute\"", "in", "val", ":", "pat", ".", "attr", "=", "val", "[", "\"attribute\"", "]", "pat", ".", "value", "=", "val", "[", "\"value\"", "]", "elif", "\"attribute\"", "in", "val", ":", "pat", "=", "AuthorPattern", "(", "attr", "=", "val", "[", "\"attribute\"", "]", ",", "value", "=", "val", "[", "\"value\"", "]", ",", "content", "=", "val", "[", "\"content\"", "]", ")", "if", "\"subpattern\"", "in", "val", ":", "pat", ".", "subpattern", "=", "create_pat_from_dict", "(", "val", "[", "\"subpattern\"", "]", ")", "return", "pat", "if", "isinstance", "(", "val", ",", "list", ")", ":", "self", ".", "_known_author_patterns", "=", "[", "x", "if", "isinstance", "(", "x", ",", "AuthorPattern", ")", "else", "create_pat_from_dict", "(", "x", ")", "for", "x", "in", "val", "]", "+", "self", ".", "known_author_patterns", "elif", "isinstance", "(", "val", ",", "AuthorPattern", ")", ":", "self", ".", "_known_author_patterns", ".", "insert", "(", "0", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "self", ".", "_known_author_patterns", ".", "insert", "(", "0", ",", "create_pat_from_dict", "(", "val", ")", ")", "else", ":", "raise", "Exception", "(", "\"Unknown type: {}. Use an AuthorPattern.\"", ".", "format", "(", "type", "(", "val", ")", ")", ")" ]
45.617647
21.558824
def incr_obj(obj, **attrs): """Increments context variables """ for name, value in attrs.iteritems(): v = getattr(obj, name, None) if not hasattr(obj, name) or v is None: v = 0 setattr(obj, name, v + value)
[ "def", "incr_obj", "(", "obj", ",", "*", "*", "attrs", ")", ":", "for", "name", ",", "value", "in", "attrs", ".", "iteritems", "(", ")", ":", "v", "=", "getattr", "(", "obj", ",", "name", ",", "None", ")", "if", "not", "hasattr", "(", "obj", ",", "name", ")", "or", "v", "is", "None", ":", "v", "=", "0", "setattr", "(", "obj", ",", "name", ",", "v", "+", "value", ")" ]
30.875
6.375
def issueBatchJob(self, jobNode): """ Issues the following command returning a unique jobID. Command is the string to run, memory is an int giving the number of bytes the job needs to run in and cores is the number of cpus needed for the job and error-file is the path of the file to place any std-err/std-out in. """ localID = self.handleLocalJob(jobNode) if localID: return localID self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk) jobID = self.getNextJobID() job = ToilJob(jobID=jobID, name=str(jobNode), resources=MesosShape(wallTime=0, **jobNode._requirements), command=jobNode.command, userScript=self.userScript, environment=self.environment.copy(), workerCleanupInfo=self.workerCleanupInfo) jobType = job.resources log.debug("Queueing the job command: %s with job id: %s ...", jobNode.command, str(jobID)) # TODO: round all elements of resources self.taskResources[jobID] = job.resources self.jobQueues.insertJob(job, jobType) log.debug("... queued") return jobID
[ "def", "issueBatchJob", "(", "self", ",", "jobNode", ")", ":", "localID", "=", "self", ".", "handleLocalJob", "(", "jobNode", ")", "if", "localID", ":", "return", "localID", "self", ".", "checkResourceRequest", "(", "jobNode", ".", "memory", ",", "jobNode", ".", "cores", ",", "jobNode", ".", "disk", ")", "jobID", "=", "self", ".", "getNextJobID", "(", ")", "job", "=", "ToilJob", "(", "jobID", "=", "jobID", ",", "name", "=", "str", "(", "jobNode", ")", ",", "resources", "=", "MesosShape", "(", "wallTime", "=", "0", ",", "*", "*", "jobNode", ".", "_requirements", ")", ",", "command", "=", "jobNode", ".", "command", ",", "userScript", "=", "self", ".", "userScript", ",", "environment", "=", "self", ".", "environment", ".", "copy", "(", ")", ",", "workerCleanupInfo", "=", "self", ".", "workerCleanupInfo", ")", "jobType", "=", "job", ".", "resources", "log", ".", "debug", "(", "\"Queueing the job command: %s with job id: %s ...\"", ",", "jobNode", ".", "command", ",", "str", "(", "jobID", ")", ")", "# TODO: round all elements of resources", "self", ".", "taskResources", "[", "jobID", "]", "=", "job", ".", "resources", "self", ".", "jobQueues", ".", "insertJob", "(", "job", ",", "jobType", ")", "log", ".", "debug", "(", "\"... queued\"", ")", "return", "jobID" ]
46.222222
21.037037
def register_routes(app): """Register routes.""" from . import controllers from flask.blueprints import Blueprint for module in _import_submodules_from_package(controllers): bp = getattr(module, 'bp') if bp and isinstance(bp, Blueprint): app.register_blueprint(bp)
[ "def", "register_routes", "(", "app", ")", ":", "from", ".", "import", "controllers", "from", "flask", ".", "blueprints", "import", "Blueprint", "for", "module", "in", "_import_submodules_from_package", "(", "controllers", ")", ":", "bp", "=", "getattr", "(", "module", ",", "'bp'", ")", "if", "bp", "and", "isinstance", "(", "bp", ",", "Blueprint", ")", ":", "app", ".", "register_blueprint", "(", "bp", ")" ]
33.444444
11.444444
def serialize(self, dt): """ Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`. """ if dt is None: return str(dt) return dt.strftime(self.date_format)
[ "def", "serialize", "(", "self", ",", "dt", ")", ":", "if", "dt", "is", "None", ":", "return", "str", "(", "dt", ")", "return", "dt", ".", "strftime", "(", "self", ".", "date_format", ")" ]
32.714286
14.714286
def get_pubmed_citation_response(pubmed_identifiers: Iterable[str]): """Get the response from PubMed E-Utils for a given list of PubMed identifiers. :param pubmed_identifiers: :rtype: dict """ pubmed_identifiers = list(pubmed_identifiers) url = EUTILS_URL_FMT.format(','.join( pubmed_identifier for pubmed_identifier in pubmed_identifiers if pubmed_identifier )) response = requests.get(url) return response.json()
[ "def", "get_pubmed_citation_response", "(", "pubmed_identifiers", ":", "Iterable", "[", "str", "]", ")", ":", "pubmed_identifiers", "=", "list", "(", "pubmed_identifiers", ")", "url", "=", "EUTILS_URL_FMT", ".", "format", "(", "','", ".", "join", "(", "pubmed_identifier", "for", "pubmed_identifier", "in", "pubmed_identifiers", "if", "pubmed_identifier", ")", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "return", "response", ".", "json", "(", ")" ]
33
14.714286
def _create_column(values, dtype): "Creates a column from values with dtype" if str(dtype) == "tensor(int64)": return numpy.array(values, dtype=numpy.int64) elif str(dtype) == "tensor(float)": return numpy.array(values, dtype=numpy.float32) else: raise OnnxRuntimeAssertionError("Unable to create one column from dtype '{0}'".format(dtype))
[ "def", "_create_column", "(", "values", ",", "dtype", ")", ":", "if", "str", "(", "dtype", ")", "==", "\"tensor(int64)\"", ":", "return", "numpy", ".", "array", "(", "values", ",", "dtype", "=", "numpy", ".", "int64", ")", "elif", "str", "(", "dtype", ")", "==", "\"tensor(float)\"", ":", "return", "numpy", ".", "array", "(", "values", ",", "dtype", "=", "numpy", ".", "float32", ")", "else", ":", "raise", "OnnxRuntimeAssertionError", "(", "\"Unable to create one column from dtype '{0}'\"", ".", "format", "(", "dtype", ")", ")" ]
46.625
16.875
def find_max_and_min_frequencies(name, mass_range_params, freqs): """ ADD DOCS """ cutoff_fns = pnutils.named_frequency_cutoffs if name not in cutoff_fns.keys(): err_msg = "%s not recognized as a valid cutoff frequency choice." %name err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys()) raise ValueError(err_msg) # Can I do this quickly? total_mass_approxs = { "SchwarzISCO": pnutils.f_SchwarzISCO, "LightRing" : pnutils.f_LightRing, "ERD" : pnutils.f_ERD } if name in total_mass_approxs.keys(): # This can be done quickly if the cutoff only depends on total mass # Assumes that lower total mass = higher cutoff frequency upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass) lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass) else: # Do this numerically # FIXME: Is 1000000 the right choice? I think so, but just highlighting mass1, mass2, spin1z, spin2z = \ get_random_mass(1000000, mass_range_params) mass_dict = {} mass_dict['mass1'] = mass1 mass_dict['mass2'] = mass2 mass_dict['spin1z'] = spin1z mass_dict['spin2z'] = spin2z tmp_freqs = cutoff_fns[name](mass_dict) upper_f_cutoff = tmp_freqs.max() lower_f_cutoff = tmp_freqs.min() cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff]) if lower_f_cutoff < freqs.min(): warn_msg = "WARNING: " warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,) warn_msg += "which is lower than the lowest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.min()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the lowest available metric frequency." logging.warn(warn_msg) if upper_f_cutoff > freqs.max(): warn_msg = "WARNING: " warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,) warn_msg += "which is larger than the highest frequency calculated " warn_msg += "for the metric: %s Hz. " %(freqs.max()) warn_msg += "Distances for these waveforms will be calculated at " warn_msg += "the largest available metric frequency." 
logging.warn(warn_msg) return find_closest_calculated_frequencies(cutoffs, freqs)
[ "def", "find_max_and_min_frequencies", "(", "name", ",", "mass_range_params", ",", "freqs", ")", ":", "cutoff_fns", "=", "pnutils", ".", "named_frequency_cutoffs", "if", "name", "not", "in", "cutoff_fns", ".", "keys", "(", ")", ":", "err_msg", "=", "\"%s not recognized as a valid cutoff frequency choice.\"", "%", "name", "err_msg", "+=", "\"Recognized choices: \"", "+", "\" \"", ".", "join", "(", "cutoff_fns", ".", "keys", "(", ")", ")", "raise", "ValueError", "(", "err_msg", ")", "# Can I do this quickly?", "total_mass_approxs", "=", "{", "\"SchwarzISCO\"", ":", "pnutils", ".", "f_SchwarzISCO", ",", "\"LightRing\"", ":", "pnutils", ".", "f_LightRing", ",", "\"ERD\"", ":", "pnutils", ".", "f_ERD", "}", "if", "name", "in", "total_mass_approxs", ".", "keys", "(", ")", ":", "# This can be done quickly if the cutoff only depends on total mass", "# Assumes that lower total mass = higher cutoff frequency", "upper_f_cutoff", "=", "total_mass_approxs", "[", "name", "]", "(", "mass_range_params", ".", "minTotMass", ")", "lower_f_cutoff", "=", "total_mass_approxs", "[", "name", "]", "(", "mass_range_params", ".", "maxTotMass", ")", "else", ":", "# Do this numerically", "# FIXME: Is 1000000 the right choice? 
I think so, but just highlighting", "mass1", ",", "mass2", ",", "spin1z", ",", "spin2z", "=", "get_random_mass", "(", "1000000", ",", "mass_range_params", ")", "mass_dict", "=", "{", "}", "mass_dict", "[", "'mass1'", "]", "=", "mass1", "mass_dict", "[", "'mass2'", "]", "=", "mass2", "mass_dict", "[", "'spin1z'", "]", "=", "spin1z", "mass_dict", "[", "'spin2z'", "]", "=", "spin2z", "tmp_freqs", "=", "cutoff_fns", "[", "name", "]", "(", "mass_dict", ")", "upper_f_cutoff", "=", "tmp_freqs", ".", "max", "(", ")", "lower_f_cutoff", "=", "tmp_freqs", ".", "min", "(", ")", "cutoffs", "=", "numpy", ".", "array", "(", "[", "lower_f_cutoff", ",", "upper_f_cutoff", "]", ")", "if", "lower_f_cutoff", "<", "freqs", ".", "min", "(", ")", ":", "warn_msg", "=", "\"WARNING: \"", "warn_msg", "+=", "\"Lowest frequency cutoff is %s Hz \"", "%", "(", "lower_f_cutoff", ",", ")", "warn_msg", "+=", "\"which is lower than the lowest frequency calculated \"", "warn_msg", "+=", "\"for the metric: %s Hz. \"", "%", "(", "freqs", ".", "min", "(", ")", ")", "warn_msg", "+=", "\"Distances for these waveforms will be calculated at \"", "warn_msg", "+=", "\"the lowest available metric frequency.\"", "logging", ".", "warn", "(", "warn_msg", ")", "if", "upper_f_cutoff", ">", "freqs", ".", "max", "(", ")", ":", "warn_msg", "=", "\"WARNING: \"", "warn_msg", "+=", "\"Highest frequency cutoff is %s Hz \"", "%", "(", "upper_f_cutoff", ",", ")", "warn_msg", "+=", "\"which is larger than the highest frequency calculated \"", "warn_msg", "+=", "\"for the metric: %s Hz. \"", "%", "(", "freqs", ".", "max", "(", ")", ")", "warn_msg", "+=", "\"Distances for these waveforms will be calculated at \"", "warn_msg", "+=", "\"the largest available metric frequency.\"", "logging", ".", "warn", "(", "warn_msg", ")", "return", "find_closest_calculated_frequencies", "(", "cutoffs", ",", "freqs", ")" ]
43.454545
18.727273
def failover(self, name): """Force a failover of a named master.""" fut = self.execute(b'FAILOVER', name) return wait_ok(fut)
[ "def", "failover", "(", "self", ",", "name", ")", ":", "fut", "=", "self", ".", "execute", "(", "b'FAILOVER'", ",", "name", ")", "return", "wait_ok", "(", "fut", ")" ]
36.5
8.25
def _open_for_write(self): """open the file in write mode""" def put_request(body): """ :param body: """ ownerid, datasetid = parse_dataset_key(self._dataset_key) response = requests.put( "{}/uploads/{}/{}/files/{}".format( self._api_host, ownerid, datasetid, self._file_name), data=body, headers={ 'User-Agent': self._user_agent, 'Authorization': 'Bearer {}'.format( self._config.auth_token) }) self._response_queue.put(response) body = iter(self._queue.get, self._sentinel) self._thread = Thread(target=put_request, args=(body,)) self._thread.start()
[ "def", "_open_for_write", "(", "self", ")", ":", "def", "put_request", "(", "body", ")", ":", "\"\"\"\n\n :param body:\n \"\"\"", "ownerid", ",", "datasetid", "=", "parse_dataset_key", "(", "self", ".", "_dataset_key", ")", "response", "=", "requests", ".", "put", "(", "\"{}/uploads/{}/{}/files/{}\"", ".", "format", "(", "self", ".", "_api_host", ",", "ownerid", ",", "datasetid", ",", "self", ".", "_file_name", ")", ",", "data", "=", "body", ",", "headers", "=", "{", "'User-Agent'", ":", "self", ".", "_user_agent", ",", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "_config", ".", "auth_token", ")", "}", ")", "self", ".", "_response_queue", ".", "put", "(", "response", ")", "body", "=", "iter", "(", "self", ".", "_queue", ".", "get", ",", "self", ".", "_sentinel", ")", "self", ".", "_thread", "=", "Thread", "(", "target", "=", "put_request", ",", "args", "=", "(", "body", ",", ")", ")", "self", ".", "_thread", ".", "start", "(", ")" ]
36.045455
15.272727
def fit_transform(self, X, y=None): """ Fit the imputer and then transform input `X` Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ X_original, missing_mask = self.prepare_input_data(X) observed_mask = ~missing_mask X = X_original.copy() if self.normalizer is not None: X = self.normalizer.fit_transform(X) X_filled = self.fill(X, missing_mask, inplace=True) if not isinstance(X_filled, np.ndarray): raise TypeError( "Expected %s.fill() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_filled))) X_result = self.solve(X_filled, missing_mask) if not isinstance(X_result, np.ndarray): raise TypeError( "Expected %s.solve() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_result))) X_result = self.project_result(X=X_result) X_result[observed_mask] = X_original[observed_mask] return X_result
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "X_original", ",", "missing_mask", "=", "self", ".", "prepare_input_data", "(", "X", ")", "observed_mask", "=", "~", "missing_mask", "X", "=", "X_original", ".", "copy", "(", ")", "if", "self", ".", "normalizer", "is", "not", "None", ":", "X", "=", "self", ".", "normalizer", ".", "fit_transform", "(", "X", ")", "X_filled", "=", "self", ".", "fill", "(", "X", ",", "missing_mask", ",", "inplace", "=", "True", ")", "if", "not", "isinstance", "(", "X_filled", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"Expected %s.fill() to return NumPy array but got %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "type", "(", "X_filled", ")", ")", ")", "X_result", "=", "self", ".", "solve", "(", "X_filled", ",", "missing_mask", ")", "if", "not", "isinstance", "(", "X_result", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"Expected %s.solve() to return NumPy array but got %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "type", "(", "X_result", ")", ")", ")", "X_result", "=", "self", ".", "project_result", "(", "X", "=", "X_result", ")", "X_result", "[", "observed_mask", "]", "=", "X_original", "[", "observed_mask", "]", "return", "X_result" ]
40.709677
15.806452
def extract_views_from_urlpatterns(self, urlpatterns, base='', namespace=None): """ Return a list of views from a list of urlpatterns. Each object in the returned list is a three-tuple: (view_func, regex, name) """ views = [] for p in urlpatterns: if isinstance(p, (URLPattern, RegexURLPattern)): try: if not p.name: name = p.name elif namespace: name = '{0}:{1}'.format(namespace, p.name) else: name = p.name pattern = describe_pattern(p) views.append((p.callback, base + pattern, name)) except ViewDoesNotExist: continue elif isinstance(p, (URLResolver, RegexURLResolver)): try: patterns = p.url_patterns except ImportError: continue if namespace and p.namespace: _namespace = '{0}:{1}'.format(namespace, p.namespace) else: _namespace = (p.namespace or namespace) pattern = describe_pattern(p) if isinstance(p, LocaleRegexURLResolver): for language in self.LANGUAGES: with translation.override(language[0]): views.extend(self.extract_views_from_urlpatterns(patterns, base + pattern, namespace=_namespace)) else: views.extend(self.extract_views_from_urlpatterns(patterns, base + pattern, namespace=_namespace)) elif hasattr(p, '_get_callback'): try: views.append((p._get_callback(), base + describe_pattern(p), p.name)) except ViewDoesNotExist: continue elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'): try: patterns = p.url_patterns except ImportError: continue views.extend(self.extract_views_from_urlpatterns(patterns, base + describe_pattern(p), namespace=namespace)) else: raise TypeError("%s does not appear to be a urlpattern object" % p) return views
[ "def", "extract_views_from_urlpatterns", "(", "self", ",", "urlpatterns", ",", "base", "=", "''", ",", "namespace", "=", "None", ")", ":", "views", "=", "[", "]", "for", "p", "in", "urlpatterns", ":", "if", "isinstance", "(", "p", ",", "(", "URLPattern", ",", "RegexURLPattern", ")", ")", ":", "try", ":", "if", "not", "p", ".", "name", ":", "name", "=", "p", ".", "name", "elif", "namespace", ":", "name", "=", "'{0}:{1}'", ".", "format", "(", "namespace", ",", "p", ".", "name", ")", "else", ":", "name", "=", "p", ".", "name", "pattern", "=", "describe_pattern", "(", "p", ")", "views", ".", "append", "(", "(", "p", ".", "callback", ",", "base", "+", "pattern", ",", "name", ")", ")", "except", "ViewDoesNotExist", ":", "continue", "elif", "isinstance", "(", "p", ",", "(", "URLResolver", ",", "RegexURLResolver", ")", ")", ":", "try", ":", "patterns", "=", "p", ".", "url_patterns", "except", "ImportError", ":", "continue", "if", "namespace", "and", "p", ".", "namespace", ":", "_namespace", "=", "'{0}:{1}'", ".", "format", "(", "namespace", ",", "p", ".", "namespace", ")", "else", ":", "_namespace", "=", "(", "p", ".", "namespace", "or", "namespace", ")", "pattern", "=", "describe_pattern", "(", "p", ")", "if", "isinstance", "(", "p", ",", "LocaleRegexURLResolver", ")", ":", "for", "language", "in", "self", ".", "LANGUAGES", ":", "with", "translation", ".", "override", "(", "language", "[", "0", "]", ")", ":", "views", ".", "extend", "(", "self", ".", "extract_views_from_urlpatterns", "(", "patterns", ",", "base", "+", "pattern", ",", "namespace", "=", "_namespace", ")", ")", "else", ":", "views", ".", "extend", "(", "self", ".", "extract_views_from_urlpatterns", "(", "patterns", ",", "base", "+", "pattern", ",", "namespace", "=", "_namespace", ")", ")", "elif", "hasattr", "(", "p", ",", "'_get_callback'", ")", ":", "try", ":", "views", ".", "append", "(", "(", "p", ".", "_get_callback", "(", ")", ",", "base", "+", "describe_pattern", "(", "p", ")", ",", "p", ".", 
"name", ")", ")", "except", "ViewDoesNotExist", ":", "continue", "elif", "hasattr", "(", "p", ",", "'url_patterns'", ")", "or", "hasattr", "(", "p", ",", "'_get_url_patterns'", ")", ":", "try", ":", "patterns", "=", "p", ".", "url_patterns", "except", "ImportError", ":", "continue", "views", ".", "extend", "(", "self", ".", "extract_views_from_urlpatterns", "(", "patterns", ",", "base", "+", "describe_pattern", "(", "p", ")", ",", "namespace", "=", "namespace", ")", ")", "else", ":", "raise", "TypeError", "(", "\"%s does not appear to be a urlpattern object\"", "%", "p", ")", "return", "views" ]
46.6
20.72
def surround_parse(self, node, pre_char, post_char): """Parse the subnodes of a given node. Subnodes with tags in the `ignore` list are ignored. Prepend `pre_char` and append `post_char` to the output in self.pieces.""" self.add_text(pre_char) self.subnode_parse(node) self.add_text(post_char)
[ "def", "surround_parse", "(", "self", ",", "node", ",", "pre_char", ",", "post_char", ")", ":", "self", ".", "add_text", "(", "pre_char", ")", "self", ".", "subnode_parse", "(", "node", ")", "self", ".", "add_text", "(", "post_char", ")" ]
47.857143
10.857143
def decompress(self, value: bytes, max_length: int = 0) -> bytes: """Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. If ``max_length`` is given, some input data may be left over in ``unconsumed_tail``; you must retrieve this value and pass it back to a future call to `decompress` if it is not empty. """ return self.decompressobj.decompress(value, max_length)
[ "def", "decompress", "(", "self", ",", "value", ":", "bytes", ",", "max_length", ":", "int", "=", "0", ")", "->", "bytes", ":", "return", "self", ".", "decompressobj", ".", "decompress", "(", "value", ",", "max_length", ")" ]
47.5
22.916667
def get_name(self): """Get name based on 4 class attributes Each attribute is substituted by '' if attribute does not exist :return: dependent_host_name/dependent_service_description..host_name/service_description :rtype: str TODO: Clean this function (use format for string) """ return getattr(self, 'dependent_host_name', '') + '/'\ + getattr(self, 'dependent_service_description', '') \ + '..' + getattr(self, 'host_name', '') + '/' \ + getattr(self, 'service_description', '')
[ "def", "get_name", "(", "self", ")", ":", "return", "getattr", "(", "self", ",", "'dependent_host_name'", ",", "''", ")", "+", "'/'", "+", "getattr", "(", "self", ",", "'dependent_service_description'", ",", "''", ")", "+", "'..'", "+", "getattr", "(", "self", ",", "'host_name'", ",", "''", ")", "+", "'/'", "+", "getattr", "(", "self", ",", "'service_description'", ",", "''", ")" ]
46.833333
22.333333
def merge_lines(top, bot, icod="top"): """ Merges two lines (top and bot) in the way that the overlapping make senses. Args: top (str): the top line bot (str): the bottom line icod (top or bot): in case of doubt, which line should have priority? Default: "top". Returns: str: The merge of both lines. """ ret = "" for topc, botc in zip(top, bot): if topc == botc: ret += topc elif topc in '┼╪' and botc == " ": ret += "│" elif topc == " ": ret += botc elif topc in '┬╥' and botc in " ║│" and icod == "top": ret += topc elif topc in '┬' and botc == " " and icod == "bot": ret += '│' elif topc in '╥' and botc == " " and icod == "bot": ret += '║' elif topc in '┬│' and botc == "═": ret += '╪' elif topc in '┬│' and botc == "─": ret += '┼' elif topc in '└┘║│░' and botc == " " and icod == "top": ret += topc elif topc in '─═' and botc == " " and icod == "top": ret += topc elif topc in '─═' and botc == " " and icod == "bot": ret += botc elif topc in "║╥" and botc in "═": ret += "╬" elif topc in "║╥" and botc in "─": ret += "╫" elif topc in '╫╬' and botc in " ": ret += "║" elif topc == '└' and botc == "┌": ret += "├" elif topc == '┘' and botc == "┐": ret += "┤" elif botc in "┐┌" and icod == 'top': ret += "┬" elif topc in "┘└" and botc in "─" and icod == 'top': ret += "┴" else: ret += botc return ret
[ "def", "merge_lines", "(", "top", ",", "bot", ",", "icod", "=", "\"top\"", ")", ":", "ret", "=", "\"\"", "for", "topc", ",", "botc", "in", "zip", "(", "top", ",", "bot", ")", ":", "if", "topc", "==", "botc", ":", "ret", "+=", "topc", "elif", "topc", "in", "'┼╪' and", "bot", " == ", " \"", "", "", "ret", "+=", "\"│\"", "elif", "topc", "==", "\" \"", ":", "ret", "+=", "botc", "elif", "topc", "in", "'┬╥' and", "bot", " in ", " ║", "\" and ico", " ==", "\"top", ":", "", "", "ret", "+=", "topc", "elif", "topc", "in", "'┬' a", "d b", "tc =", " \"", "\" a", "d i", "od =", " \"", "ot\":", "", "ret", "+=", "'│'", "elif", "topc", "in", "'╥' a", "d b", "tc =", " \"", "\" a", "d i", "od =", " \"", "ot\":", "", "ret", "+=", "'║'", "elif", "topc", "in", "'┬│' and", "bot", " == ", "═\"", "", "", "ret", "+=", "'╪'", "elif", "topc", "in", "'┬│' and", "bot", " == ", "─\"", "", "", "ret", "+=", "'┼'", "elif", "topc", "in", "'└┘║│░' and botc ", "= \"", "\" an", " i", "od ", "= \"", "op\":", "", "", "", "ret", "+=", "topc", "elif", "topc", "in", "'─═' and", "bot", " == ", " \"", "and", "ico", " == ", "to", "\":", "", "ret", "+=", "topc", "elif", "topc", "in", "'─═' and", "bot", " == ", " \"", "and", "ico", " == ", "bo", "\":", "", "ret", "+=", "botc", "elif", "topc", "in", "\"║╥\" and", "bot", " in ", "═\"", "", "", "ret", "+=", "\"╬\"", "elif", "topc", "in", "\"║╥\" and", "bot", " in ", "─\"", "", "", "ret", "+=", "\"╫\"", "elif", "topc", "in", "'╫╬' and", "bot", " in ", " \"", "", "", "ret", "+=", "\"║\"", "elif", "topc", "==", "'└' a", "d b", "tc =", " \"", "\":", "", "ret", "+=", "\"├\"", "elif", "topc", "==", "'┘' a", "d b", "tc =", " \"", "\":", "", "ret", "+=", "\"┤\"", "elif", "botc", "in", "\"┐┌\" and", "ico", " == ", "to", "':", "", "ret", "+=", "\"┬\"", "elif", "topc", "in", "\"┘└\" and", "bot", " in ", "─\"", "and i", "od ", "= 't", "p'", "", "", "ret", "+=", "\"┴\"", "else", ":", "ret", "+=", "botc", "return", "ret" ]
37.196078
14.45098
def merge(self, base, head, message=''): """Perform a merge from ``head`` into ``base``. :param str base: (required), where you're merging into :param str head: (required), where you're merging from :param str message: (optional), message to be used for the commit :returns: :class:`RepoCommit <github3.repos.commit.RepoCommit>` """ url = self._build_url('merges', base_url=self._api) data = {'base': base, 'head': head} if message: data['commit_message'] = message json = self._json(self._post(url, data=data), 201) return RepoCommit(json, self) if json else None
[ "def", "merge", "(", "self", ",", "base", ",", "head", ",", "message", "=", "''", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'merges'", ",", "base_url", "=", "self", ".", "_api", ")", "data", "=", "{", "'base'", ":", "base", ",", "'head'", ":", "head", "}", "if", "message", ":", "data", "[", "'commit_message'", "]", "=", "message", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", "=", "data", ")", ",", "201", ")", "return", "RepoCommit", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
46.571429
16.285714
def get_implicit_depends_on(input_hash, depends_on): ''' Add DNAnexus links to non-closed data objects in input_hash to depends_on ''' q = [] for field in input_hash: possible_dep = get_nonclosed_data_obj_link(input_hash[field]) if possible_dep is not None: depends_on.append(possible_dep) elif isinstance(input_hash[field], list) or isinstance(input_hash[field], dict): q.append(input_hash[field]) while len(q) > 0: thing = q.pop() if isinstance(thing, list): for i in range(len(thing)): possible_dep = get_nonclosed_data_obj_link(thing[i]) if possible_dep is not None: depends_on.append(possible_dep) elif isinstance(thing[i], list) or isinstance(thing[i], dict): q.append(thing[i]) else: for field in thing: possible_dep = get_nonclosed_data_obj_link(thing[field]) if possible_dep is not None: depends_on.append(possible_dep) elif isinstance(thing[field], list) or isinstance(thing[field], dict): q.append(thing[field])
[ "def", "get_implicit_depends_on", "(", "input_hash", ",", "depends_on", ")", ":", "q", "=", "[", "]", "for", "field", "in", "input_hash", ":", "possible_dep", "=", "get_nonclosed_data_obj_link", "(", "input_hash", "[", "field", "]", ")", "if", "possible_dep", "is", "not", "None", ":", "depends_on", ".", "append", "(", "possible_dep", ")", "elif", "isinstance", "(", "input_hash", "[", "field", "]", ",", "list", ")", "or", "isinstance", "(", "input_hash", "[", "field", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "input_hash", "[", "field", "]", ")", "while", "len", "(", "q", ")", ">", "0", ":", "thing", "=", "q", ".", "pop", "(", ")", "if", "isinstance", "(", "thing", ",", "list", ")", ":", "for", "i", "in", "range", "(", "len", "(", "thing", ")", ")", ":", "possible_dep", "=", "get_nonclosed_data_obj_link", "(", "thing", "[", "i", "]", ")", "if", "possible_dep", "is", "not", "None", ":", "depends_on", ".", "append", "(", "possible_dep", ")", "elif", "isinstance", "(", "thing", "[", "i", "]", ",", "list", ")", "or", "isinstance", "(", "thing", "[", "i", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "thing", "[", "i", "]", ")", "else", ":", "for", "field", "in", "thing", ":", "possible_dep", "=", "get_nonclosed_data_obj_link", "(", "thing", "[", "field", "]", ")", "if", "possible_dep", "is", "not", "None", ":", "depends_on", ".", "append", "(", "possible_dep", ")", "elif", "isinstance", "(", "thing", "[", "field", "]", ",", "list", ")", "or", "isinstance", "(", "thing", "[", "field", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "thing", "[", "field", "]", ")" ]
41.103448
19.931034
def _set_is_address_family_v4(self, v, load=False): """ Setter method for is_address_family_v4, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v4 (container) If this variable is read-only (config: false) in the source YANG file, then _set_is_address_family_v4 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_is_address_family_v4() directly. YANG Description: ISIS ipv4 address family """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=is_address_family_v4.is_address_family_v4, is_container='container', presence=False, yang_name="is-address-family-v4", rest_name="is-address-family-v4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-address-family-v4', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """is_address_family_v4 must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=is_address_family_v4.is_address_family_v4, is_container='container', presence=False, yang_name="is-address-family-v4", rest_name="is-address-family-v4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-address-family-v4', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__is_address_family_v4 = t if hasattr(self, '_set'): self._set()
[ "def", "_set_is_address_family_v4", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "is_address_family_v4", ".", "is_address_family_v4", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"is-address-family-v4\"", ",", "rest_name", "=", "\"is-address-family-v4\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'isis-address-family-v4'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-isis-operational'", ",", "defining_module", "=", "'brocade-isis-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"is_address_family_v4 must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=is_address_family_v4.is_address_family_v4, is_container='container', presence=False, yang_name=\"is-address-family-v4\", rest_name=\"is-address-family-v4\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-address-family-v4', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__is_address_family_v4", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
80.083333
38.875
def search(self): """ Construct the Search object. """ s = Search(doc_type=self.doc_types, using=es.client, index=es.index_name) # don't return any fields, just the metadata s = s.fields([]) # Sort from parameters s = s.sort(*self.sorts) # Paginate from parameters s = s[self.page_start:self.page_end] # Same construction as parent class # Allows to give the same signature as simple search # ie. Response(data) instead of Response(search, data) return s.response_class(partial(SearchResult, self))
[ "def", "search", "(", "self", ")", ":", "s", "=", "Search", "(", "doc_type", "=", "self", ".", "doc_types", ",", "using", "=", "es", ".", "client", ",", "index", "=", "es", ".", "index_name", ")", "# don't return any fields, just the metadata", "s", "=", "s", ".", "fields", "(", "[", "]", ")", "# Sort from parameters", "s", "=", "s", ".", "sort", "(", "*", "self", ".", "sorts", ")", "# Paginate from parameters", "s", "=", "s", "[", "self", ".", "page_start", ":", "self", ".", "page_end", "]", "# Same construction as parent class", "# Allows to give the same signature as simple search", "# ie. Response(data) instead of Response(search, data)", "return", "s", ".", "response_class", "(", "partial", "(", "SearchResult", ",", "self", ")", ")" ]
38.375
10.625
def convert_compartment_entry(self, compartment, adjacencies): """Convert compartment entry to YAML dict. Args: compartment: :class:`psamm.datasource.entry.CompartmentEntry`. adjacencies: Sequence of IDs or a single ID of adjacent compartments (or None). """ d = OrderedDict() d['id'] = compartment.id if adjacencies is not None: d['adjacent_to'] = adjacencies order = {key: i for i, key in enumerate(['name'])} prop_keys = set(compartment.properties) for prop in sorted(prop_keys, key=lambda x: (order.get(x, 1000), x)): if compartment.properties[prop] is not None: d[prop] = compartment.properties[prop] return d
[ "def", "convert_compartment_entry", "(", "self", ",", "compartment", ",", "adjacencies", ")", ":", "d", "=", "OrderedDict", "(", ")", "d", "[", "'id'", "]", "=", "compartment", ".", "id", "if", "adjacencies", "is", "not", "None", ":", "d", "[", "'adjacent_to'", "]", "=", "adjacencies", "order", "=", "{", "key", ":", "i", "for", "i", ",", "key", "in", "enumerate", "(", "[", "'name'", "]", ")", "}", "prop_keys", "=", "set", "(", "compartment", ".", "properties", ")", "for", "prop", "in", "sorted", "(", "prop_keys", ",", "key", "=", "lambda", "x", ":", "(", "order", ".", "get", "(", "x", ",", "1000", ")", ",", "x", ")", ")", ":", "if", "compartment", ".", "properties", "[", "prop", "]", "is", "not", "None", ":", "d", "[", "prop", "]", "=", "compartment", ".", "properties", "[", "prop", "]", "return", "d" ]
37.333333
17.571429
def create_rectangular_prism(origin, size): ''' Return a Mesh which is an axis-aligned rectangular prism. One vertex is `origin`; the diametrically opposite vertex is `origin + size`. size: 3x1 array. ''' from lace.topology import quads_to_tris lower_base_plane = np.array([ # Lower base plane origin, origin + np.array([size[0], 0, 0]), origin + np.array([size[0], 0, size[2]]), origin + np.array([0, 0, size[2]]), ]) upper_base_plane = lower_base_plane + np.array([0, size[1], 0]) vertices = np.vstack([lower_base_plane, upper_base_plane]) faces = quads_to_tris(np.array([ [0, 1, 2, 3], # lower base (-y) [7, 6, 5, 4], # upper base (+y) [4, 5, 1, 0], # +z face [5, 6, 2, 1], # +x face [6, 7, 3, 2], # -z face [3, 7, 4, 0], # -x face ])) return Mesh(v=vertices, f=faces)
[ "def", "create_rectangular_prism", "(", "origin", ",", "size", ")", ":", "from", "lace", ".", "topology", "import", "quads_to_tris", "lower_base_plane", "=", "np", ".", "array", "(", "[", "# Lower base plane", "origin", ",", "origin", "+", "np", ".", "array", "(", "[", "size", "[", "0", "]", ",", "0", ",", "0", "]", ")", ",", "origin", "+", "np", ".", "array", "(", "[", "size", "[", "0", "]", ",", "0", ",", "size", "[", "2", "]", "]", ")", ",", "origin", "+", "np", ".", "array", "(", "[", "0", ",", "0", ",", "size", "[", "2", "]", "]", ")", ",", "]", ")", "upper_base_plane", "=", "lower_base_plane", "+", "np", ".", "array", "(", "[", "0", ",", "size", "[", "1", "]", ",", "0", "]", ")", "vertices", "=", "np", ".", "vstack", "(", "[", "lower_base_plane", ",", "upper_base_plane", "]", ")", "faces", "=", "quads_to_tris", "(", "np", ".", "array", "(", "[", "[", "0", ",", "1", ",", "2", ",", "3", "]", ",", "# lower base (-y)", "[", "7", ",", "6", ",", "5", ",", "4", "]", ",", "# upper base (+y)", "[", "4", ",", "5", ",", "1", ",", "0", "]", ",", "# +z face", "[", "5", ",", "6", ",", "2", ",", "1", "]", ",", "# +x face", "[", "6", ",", "7", ",", "3", ",", "2", "]", ",", "# -z face", "[", "3", ",", "7", ",", "4", ",", "0", "]", ",", "# -x face", "]", ")", ")", "return", "Mesh", "(", "v", "=", "vertices", ",", "f", "=", "faces", ")" ]
28.806452
19.709677
def visitInlineShapeAtomNodeConstraint(self, ctx: ShExDocParser.InlineShapeAtomNodeConstraintContext): """ inlineShapeAtomNodeConstraint: nodeConstraint inlineShapeOrRef? # inlineShapeAtomShapeOrRef """ nc = ShexNodeExpressionParser(self.context, self.label) nc.visit(ctx.nodeConstraint()) if ctx.inlineShapeOrRef(): self.expr = ShapeAnd(shapeExprs=[nc.nodeconstraint]) sorref_parser = ShexShapeExpressionParser(self.context) sorref_parser.visit(ctx.inlineShapeOrRef()) # if isinstance(sorref_parser.expr, Shape) and self.context.is_empty_shape(sorref_parser.expr): # self.expr = nc.nodeconstraint # else: self.expr.shapeExprs.append(sorref_parser.expr) else: self.expr = nc.nodeconstraint
[ "def", "visitInlineShapeAtomNodeConstraint", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "InlineShapeAtomNodeConstraintContext", ")", ":", "nc", "=", "ShexNodeExpressionParser", "(", "self", ".", "context", ",", "self", ".", "label", ")", "nc", ".", "visit", "(", "ctx", ".", "nodeConstraint", "(", ")", ")", "if", "ctx", ".", "inlineShapeOrRef", "(", ")", ":", "self", ".", "expr", "=", "ShapeAnd", "(", "shapeExprs", "=", "[", "nc", ".", "nodeconstraint", "]", ")", "sorref_parser", "=", "ShexShapeExpressionParser", "(", "self", ".", "context", ")", "sorref_parser", ".", "visit", "(", "ctx", ".", "inlineShapeOrRef", "(", ")", ")", "# if isinstance(sorref_parser.expr, Shape) and self.context.is_empty_shape(sorref_parser.expr):", "# self.expr = nc.nodeconstraint", "# else:", "self", ".", "expr", ".", "shapeExprs", ".", "append", "(", "sorref_parser", ".", "expr", ")", "else", ":", "self", ".", "expr", "=", "nc", ".", "nodeconstraint" ]
58.428571
21.5
def _set_new_object(self, new_obj, inherited_obj, new_class, superclass, qualifier_repo, propagated, type_str): """ Set the object attributes for a single object and resolve the qualifiers. This sets attributes for Properties, Methods, and Parameters. """ assert isinstance(new_obj, (CIMMethod, CIMProperty, CIMParameter)) if inherited_obj: inherited_obj_qual = inherited_obj.qualifiers else: inherited_obj_qual = None if propagated: assert superclass is not None new_obj.propagated = propagated if propagated: assert inherited_obj is not None new_obj.class_origin = inherited_obj.class_origin else: assert inherited_obj is None new_obj.class_origin = new_class.classname self._resolve_qualifiers(new_obj.qualifiers, inherited_obj_qual, new_class, superclass, new_obj.name, type_str, qualifier_repo, propagate=propagated)
[ "def", "_set_new_object", "(", "self", ",", "new_obj", ",", "inherited_obj", ",", "new_class", ",", "superclass", ",", "qualifier_repo", ",", "propagated", ",", "type_str", ")", ":", "assert", "isinstance", "(", "new_obj", ",", "(", "CIMMethod", ",", "CIMProperty", ",", "CIMParameter", ")", ")", "if", "inherited_obj", ":", "inherited_obj_qual", "=", "inherited_obj", ".", "qualifiers", "else", ":", "inherited_obj_qual", "=", "None", "if", "propagated", ":", "assert", "superclass", "is", "not", "None", "new_obj", ".", "propagated", "=", "propagated", "if", "propagated", ":", "assert", "inherited_obj", "is", "not", "None", "new_obj", ".", "class_origin", "=", "inherited_obj", ".", "class_origin", "else", ":", "assert", "inherited_obj", "is", "None", "new_obj", ".", "class_origin", "=", "new_class", ".", "classname", "self", ".", "_resolve_qualifiers", "(", "new_obj", ".", "qualifiers", ",", "inherited_obj_qual", ",", "new_class", ",", "superclass", ",", "new_obj", ".", "name", ",", "type_str", ",", "qualifier_repo", ",", "propagate", "=", "propagated", ")" ]
40.133333
16.066667
def fpy_interface(fpy, static, interface, typedict): """Splices the full list of subroutines and the module procedure list into the static.f90 file. :arg static: the string contents of the static.f90 file. :arg interface: the name of the interface *field* being replaced. :arg typedict: the dictionary of dtypes and their kind and suffix combos. """ modprocs = [] subtext = [] for dtype, combos in list(typedict.items()): for tcombo in combos: kind, suffix = tcombo xnames, sub = fpy_interface_sub(fpy, dtype, kind, suffix) modprocs.extend(xnames) subtext.append(sub) subtext.append("\n") #Next, chunk the names of the module procedures into blocks of five #so that they display nicely for human readability. from fortpy.printing.formatting import present_params splice = static.replace(interface, present_params(modprocs, 21)) return splice.replace(interface.replace("py", "xpy"), ''.join(subtext))
[ "def", "fpy_interface", "(", "fpy", ",", "static", ",", "interface", ",", "typedict", ")", ":", "modprocs", "=", "[", "]", "subtext", "=", "[", "]", "for", "dtype", ",", "combos", "in", "list", "(", "typedict", ".", "items", "(", ")", ")", ":", "for", "tcombo", "in", "combos", ":", "kind", ",", "suffix", "=", "tcombo", "xnames", ",", "sub", "=", "fpy_interface_sub", "(", "fpy", ",", "dtype", ",", "kind", ",", "suffix", ")", "modprocs", ".", "extend", "(", "xnames", ")", "subtext", ".", "append", "(", "sub", ")", "subtext", ".", "append", "(", "\"\\n\"", ")", "#Next, chunk the names of the module procedures into blocks of five", "#so that they display nicely for human readability.", "from", "fortpy", ".", "printing", ".", "formatting", "import", "present_params", "splice", "=", "static", ".", "replace", "(", "interface", ",", "present_params", "(", "modprocs", ",", "21", ")", ")", "return", "splice", ".", "replace", "(", "interface", ".", "replace", "(", "\"py\"", ",", "\"xpy\"", ")", ",", "''", ".", "join", "(", "subtext", ")", ")" ]
43.478261
19.304348
def system(*args, **kwargs): """Execute the given bash command""" kwargs.setdefault('stdout', subprocess.PIPE) proc = subprocess.Popen(args, **kwargs) out, _ = proc.communicate() if proc.returncode: raise SystemExit(proc.returncode) return out.decode('utf-8')
[ "def", "system", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'stdout'", ",", "subprocess", ".", "PIPE", ")", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "*", "*", "kwargs", ")", "out", ",", "_", "=", "proc", ".", "communicate", "(", ")", "if", "proc", ".", "returncode", ":", "raise", "SystemExit", "(", "proc", ".", "returncode", ")", "return", "out", ".", "decode", "(", "'utf-8'", ")" ]
35.5
7.5
def check_data(cls, name, dims, is_unstructured): """ A validation method for the data shape Parameters ---------- name: str or list of str The variable names (one variable per array) dims: list with length 1 or list of lists with length 1 The dimension of the arrays. Only 1D-Arrays are allowed is_unstructured: bool or list of bool True if the corresponding array is unstructured. Returns ------- %(Plotter.check_data.returns)s """ if isinstance(name, six.string_types) or not is_iterable(name): name = [name] dims = [dims] is_unstructured = [is_unstructured] N = len(name) if N != 1: return [False] * N, [ 'Number of provided names (%i) must equal 1!' % (N)] * N elif len(dims) != 1: return [False], [ 'Number of provided dimension lists (%i) must equal 1!' % ( len(dims))] elif len(is_unstructured) != 1: return [False], [ ('Number of provided unstructured information (%i) must ' 'equal 1!') % (len(is_unstructured))] if name[0] != 0 and not name[0]: return [False], ['At least one variable name must be provided!'] # unstructured arrays have only 1 dimension dimlen = cls.allowed_dims if is_unstructured[0]: dimlen -= 1 # Check that the array is two-dimensional # # if more than one array name is provided, the dimensions should be # one les than dimlen to have a 2D array if (not isstring(name[0]) and not is_iterable(name[0]) and len(name[0]) != 1 and len(dims[0]) != dimlen - 1): return [False], ['Only one name is allowed per array!'] # otherwise the number of dimensions must equal dimlen if len(dims[0]) != dimlen: return [False], [ 'An array with dimension %i is required, not %i' % ( dimlen, len(dims[0]))] return [True], ['']
[ "def", "check_data", "(", "cls", ",", "name", ",", "dims", ",", "is_unstructured", ")", ":", "if", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", "or", "not", "is_iterable", "(", "name", ")", ":", "name", "=", "[", "name", "]", "dims", "=", "[", "dims", "]", "is_unstructured", "=", "[", "is_unstructured", "]", "N", "=", "len", "(", "name", ")", "if", "N", "!=", "1", ":", "return", "[", "False", "]", "*", "N", ",", "[", "'Number of provided names (%i) must equal 1!'", "%", "(", "N", ")", "]", "*", "N", "elif", "len", "(", "dims", ")", "!=", "1", ":", "return", "[", "False", "]", ",", "[", "'Number of provided dimension lists (%i) must equal 1!'", "%", "(", "len", "(", "dims", ")", ")", "]", "elif", "len", "(", "is_unstructured", ")", "!=", "1", ":", "return", "[", "False", "]", ",", "[", "(", "'Number of provided unstructured information (%i) must '", "'equal 1!'", ")", "%", "(", "len", "(", "is_unstructured", ")", ")", "]", "if", "name", "[", "0", "]", "!=", "0", "and", "not", "name", "[", "0", "]", ":", "return", "[", "False", "]", ",", "[", "'At least one variable name must be provided!'", "]", "# unstructured arrays have only 1 dimension", "dimlen", "=", "cls", ".", "allowed_dims", "if", "is_unstructured", "[", "0", "]", ":", "dimlen", "-=", "1", "# Check that the array is two-dimensional", "#", "# if more than one array name is provided, the dimensions should be", "# one les than dimlen to have a 2D array", "if", "(", "not", "isstring", "(", "name", "[", "0", "]", ")", "and", "not", "is_iterable", "(", "name", "[", "0", "]", ")", "and", "len", "(", "name", "[", "0", "]", ")", "!=", "1", "and", "len", "(", "dims", "[", "0", "]", ")", "!=", "dimlen", "-", "1", ")", ":", "return", "[", "False", "]", ",", "[", "'Only one name is allowed per array!'", "]", "# otherwise the number of dimensions must equal dimlen", "if", "len", "(", "dims", "[", "0", "]", ")", "!=", "dimlen", ":", "return", "[", "False", "]", ",", "[", "'An array with dimension %i 
is required, not %i'", "%", "(", "dimlen", ",", "len", "(", "dims", "[", "0", "]", ")", ")", "]", "return", "[", "True", "]", ",", "[", "''", "]" ]
40.538462
17.076923
def _rate_limit_status(self, api=None, mode=None): """ Verifying the API limits """ if api == None: api = self.connectToAPI() if mode == None: print json.dumps(api.rate_limit_status(), indent=2) raw_input("<Press ENTER>") else: # Testing if we have enough queries while True: allLimits = api.rate_limit_status() if mode == "get_user": limit = allLimits["resources"]["users"]["/users/show/:id"]["limit"] remaining = allLimits["resources"]["users"]["/users/show/:id"]["remaining"] reset = allLimits["resources"]["users"]["/users/show/:id"]["reset"] elif mode == "get_followers": limit = allLimits["resources"]["followers"]["/followers/ids"]["limit"] remaining = allLimits["resources"]["followers"]["/followers/ids"]["remaining"] reset = allLimits["resources"]["followers"]["/followers/ids"]["reset"] elif mode == "get_friends": limit = allLimits["resources"]["friends"]["/friends/ids"]["limit"] remaining = allLimits["resources"]["friends"]["/friends/ids"]["remaining"] reset = allLimits["resources"]["friends"]["/friends/ids"]["reset"] elif mode == "search_users": limit = allLimits["resources"]["users"]["/users/search"]["limit"] remaining = allLimits["resources"]["users"]["/users/search"]["remaining"] reset = allLimits["resources"]["users"]["/users/search"]["reset"] else: remaining = 1 """elif mode == "get_all_docs": limit = allLimits["resources"]REPLACEME["limit"] remaining = allLimits["resources"]REPLACEME["remaining"] reset = allLimits["resources"]REPLACEME["reset"]""" """elif mode == "get_users": limit = allLimits["resources"]REPLACEME["limit"] remaining = allLimits["resources"]REPLACEME["remaining"] reset = allLimits["resources"]REPLACEME["reset"] """ """else: remaining = 1""" # Checking if we have enough remaining queries if remaining > 0: #raw_input(str(remaining) + " queries yet...") break else: waitTime = 60 print "No more queries remaining, sleeping for " + str(waitTime) +" seconds..." time.sleep(waitTime) return 0
[ "def", "_rate_limit_status", "(", "self", ",", "api", "=", "None", ",", "mode", "=", "None", ")", ":", "if", "api", "==", "None", ":", "api", "=", "self", ".", "connectToAPI", "(", ")", "if", "mode", "==", "None", ":", "print", "json", ".", "dumps", "(", "api", ".", "rate_limit_status", "(", ")", ",", "indent", "=", "2", ")", "raw_input", "(", "\"<Press ENTER>\"", ")", "else", ":", "# Testing if we have enough queries", "while", "True", ":", "allLimits", "=", "api", ".", "rate_limit_status", "(", ")", "if", "mode", "==", "\"get_user\"", ":", "limit", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", "\"/users/show/:id\"", "]", "[", "\"limit\"", "]", "remaining", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", "\"/users/show/:id\"", "]", "[", "\"remaining\"", "]", "reset", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", "\"/users/show/:id\"", "]", "[", "\"reset\"", "]", "elif", "mode", "==", "\"get_followers\"", ":", "limit", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"followers\"", "]", "[", "\"/followers/ids\"", "]", "[", "\"limit\"", "]", "remaining", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"followers\"", "]", "[", "\"/followers/ids\"", "]", "[", "\"remaining\"", "]", "reset", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"followers\"", "]", "[", "\"/followers/ids\"", "]", "[", "\"reset\"", "]", "elif", "mode", "==", "\"get_friends\"", ":", "limit", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"friends\"", "]", "[", "\"/friends/ids\"", "]", "[", "\"limit\"", "]", "remaining", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"friends\"", "]", "[", "\"/friends/ids\"", "]", "[", "\"remaining\"", "]", "reset", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"friends\"", "]", "[", "\"/friends/ids\"", "]", "[", "\"reset\"", "]", "elif", "mode", "==", "\"search_users\"", ":", "limit", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", 
"\"/users/search\"", "]", "[", "\"limit\"", "]", "remaining", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", "\"/users/search\"", "]", "[", "\"remaining\"", "]", "reset", "=", "allLimits", "[", "\"resources\"", "]", "[", "\"users\"", "]", "[", "\"/users/search\"", "]", "[", "\"reset\"", "]", "else", ":", "remaining", "=", "1", "\"\"\"elif mode == \"get_all_docs\":\n limit = allLimits[\"resources\"]REPLACEME[\"limit\"]\n remaining = allLimits[\"resources\"]REPLACEME[\"remaining\"]\n reset = allLimits[\"resources\"]REPLACEME[\"reset\"]\"\"\"", "\"\"\"elif mode == \"get_users\":\n limit = allLimits[\"resources\"]REPLACEME[\"limit\"]\n remaining = allLimits[\"resources\"]REPLACEME[\"remaining\"]\n reset = allLimits[\"resources\"]REPLACEME[\"reset\"] \"\"\"", "\"\"\"else:\n remaining = 1\"\"\"", "# Checking if we have enough remaining queries", "if", "remaining", ">", "0", ":", "#raw_input(str(remaining) + \" queries yet...\")", "break", "else", ":", "waitTime", "=", "60", "print", "\"No more queries remaining, sleeping for \"", "+", "str", "(", "waitTime", ")", "+", "\" seconds...\"", "time", ".", "sleep", "(", "waitTime", ")", "return", "0" ]
52
22.307692
def update_config( self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs ): """Updates a configuration file for MAGICC Updates the contents of a fortran namelist in the run directory, creating a new namelist if none exists. Parameters ---------- filename : str Name of configuration file to write top_level_key : str Name of namelist to be written in the configuration file kwargs Other parameters to pass to the configuration file. No validation on the parameters is performed. Returns ------- dict The contents of the namelist which was written to file """ kwargs = self._format_config(kwargs) fname = join(self.run_dir, filename) if exists(fname): conf = f90nml.read(fname) else: conf = {top_level_key: {}} conf[top_level_key].update(kwargs) f90nml.write(conf, fname, force=True) return conf
[ "def", "update_config", "(", "self", ",", "filename", "=", "\"MAGTUNE_PYMAGICC.CFG\"", ",", "top_level_key", "=", "\"nml_allcfgs\"", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_format_config", "(", "kwargs", ")", "fname", "=", "join", "(", "self", ".", "run_dir", ",", "filename", ")", "if", "exists", "(", "fname", ")", ":", "conf", "=", "f90nml", ".", "read", "(", "fname", ")", "else", ":", "conf", "=", "{", "top_level_key", ":", "{", "}", "}", "conf", "[", "top_level_key", "]", ".", "update", "(", "kwargs", ")", "f90nml", ".", "write", "(", "conf", ",", "fname", ",", "force", "=", "True", ")", "return", "conf" ]
27.578947
21.394737
def reload_core(host=None, core_name=None): ''' MULTI-CORE HOSTS ONLY Load a new core from the same configuration as an existing registered core. While the "new" core is initializing, the "old" one will continue to accept requests. Once it has finished, all new request will go to the "new" core, and the "old" core will be unloaded. host : str (None) The solr host to query. __opts__['host'] is default. core_name : str The name of the core to reload Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.reload_core None music Return data is in the following format:: {'success':bool, 'data':dict, 'errors':list, 'warnings':list} ''' ret = _get_return_dict() if not _check_for_cores(): err = ['solr.reload_core can only be called by "multi-core" minions'] return ret.update({'success': False, 'errors': err}) if _get_none_or_value(core_name) is None and _check_for_cores(): success = True for name in __opts__['solr.cores']: resp = reload_core(host, name) if not resp['success']: success = False data = {name: {'data': resp['data']}} ret = _update_return_dict(ret, success, data, resp['errors'], resp['warnings']) return ret extra = ['action=RELOAD', 'core={0}'.format(core_name)] url = _format_url('admin/cores', host=host, core_name=None, extra=extra) return _http_request(url)
[ "def", "reload_core", "(", "host", "=", "None", ",", "core_name", "=", "None", ")", ":", "ret", "=", "_get_return_dict", "(", ")", "if", "not", "_check_for_cores", "(", ")", ":", "err", "=", "[", "'solr.reload_core can only be called by \"multi-core\" minions'", "]", "return", "ret", ".", "update", "(", "{", "'success'", ":", "False", ",", "'errors'", ":", "err", "}", ")", "if", "_get_none_or_value", "(", "core_name", ")", "is", "None", "and", "_check_for_cores", "(", ")", ":", "success", "=", "True", "for", "name", "in", "__opts__", "[", "'solr.cores'", "]", ":", "resp", "=", "reload_core", "(", "host", ",", "name", ")", "if", "not", "resp", "[", "'success'", "]", ":", "success", "=", "False", "data", "=", "{", "name", ":", "{", "'data'", ":", "resp", "[", "'data'", "]", "}", "}", "ret", "=", "_update_return_dict", "(", "ret", ",", "success", ",", "data", ",", "resp", "[", "'errors'", "]", ",", "resp", "[", "'warnings'", "]", ")", "return", "ret", "extra", "=", "[", "'action=RELOAD'", ",", "'core={0}'", ".", "format", "(", "core_name", ")", "]", "url", "=", "_format_url", "(", "'admin/cores'", ",", "host", "=", "host", ",", "core_name", "=", "None", ",", "extra", "=", "extra", ")", "return", "_http_request", "(", "url", ")" ]
34.888889
22.533333
def call_workflow_event(instance, event, after=True): """Calls the instance's workflow event """ if not event.transition: return False portal_type = instance.portal_type wf_module = _load_wf_module('{}.events'.format(portal_type.lower())) if not wf_module: return False # Inspect if event_<transition_id> function exists in the module prefix = after and "after" or "before" func_name = "{}_{}".format(prefix, event.transition.id) func = getattr(wf_module, func_name, False) if not func: return False logger.info('WF event: {0}.events.{1}' .format(portal_type.lower(), func_name)) func(instance) return True
[ "def", "call_workflow_event", "(", "instance", ",", "event", ",", "after", "=", "True", ")", ":", "if", "not", "event", ".", "transition", ":", "return", "False", "portal_type", "=", "instance", ".", "portal_type", "wf_module", "=", "_load_wf_module", "(", "'{}.events'", ".", "format", "(", "portal_type", ".", "lower", "(", ")", ")", ")", "if", "not", "wf_module", ":", "return", "False", "# Inspect if event_<transition_id> function exists in the module", "prefix", "=", "after", "and", "\"after\"", "or", "\"before\"", "func_name", "=", "\"{}_{}\"", ".", "format", "(", "prefix", ",", "event", ".", "transition", ".", "id", ")", "func", "=", "getattr", "(", "wf_module", ",", "func_name", ",", "False", ")", "if", "not", "func", ":", "return", "False", "logger", ".", "info", "(", "'WF event: {0}.events.{1}'", ".", "format", "(", "portal_type", ".", "lower", "(", ")", ",", "func_name", ")", ")", "func", "(", "instance", ")", "return", "True" ]
31.090909
18.318182