text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def count(self, sub, start=0, end=None):
    """Return the number of non-overlapping occurrences of substring
    sub in string[start:end].

    Optional arguments start and end are interpreted as in slice
    notation; end defaults to None so that, by default, the whole
    string is searched.

    :param str sub: Substring to search.
    :param int start: Beginning position.
    :param int end: Stop comparison at this position (None = end of string).
    """
    # end previously defaulted to -1, which silently excluded the last
    # character from the search; None matches str.count() semantics.
    return self.value_no_colors.count(sub, start, end)
[ "def", "count", "(", "self", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "-", "1", ")", ":", "return", "self", ".", "value_no_colors", ".", "count", "(", "sub", ",", "start", ",", "end", ")" ]
42.9
16.4
def wait_until_element_present(self, element, timeout=None):
    """Search for an element and wait until it shows up.

    :param element: PageElement or element locator as a tuple
                    (locator_type, locator_value) to be found
    :param timeout: max time to wait
    :returns: the web element if it is present
    :rtype: selenium.webdriver.remote.webelement.WebElement
            or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element is not found after the timeout
    """
    condition = self._expected_condition_find_element
    return self._wait_until(condition, element, timeout)
[ "def", "wait_until_element_present", "(", "self", ",", "element", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_wait_until", "(", "self", ".", "_expected_condition_find_element", ",", "element", ",", "timeout", ")" ]
59.2
28.9
def group_survival_table_from_events(
    groups, durations, event_observed, birth_times=None, limit=-1
):  # pylint: disable=too-many-locals
    """
    Join multiple event series together into per-group DataFrames — a
    generalization of `survival_table_from_events` to grouped data.
    Previously called `group_event_series` pre 0.2.3.

    Parameters
    ----------
    groups: a (n,) array
        individuals' group ids.
    durations: a (n,) array
        durations of each individual
    event_observed: a (n,) array
        event observations, 1 if observed, 0 else.
    birth_times: a (n,) array
        when the subject was first observed. A subject's death event is then
        at [birth times + duration observed]. Normally set to all zeros, but
        can be positive or negative.
    limit:
        truncate the tables at this event time (-1 means no truncation).

    Returns
    -------
    unique_groups: np.array
        array of all the unique groups present
    removed: DataFrame
        removal count data at event_times for each group,
        column names are 'removed:<group name>'
    observed: DataFrame
        observed count data at event_times for each group,
        column names are 'observed:<group name>'
    censored: DataFrame
        censored count data at event_times for each group,
        column names are 'censored:<group name>'

    See Also
    --------
    survival_table_from_events
    """
    n = np.max(groups.shape)
    assert n == np.max(durations.shape) == np.max(event_observed.shape), "inputs must be of the same length."
    if birth_times is None:
        # Create some birth times
        birth_times = np.zeros(np.max(durations.shape))
        birth_times[:] = np.min(durations)
    assert n == np.max(birth_times.shape), "inputs must be of the same length."

    # Normalize every input to an aligned pandas Series of length n.
    groups, durations, event_observed, birth_times = [
        pd.Series(np.asarray(vector).reshape(n))
        for vector in [groups, durations, event_observed, birth_times]
    ]
    unique_groups = groups.unique()

    for i, group in enumerate(unique_groups):
        mask = groups == group
        T = durations[mask]
        C = event_observed[mask]
        B = birth_times[mask]
        group_name = str(group)
        columns = [
            "{0}:{1}".format(event_name, group_name)
            for event_name in ["removed", "observed", "censored", "entrance", "at_risk"]
        ]
        if i == 0:
            survival_table = survival_table_from_events(T, C, B, columns=columns)
        else:
            survival_table = survival_table.join(
                survival_table_from_events(T, C, B, columns=columns), how="outer"
            )

    survival_table = survival_table.fillna(0)
    # hmm pandas its too bad I can't do data.loc[:limit] and leave out the if.
    if int(limit) != -1:
        survival_table = survival_table.loc[:limit]

    return (
        unique_groups,
        survival_table.filter(like="removed:"),
        survival_table.filter(like="observed:"),
        survival_table.filter(like="censored:"),
    )
[ "def", "group_survival_table_from_events", "(", "groups", ",", "durations", ",", "event_observed", ",", "birth_times", "=", "None", ",", "limit", "=", "-", "1", ")", ":", "# pylint: disable=too-many-locals", "n", "=", "np", ".", "max", "(", "groups", ".", "sha...
37.546296
25.231481
def get_instance(self, payload):
    """Build an instance of CredentialListInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.trunking.v1.trunk.credential_list.CredentialListInstance
    :rtype: twilio.rest.trunking.v1.trunk.credential_list.CredentialListInstance
    """
    trunk_sid = self._solution['trunk_sid']
    return CredentialListInstance(self._version, payload, trunk_sid=trunk_sid)
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "CredentialListInstance", "(", "self", ".", "_version", ",", "payload", ",", "trunk_sid", "=", "self", ".", "_solution", "[", "'trunk_sid'", "]", ",", ")" ]
43.5
26.9
def create(cls, config_file=None):
    """Return the process-wide default configuration, creating it on first call.

    Raises RuntimeError if called again with a different config file than
    the one the singleton was created with.
    """
    if cls.instance is None:
        cls.instance = cls(config_file)
        # Load config file, possibly overwriting the defaults
        cls.instance.load_ini()
    mismatched = config_file and config_file != cls.instance.config_file
    if mismatched:
        raise RuntimeError("Configuration initialized a second time with a different file!")
    return cls.instance
[ "def", "create", "(", "cls", ",", "config_file", "=", "None", ")", ":", "if", "cls", ".", "instance", "is", "None", ":", "cls", ".", "instance", "=", "cls", "(", "config_file", ")", "# Load config file, possibly overwriting the defaults", "cls", ".", "instance...
35
20.230769
def updateAllKeys(self):
    """Update times for all keys in the layout."""
    ordered_keys = self.sorted_key_list()
    for frame, key in zip(self.kf_list, ordered_keys):
        frame.update(key, self.dct[key])
[ "def", "updateAllKeys", "(", "self", ")", ":", "for", "kf", ",", "key", "in", "zip", "(", "self", ".", "kf_list", ",", "self", ".", "sorted_key_list", "(", ")", ")", ":", "kf", ".", "update", "(", "key", ",", "self", ".", "dct", "[", "key", "]", ...
46
10.5
def convert_outlook_msg(msg_bytes):
    """Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to
    standard RFC 822 format

    Args:
        msg_bytes (bytes): the content of the .msg file

    Returns:
        A RFC 822 string
    """
    if not is_outlook_msg(msg_bytes):
        raise ValueError("The supplied bytes are not an Outlook MSG file")
    # msgconvert writes its output next to the input, so work inside a
    # throwaway directory and restore the cwd afterwards.
    previous_dir = os.getcwd()
    work_dir = tempfile.mkdtemp()
    os.chdir(work_dir)
    with open("sample.msg", "wb") as msg_file:
        msg_file.write(msg_bytes)
    try:
        subprocess.check_call(
            ["msgconvert", "sample.msg"], stdout=null_file, stderr=null_file)
        with open("sample.eml", "rb") as eml_file:
            rfc822 = eml_file.read()
    except FileNotFoundError:
        raise EmailParserError(
            "Failed to convert Outlook MSG: msgconvert utility not found")
    finally:
        os.chdir(previous_dir)
        shutil.rmtree(work_dir)
    return rfc822
[ "def", "convert_outlook_msg", "(", "msg_bytes", ")", ":", "if", "not", "is_outlook_msg", "(", "msg_bytes", ")", ":", "raise", "ValueError", "(", "\"The supplied bytes are not an Outlook MSG file\"", ")", "orig_dir", "=", "os", ".", "getcwd", "(", ")", "tmp_dir", "...
30.6875
18
def readdir(self, req, ino, size, off, fi):
    """Read directory

    Valid replies:
        reply_readdir
        reply_err
    """
    # Only the root inode (1) exists in this filesystem.
    if ino != 1:
        self.reply_err(req, errno.ENOENT)
        return
    attr = {'st_ino': 1, 'st_mode': S_IFDIR}
    self.reply_readdir(req, size, off, [('.', attr), ('..', attr)])
[ "def", "readdir", "(", "self", ",", "req", ",", "ino", ",", "size", ",", "off", ",", "fi", ")", ":", "if", "ino", "==", "1", ":", "attr", "=", "{", "'st_ino'", ":", "1", ",", "'st_mode'", ":", "S_IFDIR", "}", "entries", "=", "[", "(", "'.'", ...
29.307692
14.076923
def bound_perturbed_gmres(pseudo, p, epsilon, deltas):
    '''Compute GMRES perturbation bound based on pseudospectrum

    Computes the GMRES bound from [SifEM13]_.
    '''
    if not numpy.all(numpy.array(deltas) > epsilon):
        raise ArgumentError('all deltas have to be greater than epsilon')
    bounds = []
    for delta in deltas:
        # boundary paths of the delta-pseudospectrum and their vertices
        paths = pseudo.contour_paths(delta)
        vertices = paths.vertices()
        # supremum of |p| on the boundary
        supremum = numpy.max(numpy.abs(p(vertices)))
        prefactor = epsilon/(delta-epsilon)
        arclength_term = paths.length()/(2*numpy.pi*delta)
        bounds.append(prefactor * arclength_term * supremum)
    return bounds
[ "def", "bound_perturbed_gmres", "(", "pseudo", ",", "p", ",", "epsilon", ",", "deltas", ")", ":", "if", "not", "numpy", ".", "all", "(", "numpy", ".", "array", "(", "deltas", ")", ">", "epsilon", ")", ":", "raise", "ArgumentError", "(", "'all deltas have...
30.166667
20
def semantic_alert(visitor, block):
    """
    Format:

    {% alert class=error %}
    message
    {% endalert %}
    """
    css_class = block['kwargs'].get('class', '')
    body = visitor.parse_text(block['body'], 'article')
    return '\n'.join([
        '<div class="ui %s message">' % css_class,
        body,
        '</div>',
    ])
[ "def", "semantic_alert", "(", "visitor", ",", "block", ")", ":", "txt", "=", "[", "]", "cls", "=", "block", "[", "'kwargs'", "]", ".", "get", "(", "'class'", ",", "''", ")", "txt", ".", "append", "(", "'<div class=\"ui %s message\">'", "%", "cls", ")",...
24.333333
15
def set(self, section, option, value=''):
    '''Overridden from RawConfigParser solely to make the 'value'
    argument default to an empty string.'''
    # Reject non-string values before delegating to the base class.
    self._string_check(value)
    super(GitConfigParser, self).set(section, option, value)
[ "def", "set", "(", "self", ",", "section", ",", "option", ",", "value", "=", "''", ")", ":", "self", ".", "_string_check", "(", "value", ")", "super", "(", "GitConfigParser", ",", "self", ")", ".", "set", "(", "section", ",", "option", ",", "value", ...
39.857143
18.428571
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss returned by the closure, or None if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_avg_sq'] = torch.zeros_like(p.data)
            exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
            beta1, beta2 = group['b1'], group['b2']
            state['step'] += 1
            # Add grad clipping
            if group['max_grad_norm'] > 0:
                clip_grad_norm_(p, group['max_grad_norm'])
            # Decay the first and second moment running average coefficient.
            # NOTE: use the keyword alpha/value overloads; the positional
            # (Number, Tensor) forms used previously are deprecated and
            # removed in recent PyTorch releases.
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            denom = exp_avg_sq.sqrt().add_(group['e'])
            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']
            lr_scheduled = group['lr']
            lr_scheduled *= group['schedule'].get_lr(state['step'])
            step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
            p.data.addcdiv_(exp_avg, denom, value=-step_size)
            # Add weight decay at the end (fixed version)
            if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
                p.data.add_(p.data, alpha=-lr_scheduled * group['weight_decay'])
    return loss
[ "def", "step", "(", "self", ",", "closure", "=", "None", ")", ":", "loss", "=", "None", "if", "closure", "is", "not", "None", ":", "loss", "=", "closure", "(", ")", "for", "group", "in", "self", ".", "param_groups", ":", "for", "p", "in", "group", ...
38.637931
23.741379
def projection(self, axis):
    """Sums all data along all other axes, then return Hist1D"""
    axis_index = self.get_axis_number(axis)
    summed = np.sum(self.histogram, axis=self.other_axes(axis_index))
    return Hist1d.from_histogram(summed, bin_edges=self.bin_edges[axis_index])
[ "def", "projection", "(", "self", ",", "axis", ")", ":", "axis", "=", "self", ".", "get_axis_number", "(", "axis", ")", "projected_hist", "=", "np", ".", "sum", "(", "self", ".", "histogram", ",", "axis", "=", "self", ".", "other_axes", "(", "axis", ...
59
18.6
def match_color_index(self, color):
    """Takes an "R,G,B" string or wx.Color and returns a matching xlwt
    color index.

    Returns the input unchanged when it is already an int; returns None
    for falsy input.
    """
    from jcvi.utils.webcolors import color_diff
    if isinstance(color, int):
        return color
    if color:
        if isinstance(color, six.string_types):
            # Materialize to a list: under Python 3, map() returns a
            # one-shot iterator that would be exhausted after the first
            # color_diff() call below, making every later comparison see
            # an empty sequence.
            rgb = [int(x) for x in color.split(',')]
        else:
            rgb = color.Get()
        # Silence noisy DEBUG output from the color-diff library.
        logging.disable(logging.DEBUG)
        distances = [color_diff(rgb, x) for x in self.xlwt_colors]
        logging.disable(logging.NOTSET)
        result = distances.index(min(distances))
        self.unused_colors.discard(self.xlwt_colors[result])
        return result
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(...
39.166667
12.333333
def LoadGDAL(filename, no_data=None):
    """Read a GDAL file.

    Opens any file GDAL can read, selects the first raster band, and
    loads it and its metadata into a RichDEM array of the appropriate data
    type.

    If you need to do something more complicated, look at the source of
    this function.

    Args:
        filename (str): Name of the raster file to open
        no_data (float): Optionally, set the no_data value to this.

    Returns:
        A RichDEM array
    """
    if not GDAL_AVAILABLE:
        raise Exception("richdem.LoadGDAL() requires GDAL.")

    allowed_types = {
        gdal.GDT_Byte, gdal.GDT_Int16, gdal.GDT_Int32, gdal.GDT_UInt16,
        gdal.GDT_UInt32, gdal.GDT_Float32, gdal.GDT_Float64
    }

    # Read in data
    src_ds = gdal.Open(filename)
    srcband = src_ds.GetRasterBand(1)

    if no_data is None:
        no_data = srcband.GetNoDataValue()
        if no_data is None:
            raise Exception("The source data did not have a NoData value. Please use the no_data argument to specify one. If should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.")

    srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)

    if srcband.DataType not in allowed_types:
        raise Exception("This datatype is not supported. Please file a bug report on RichDEM.")

    srcdata.projection = src_ds.GetProjectionRef()
    srcdata.geotransform = src_ds.GetGeoTransform()
    srcdata.metadata = dict(src_ds.GetMetadata())

    _AddAnalysis(srcdata, "LoadGDAL(filename={0}, no_data={1})".format(filename, no_data))

    return srcdata
[ "def", "LoadGDAL", "(", "filename", ",", "no_data", "=", "None", ")", ":", "if", "not", "GDAL_AVAILABLE", ":", "raise", "Exception", "(", "\"richdem.LoadGDAL() requires GDAL.\"", ")", "allowed_types", "=", "{", "gdal", ".", "GDT_Byte", ",", "gdal", ".", "GDT_I...
35.520833
30.166667
def create_virtual_aggregation(geometry, crs):
    """Function to create aggregation layer based on extent.

    :param geometry: The geometry to use as an extent.
    :type geometry: QgsGeometry

    :param crs: The Coordinate Reference System to use for the layer.
    :type crs: QgsCoordinateReferenceSystem

    :returns: A polygon layer with exposure's crs.
    :rtype: QgsVectorLayer
    """
    fields = [
        create_field_from_definition(aggregation_id_field),
        create_field_from_definition(aggregation_name_field)
    ]
    layer = create_memory_layer(
        'aggregation', QgsWkbTypes.PolygonGeometry, crs, fields)

    # Single feature covering the whole requested extent.
    layer.startEditing()
    feature = QgsFeature()
    feature.setGeometry(geometry)
    feature.setAttributes([1, tr('Entire Area')])
    layer.addFeature(feature)
    layer.commitChanges()

    # Generate aggregation keywords
    layer.keywords['layer_purpose'] = layer_purpose_aggregation['key']
    layer.keywords['title'] = 'aggr_from_bbox'
    layer.keywords[inasafe_keyword_version_key] = inasafe_keyword_version
    layer.keywords['inasafe_fields'] = {
        aggregation_id_field['key']: aggregation_id_field['field_name'],
        aggregation_name_field['key']: aggregation_name_field['field_name']
    }
    # We will fill default values later, according to the exposure.
    layer.keywords['inasafe_default_values'] = {}
    return layer
[ "def", "create_virtual_aggregation", "(", "geometry", ",", "crs", ")", ":", "fields", "=", "[", "create_field_from_definition", "(", "aggregation_id_field", ")", ",", "create_field_from_definition", "(", "aggregation_name_field", ")", "]", "aggregation_layer", "=", "cre...
36.439024
18.804878
def supprime(cls, table, **kwargs):
    """Remove entries matching given condition.

    kwargs is a dict of column name : value, with length ONE.
    """
    assert len(kwargs) == 1
    field, value = kwargs.popitem()
    req = f"""DELETE FROM {table} WHERE {field} = """ + cls.mark_style
    return MonoExecutant((req, (value,)))
[ "def", "supprime", "(", "cls", ",", "table", ",", "*", "*", "kwargs", ")", ":", "assert", "len", "(", "kwargs", ")", "==", "1", "field", ",", "value", "=", "kwargs", ".", "popitem", "(", ")", "req", "=", "f\"\"\"DELETE FROM {table} WHERE {field} = \"\"\"",...
41.111111
6.777778
def backing_type_for(value):
    """Returns the DynamoDB backing type for a given python value's type

    ::

        4 -> 'N'
        ['x', 3] -> 'L'
        {2, 4} -> 'NS'

    Raises ValueError for types (or set element types) DynamoDB cannot
    represent.
    """
    # NOTE: the docstring previously claimed {2, 4} -> 'SS'; number sets
    # map to 'NS' as implemented below.
    if isinstance(value, str):
        vtype = "S"
    elif isinstance(value, bytes):
        vtype = "B"
    # NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
    elif isinstance(value, bool):
        vtype = "BOOL"
    elif isinstance(value, numbers.Number):
        vtype = "N"
    elif isinstance(value, dict):
        vtype = "M"
    elif isinstance(value, list):
        vtype = "L"
    elif isinstance(value, set):
        if not value:
            vtype = "SS"  # doesn't matter, Set(x) should dump an empty set the same for all x
        else:
            inner = next(iter(value))
            if isinstance(inner, str):
                vtype = "SS"
            elif isinstance(inner, bytes):
                vtype = "BS"
            elif isinstance(inner, numbers.Number):
                vtype = "NS"
            else:
                raise ValueError(f"Unknown set type for inner value {inner!r}")
    else:
        raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
    return vtype
[ "def", "backing_type_for", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "vtype", "=", "\"S\"", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "vtype", "=", "\"B\"", "# NOTE: numbers.Number check must come **AFTER...
35.868421
17.631579
def Hf_g(CASRN, AvailableMethods=False, Method=None):
    r'''This function handles the retrieval of a chemical's gas heat of
    formation. Lookup is based on CASRNs. Will automatically select a data
    source to use if no Method is provided; returns None if the data is not
    available.

    Prefered sources are 'Active Thermochemical Tables (g)' for high accuracy,
    and 'TRC' for less accuracy but more chemicals. Function has data for
    approximately 2000 chemicals.

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    _Hfg : float
        Gas phase heat of formation, [J/mol]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Hf(g) with the given
        inputs

    Other Parameters
    ----------------
    Method : string, optional
        A string for the method name to use, as defined by constants in
        Hf_g_methods
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Hf(g) for the desired chemical, and will return methods instead of
        Hf(g)

    Notes
    -----
    Sources are:

        * 'ATCT_G', the Active Thermochemical Tables version 1.112.
        * 'TRC', from a 1994 compilation.

    Examples
    --------
    >>> Hf_g('67-56-1')
    -200700.0

    References
    ----------
    .. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
       Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
       Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
       Century." Journal of Physics: Conference Series 16, no. 1
       (January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
    .. [2] Frenkel, M. L, Texas Engineering Experiment Station, and
       Thermodynamics Research Center. Thermodynamics of Organic Compounds in
       the Gas State. College Station, Tex.: Thermodynamics Research Center,
       1994.
    '''
    def list_methods():
        # Candidate sources in order of preference; NONE is always last so
        # list_methods()[0] is a safe default when no data is available.
        methods = []
        if CASRN in ATcT_g.index:
            methods.append(ATCT_G)
        if CASRN in TRC_gas_data.index and not np.isnan(TRC_gas_data.at[CASRN, 'Hf']):
            methods.append(TRC)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    if Method == ATCT_G:
        _Hfg = float(ATcT_g.at[CASRN, 'Hf_298K'])
    elif Method == TRC:
        _Hfg = float(TRC_gas_data.at[CASRN, 'Hf'])
    elif Method == NONE:
        return None
    else:
        # Fixed typo in the original message ('Failure in in function').
        raise Exception('Failure in function Hf_g')
    return _Hfg
[ "def", "Hf_g", "(", "CASRN", ",", "AvailableMethods", "=", "False", ",", "Method", "=", "None", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "if", "CASRN", "in", "ATcT_g", ".", "index", ":", "methods", ".", "append", "(",...
33.454545
24.831169
def GetNextWrittenEventSource(self):
    """Retrieves the next event source that was written after open.

    Returns:
      EventSource: event source or None if there are no newly written ones.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    if not self._is_open:
        raise IOError('Unable to read from closed storage writer.')
    index = self._written_event_source_index
    if index >= len(self._event_sources):
        return None
    # Advance the cursor so each written source is returned exactly once.
    self._written_event_source_index = index + 1
    return self._event_sources[index]
[ "def", "GetNextWrittenEventSource", "(", "self", ")", ":", "if", "not", "self", ".", "_is_open", ":", "raise", "IOError", "(", "'Unable to read from closed storage writer.'", ")", "if", "self", ".", "_written_event_source_index", ">=", "len", "(", "self", ".", "_e...
32.473684
21.842105
def api_request(self, url, data=None, method='GET', raw=False, file=None):
    """
    Perform an API request to the given URL, optionally including the
    specified data

    :type url: String
    :param url: the URL to which to make the request
    :type data: String
    :param data: the data to send with the request, if any
    :type method: String
    :param method: the HTTP request method
    :type raw: Boolean
    :param raw: if True, return the raw response, otherwise treat as JSON
        and return the parsed response
    :type file: String
    :param file: (Optional) full path to file to be uploaded in a POST
        request

    :returns: the response from the server either as a raw response or a
        Python dictionary generated by parsing the JSON response

    :raises: APIError if the API request is not successful
    """
    # NOTE: compare with '==' -- the original used 'is', which only works
    # for interned string literals and silently fails for dynamically
    # built method names.
    if method == 'GET':
        response = self.oauth.get(url)
    elif method == 'POST':
        if file is not None:
            response = self.oauth.post(url, data=data, file=file)
        else:
            response = self.oauth.post(url, data=data)
    elif method == 'PUT':
        response = self.oauth.put(url, data=data)
    elif method == 'DELETE':
        response = self.oauth.delete(url)
    else:
        raise APIError("Unknown request method: %s" % (method,))
    # check for error responses
    if response.status_code >= 400:
        raise APIError(response.status_code, '', "Error accessing API (url: %s, method: %s)\nData: %s\nMessage: %s" % (url, method, data, response.text))
    if raw:
        return response.content
    else:
        return response.json()
[ "def", "api_request", "(", "self", ",", "url", ",", "data", "=", "None", ",", "method", "=", "'GET'", ",", "raw", "=", "False", ",", "file", "=", "None", ")", ":", "if", "method", "is", "'GET'", ":", "response", "=", "self", ".", "oauth", ".", "g...
36.854167
22.479167
def directory_generator(dirname, trim=0):
    """
    yields a tuple of (relative filename, chunking function). The
    chunking function can be called to open and iterate over the
    contents of the filename.
    """
    def gather(collect, dirname, fnames):
        # os.path.walk visitor: record every non-directory entry.
        for fname in fnames:
            full_path = join(dirname, fname)
            if not isdir(full_path):
                collect.append(full_path)
    found = list()
    walk(dirname, gather, found)
    for fname in found:
        yield fname[trim:], file_chunk(fname)
[ "def", "directory_generator", "(", "dirname", ",", "trim", "=", "0", ")", ":", "def", "gather", "(", "collect", ",", "dirname", ",", "fnames", ")", ":", "for", "fname", "in", "fnames", ":", "df", "=", "join", "(", "dirname", ",", "fname", ")", "if", ...
29.764706
12.941176
def reference_cluster(envs, name):
    """
    Return set of all env names referencing or referenced by given name.

        >>> cluster = sorted(reference_cluster([
        ...     {'name': 'base', 'refs': []},
        ...     {'name': 'test', 'refs': ['base']},
        ...     {'name': 'local', 'refs': ['test']},
        ... ], 'test'))
        >>> cluster == ['base', 'local', 'test']
        True
    """
    # One undirected edge per (env, referenced env) pair.
    edges = [
        set([env['name'], ref])
        for env in envs
        for ref in env['refs']
    ]
    cluster = set([name])
    previous = set()
    # Absorb adjacent edges until the cluster reaches a fixpoint.
    while cluster != previous:
        previous = set(cluster)
        unabsorbed = []
        for edge in edges:
            if edge & cluster:
                cluster.update(edge)
            else:
                # Keep only edges still outside the cluster for the next round.
                unabsorbed.append(edge)
        edges = unabsorbed
    return cluster
[ "def", "reference_cluster", "(", "envs", ",", "name", ")", ":", "edges", "=", "[", "set", "(", "[", "env", "[", "'name'", "]", ",", "ref", "]", ")", "for", "env", "in", "envs", "for", "ref", "in", "env", "[", "'refs'", "]", "]", "prev", ",", "c...
28.242424
12.484848
def parse_json(pairs):
    """
    modified from: https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json#34796078

    pass this to the object_pairs_hook kwarg of json.load/loads
    """
    def _utf8(obj):
        # Byte-encode unicode strings; leave every other type untouched.
        return obj.encode('utf-8') if isinstance(obj, unicode) else obj
    return dict((_utf8(key), _utf8(value)) for key, value in pairs)
[ "def", "parse_json", "(", "pairs", ")", ":", "new_pairs", "=", "[", "]", "for", "key", ",", "value", "in", "pairs", ":", "if", "isinstance", "(", "value", ",", "unicode", ")", ":", "value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "if", "i...
32.6
15.533333
def wrap(self, row: Union[Mapping[str, Any], Sequence[Any]]):
    """Return row tuple for row."""
    names = self.ids_and_column_names
    if isinstance(row, Mapping):
        # Mapping rows are looked up by column name.
        kwargs = {ident: row[column_name] for ident, column_name in names.items()}
    else:
        # Sequence rows are positional, in declaration order.
        kwargs = dict(zip(names.keys(), row))
    return self.dataclass(**kwargs)
[ "def", "wrap", "(", "self", ",", "row", ":", "Union", "[", "Mapping", "[", "str", ",", "Any", "]", ",", "Sequence", "[", "Any", "]", "]", ")", ":", "return", "(", "self", ".", "dataclass", "(", "*", "*", "{", "ident", ":", "row", "[", "column_n...
35.571429
20.642857
def en_passant_moves(self, position):
    """
    Finds possible en passant moves.

    :rtype: list
    """
    # if pawn is not on a valid en passant get_location then return None
    if not self.on_en_passant_valid_location():
        return
    # Try the capture to the right first, then to the left.
    for shift in (lambda x: x.shift_right(), lambda x: x.shift_left()):
        for move in self.add_one_en_passant_move(shift, position):
            yield move
[ "def", "en_passant_moves", "(", "self", ",", "position", ")", ":", "# if pawn is not on a valid en passant get_location then return None", "if", "self", ".", "on_en_passant_valid_location", "(", ")", ":", "for", "move", "in", "itertools", ".", "chain", "(", "self", "....
40
24.333333
def _determine_monetary_account_id(cls, monetary_account_id=None): """ :type monetary_account_id: int :rtype: int """ if monetary_account_id is None: return context.BunqContext.user_context().primary_monetary_account.id_ return monetary_account_id
[ "def", "_determine_monetary_account_id", "(", "cls", ",", "monetary_account_id", "=", "None", ")", ":", "if", "monetary_account_id", "is", "None", ":", "return", "context", ".", "BunqContext", ".", "user_context", "(", ")", ".", "primary_monetary_account", ".", "i...
27.272727
19.818182
def run(self, dag):
    """
    Pick a convenient layout depending on the best matching
    qubit connectivity, and set the property `layout`.

    Args:
        dag (DAGCircuit): DAG to find layout for.

    Raises:
        TranspilerError: if dag wider than self.coupling_map
    """
    num_dag_qubits = sum(qreg.size for qreg in dag.qregs.values())
    if num_dag_qubits > self.coupling_map.size():
        raise TranspilerError('Number of qubits greater than device.')
    best_sub = self._best_subset(num_dag_qubits)
    layout = Layout()
    # Assign physical qubits from the chosen subset in register order.
    position = 0
    for qreg in dag.qregs.values():
        for index in range(qreg.size):
            layout[(qreg, index)] = int(best_sub[position])
            position += 1
    self.property_set['layout'] = layout
[ "def", "run", "(", "self", ",", "dag", ")", ":", "num_dag_qubits", "=", "sum", "(", "[", "qreg", ".", "size", "for", "qreg", "in", "dag", ".", "qregs", ".", "values", "(", ")", "]", ")", "if", "num_dag_qubits", ">", "self", ".", "coupling_map", "."...
36.909091
17.909091
def create_df_file_with_query(self, query, output, chunk_size=100000):
    """
    Dump the result of ``query`` to ``output`` as a sequence of pickled
    DataFrames, fetched in chunks to avoid loading everything at once.

    :param query: SQL query (a trailing ';' is stripped before paging).
    :param output: path of the file the pickled chunks are written to.
    :param chunk_size: number of rows fetched per chunk (new optional
        parameter; default preserves the previous hard-coded value).
    """
    # Keep the paging template separate from the formatted query: the
    # original code re-formatted its own output, so after the first
    # iteration the placeholders were gone, OFFSET never advanced, and the
    # same first chunk was fetched forever.
    template = query.replace(';', '') + " LIMIT {chunk_size} OFFSET {offset};"
    offset = 0
    with open(output, 'wb') as outfile:
        while True:
            print(offset)
            chunk_query = template.format(chunk_size=chunk_size, offset=offset)
            df = pd.read_sql(chunk_query, self.engine)
            pickle.dump(df, outfile)
            offset += chunk_size
            # A short chunk means the result set is exhausted.
            if len(df) < chunk_size:
                break
[ "def", "create_df_file_with_query", "(", "self", ",", "query", ",", "output", ")", ":", "chunk_size", "=", "100000", "offset", "=", "0", "data", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "list", ")", ")", "with", "open", "(", "output", ...
34.590909
9.636364
def create_dataset(self, group, chunk_size):
    """Initializes sparse specific datasets"""
    group.attrs['format'] = self.dformat
    group.attrs['dim'] = self.dim

    auto = chunk_size == 'auto'
    if auto:
        group.create_dataset(
            'coordinates', (0, 2), dtype=np.float64,
            chunks=True, maxshape=(None, 2))
        group.create_dataset(
            self.name, (0,), dtype=self.dtype,
            chunks=True, maxshape=(None,))
    else:
        # for storing sparse data we don't use the self.nb_per_chunk,
        # which is only used by the Writer to determine times chunking.
        per_chunk = nb_per_chunk(self.dtype.itemsize, 1, chunk_size)
        group.create_dataset(
            'coordinates', (0, 2), dtype=np.float64,
            chunks=(per_chunk, 2), maxshape=(None, 2))
        group.create_dataset(
            self.name, (0,), dtype=self.dtype,
            chunks=(per_chunk,), maxshape=(None,))

    frames_dtype = np.int64
    if auto:
        frames_chunks = True
        self.nb_per_chunk = 'auto'
    else:
        frames_chunks = (
            nb_per_chunk(np.dtype(frames_dtype).itemsize, 1, chunk_size),)
        # Needed by Times.create_dataset
        self.nb_per_chunk = nb_per_chunk(
            self.dtype.itemsize,
            int(round(self.sparsity * self.dim)),
            chunk_size)
    group.create_dataset(
        'frames', (0,), dtype=frames_dtype,
        chunks=frames_chunks, maxshape=(None,))
[ "def", "create_dataset", "(", "self", ",", "group", ",", "chunk_size", ")", ":", "group", ".", "attrs", "[", "'format'", "]", "=", "self", ".", "dformat", "group", ".", "attrs", "[", "'dim'", "]", "=", "self", ".", "dim", "if", "chunk_size", "==", "'...
36.071429
16.880952
def assert_datasource_protocol(event):
    """Assert that an event meets the protocol for datasource outputs."""
    assert event.type in DATASOURCE_TYPE
    # Done packets have no dt.
    if event.type != DATASOURCE_TYPE.DONE:
        assert isinstance(event.dt, datetime)
        assert event.dt.tzinfo == pytz.utc
[ "def", "assert_datasource_protocol", "(", "event", ")", ":", "assert", "event", ".", "type", "in", "DATASOURCE_TYPE", "# Done packets have no dt.", "if", "not", "event", ".", "type", "==", "DATASOURCE_TYPE", ".", "DONE", ":", "assert", "isinstance", "(", "event", ...
34.888889
11.666667
def drop(self, table): """ Drop a table from a database. Accepts either a string representing a table name or a list of strings representing a table names. """ existing_tables = self.tables if isinstance(table, (list, set, tuple)): for t in table: self._drop(t, existing_tables) else: self._drop(table, existing_tables) return table
[ "def", "drop", "(", "self", ",", "table", ")", ":", "existing_tables", "=", "self", ".", "tables", "if", "isinstance", "(", "table", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", ":", "for", "t", "in", "table", ":", "self", ".", "_drop", ...
30.857143
13.428571
def parseMemory(memAttribute): """ Returns EC2 'memory' string as a float. Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB'). Amazon loves to put commas in their numbers, so we have to accommodate that. If the syntax ever changes, this will raise. :param memAttribute: EC2 JSON memory param string. :return: A float representing memory in GiB. """ mem = memAttribute.replace(',', '').split() if mem[1] == 'GiB': return float(mem[0]) else: raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.')
[ "def", "parseMemory", "(", "memAttribute", ")", ":", "mem", "=", "memAttribute", ".", "replace", "(", "','", ",", "''", ")", ".", "split", "(", ")", "if", "mem", "[", "1", "]", "==", "'GiB'", ":", "return", "float", "(", "mem", "[", "0", "]", ")"...
36.5
19.375
def _logout_url(request, next_page=None): """ Generates CAS logout URL :param: request RequestObj :param: next_page Page to redirect after logout. """ url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout') if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True): parsed_url = urlparse.urlparse(next_page) if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction url += '?' + urlencode({'service': next_page}) else: protocol = ('http://', 'https://')[request.is_secure()] host = request.get_host() url += '?' + urlencode({'service': protocol + host + next_page}) return url
[ "def", "_logout_url", "(", "request", ",", "next_page", "=", "None", ")", ":", "url", "=", "urlparse", ".", "urljoin", "(", "settings", ".", "CAS_SERVER_URL", ",", "'logout'", ")", "if", "next_page", "and", "getattr", "(", "settings", ",", "'CAS_PROVIDE_URL_...
34
23.619048
def drop_all(self, check_first: bool = True): """Drop all tables from the database. :param bool check_first: Defaults to True, only issue DROPs for tables confirmed to be present in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.drop_all` """ self._metadata.drop_all(self.engine, checkfirst=check_first) self._store_drop()
[ "def", "drop_all", "(", "self", ",", "check_first", ":", "bool", "=", "True", ")", ":", "self", ".", "_metadata", ".", "drop_all", "(", "self", ".", "engine", ",", "checkfirst", "=", "check_first", ")", "self", ".", "_store_drop", "(", ")" ]
48.5
25
def get_predicates(self, class_, controller=None): """Get full predicate information for given request class, and cache for subsequent calls. """ if class_ not in self._predicates: if controller is None: controller = self._find_controller(class_) else: classes = self.request_controllers.get(controller, None) if classes is None: raise ValueError( 'Unknown request controller {!r}'.format(controller)) if class_ not in classes: raise ValueError( 'Unknown request class {!r}'.format(class_)) predicates_data = self._download_predicate_data(class_, controller) predicate_objects = self._parse_predicates_data(predicates_data) self._predicates[class_] = predicate_objects return self._predicates[class_]
[ "def", "get_predicates", "(", "self", ",", "class_", ",", "controller", "=", "None", ")", ":", "if", "class_", "not", "in", "self", ".", "_predicates", ":", "if", "controller", "is", "None", ":", "controller", "=", "self", ".", "_find_controller", "(", "...
44.47619
16.714286
def statements(self): '''Return a list of statements This is done by joining together any rows that have continuations ''' # FIXME: no need to do this every time; we should cache the # result if len(self.rows) == 0: return [] current_statement = Statement(self.rows[0]) current_statement.startline = self.rows[0].linenumber current_statement.endline = self.rows[0].linenumber statements = [] for row in self.rows[1:]: if len(row) > 0 and row[0] == "...": # we found a continuation current_statement += row[1:] current_statement.endline = row.linenumber else: if len(current_statement) > 0: # append current statement to the list of statements... statements.append(current_statement) # start a new statement current_statement = Statement(row) current_statement.startline = row.linenumber current_statement.endline = row.linenumber if len(current_statement) > 0: statements.append(current_statement) # trim trailing blank statements while (len(statements[-1]) == 0 or ((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)): statements.pop() return statements
[ "def", "statements", "(", "self", ")", ":", "# FIXME: no need to do this every time; we should cache the", "# result", "if", "len", "(", "self", ".", "rows", ")", "==", "0", ":", "return", "[", "]", "current_statement", "=", "Statement", "(", "self", ".", "rows"...
37.918919
17
def execute_notebook(self, name): """Loads and then runs a notebook file.""" warnings.filterwarnings("ignore", category=DeprecationWarning) nb,f = self.load_notebook(name) self.run_notebook(nb,f) self.assertTrue(True)
[ "def", "execute_notebook", "(", "self", ",", "name", ")", ":", "warnings", ".", "filterwarnings", "(", "\"ignore\"", ",", "category", "=", "DeprecationWarning", ")", "nb", ",", "f", "=", "self", ".", "load_notebook", "(", "name", ")", "self", ".", "run_not...
36
14
def parse_parameter_reference(self, tup_tree): """ :: <!ELEMENT PARAMETER.REFERENCE (QUALIFIER*)> <!ATTLIST PARAMETER.REFERENCE %CIMName; %ReferenceClass;> """ self.check_node(tup_tree, 'PARAMETER.REFERENCE', ('NAME',), ('REFERENCECLASS',), ('QUALIFIER',)) attrl = attrs(tup_tree) qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',)) return CIMParameter(attrl['NAME'], type='reference', is_array=False, reference_class=attrl.get('REFERENCECLASS', None), qualifiers=qualifiers, embedded_object=False)
[ "def", "parse_parameter_reference", "(", "self", ",", "tup_tree", ")", ":", "self", ".", "check_node", "(", "tup_tree", ",", "'PARAMETER.REFERENCE'", ",", "(", "'NAME'", ",", ")", ",", "(", "'REFERENCECLASS'", ",", ")", ",", "(", "'QUALIFIER'", ",", ")", "...
33.391304
18.434783
def make_field_objects(field_data, names): # type: (List[Dict[Text, Text]], Names) -> List[Field] """We're going to need to make message parameters too.""" field_objects = [] field_names = [] # type: List[Text] for field in field_data: if hasattr(field, 'get') and callable(field.get): atype = cast(Text, field.get('type')) name = cast(Text, field.get('name')) # null values can have a default value of None has_default = False default = None if 'default' in field: has_default = True default = field.get('default') order = field.get('order') doc = field.get('doc') other_props = get_other_props(field, FIELD_RESERVED_PROPS) new_field = Field(atype, name, has_default, default, order, names, doc, other_props) # make sure field name has not been used yet if new_field.name in field_names: fail_msg = 'Field name %s already in use.' % new_field.name raise SchemaParseException(fail_msg) field_names.append(new_field.name) else: raise SchemaParseException('Not a valid field: %s' % field) field_objects.append(new_field) return field_objects
[ "def", "make_field_objects", "(", "field_data", ",", "names", ")", ":", "# type: (List[Dict[Text, Text]], Names) -> List[Field]", "field_objects", "=", "[", "]", "field_names", "=", "[", "]", "# type: List[Text]", "for", "field", "in", "field_data", ":", "if", "hasatt...
46.225806
15.612903
def sr1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501 """Flood and receive packets at layer 3 and return only the first answer prn: function applied to packets received verbose: set verbosity level nofilter: put 1 to avoid use of BPF filters filter: provide a BPF filter iface: listen answers only on the given interface""" s = conf.L3socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501 ans, _ = sndrcvflood(s, x, *args, **kargs) s.close() if len(ans) > 0: return ans[0][1] else: return None
[ "def", "sr1flood", "(", "x", ",", "promisc", "=", "None", ",", "filter", "=", "None", ",", "iface", "=", "None", ",", "nofilter", "=", "0", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "# noqa: E501", "s", "=", "conf", ".", "L3socket", "("...
43.285714
19.071429
def all_terms(self): """Iterate over all of the terms. The self.terms property has only root level terms. This iterator iterates over all terms""" for s_name, s in self.sections.items(): # Yield the section header if s.name != 'Root': yield s # Yield all of the rows for terms in the section for rterm in s: yield rterm for d in rterm.descendents: yield d
[ "def", "all_terms", "(", "self", ")", ":", "for", "s_name", ",", "s", "in", "self", ".", "sections", ".", "items", "(", ")", ":", "# Yield the section header", "if", "s", ".", "name", "!=", "'Root'", ":", "yield", "s", "# Yield all of the rows for terms in t...
32.266667
15.733333
def to_ip(self, values, from_unit): """Return values in IP and the units to which the values have been converted.""" if from_unit in self._ip_units: return values, from_unit elif from_unit == 'degC-hours': return self.to_unit(values, 'degF-hours', from_unit), 'degF-hours' else: return self.to_unit(values, 'degF-days', from_unit), 'degF-days'
[ "def", "to_ip", "(", "self", ",", "values", ",", "from_unit", ")", ":", "if", "from_unit", "in", "self", ".", "_ip_units", ":", "return", "values", ",", "from_unit", "elif", "from_unit", "==", "'degC-hours'", ":", "return", "self", ".", "to_unit", "(", "...
50.5
14
def set_dataframe_format(self, new_format): """ Set format to use in DataframeEditor. Args: new_format (string): e.g. "%.3f" """ self.sig_option_changed.emit('dataframe_format', new_format) self.model.dataframe_format = new_format
[ "def", "set_dataframe_format", "(", "self", ",", "new_format", ")", ":", "self", ".", "sig_option_changed", ".", "emit", "(", "'dataframe_format'", ",", "new_format", ")", "self", ".", "model", ".", "dataframe_format", "=", "new_format" ]
32.333333
13
def _wrapusage(self, usage=None, width=0): """Textwrap usage instructions. ARGS: width = 0 <int>: Maximum allowed page width. 0 means use default from self.iMaxHelpWidth. """ if not width: width = self.width return textwrap.fill('USAGE: ' + self.format_usage(usage), width=width, subsequent_indent=' ...')
[ "def", "_wrapusage", "(", "self", ",", "usage", "=", "None", ",", "width", "=", "0", ")", ":", "if", "not", "width", ":", "width", "=", "self", ".", "width", "return", "textwrap", ".", "fill", "(", "'USAGE: '", "+", "self", ".", "format_usage", "(", ...
34.818182
19.545455
def locked(dev, target): """ Gets or sets the lock. """ click.echo("Locked: %s" % dev.locked) if target is not None: click.echo("Setting lock: %s" % target) dev.locked = target
[ "def", "locked", "(", "dev", ",", "target", ")", ":", "click", ".", "echo", "(", "\"Locked: %s\"", "%", "dev", ".", "locked", ")", "if", "target", "is", "not", "None", ":", "click", ".", "echo", "(", "\"Setting lock: %s\"", "%", "target", ")", "dev", ...
33.166667
8.5
def makeEquilibriumTable(out_filename, four_in_files, CRRA): ''' Make the equilibrium statistics table for the paper, saving it as a tex file in the tables folder. Also makes a version for the slides that doesn't use the table environment, nor include the note at bottom. Parameters ---------- out_filename : str Name of the file in which to save output (in the tables directory). Suffix .tex appended automatically. four_in_files: [str] A list with four csv files. 0) SOE frictionless 1) SOE Sticky 2) DSGE frictionless 3) DSGE sticky CRRA : float Coefficient of relative risk aversion Returns ------- None ''' # Read in statistics from the four files SOEfrictionless = np.genfromtxt(results_dir + four_in_files[0] + 'Results.csv', delimiter=',') SOEsticky = np.genfromtxt(results_dir + four_in_files[1] + 'Results.csv', delimiter=',') DSGEfrictionless = np.genfromtxt(results_dir + four_in_files[2] + 'Results.csv', delimiter=',') DSGEsticky = np.genfromtxt(results_dir + four_in_files[3] + 'Results.csv', delimiter=',') # Read in value at birth from the four files vBirth_SOE_F = np.genfromtxt(results_dir + four_in_files[0] + 'BirthValue.csv', delimiter=',') vBirth_SOE_S = np.genfromtxt(results_dir + four_in_files[1] + 'BirthValue.csv', delimiter=',') vBirth_DSGE_F = np.genfromtxt(results_dir + four_in_files[2] + 'BirthValue.csv', delimiter=',') vBirth_DSGE_S = np.genfromtxt(results_dir + four_in_files[3] + 'BirthValue.csv', delimiter=',') # Calculate the cost of stickiness in the SOE and DSGE models StickyCost_SOE = np.mean(1. - (vBirth_SOE_S/vBirth_SOE_F)**(1./(1.-CRRA))) StickyCost_DSGE = np.mean(1. 
- (vBirth_DSGE_S/vBirth_DSGE_F)**(1./(1.-CRRA))) paper_top = "\\begin{minipage}{\\textwidth}\n" paper_top += " \\begin{table} \n" paper_top += "\caption{Equilibrium Statistics} \n" paper_top += "\label{table:Eqbm} \n" paper_top += "\\newsavebox{\EqbmBox} \n" paper_top += "\sbox{\EqbmBox}{ \n" paper_top += "\\newcommand{\EqDir}{\TablesDir/Eqbm} \n" slides_top = '\\begin{center} \n' main_table = "\\begin{tabular}{lllcccc} \n" main_table += "\\toprule \n" main_table += "&&& \multicolumn{2}{c}{SOE Model} & \multicolumn{2}{c}{HA-DSGE Model} \n" main_table += "\\\\ %\cline{4-5} \n" main_table += " &&& \multicolumn{1}{c}{Frictionless} & \multicolumn{1}{c}{Sticky} & \multicolumn{1}{c}{Frictionless} & \multicolumn{1}{c}{Sticky} \n" main_table += "\\\\ \\midrule \n" main_table += " \multicolumn{3}{l}{Means} \n" main_table += "%\\\\ & & $M$ \n" main_table += "%\\\\ & & $K$ \n" main_table += "\\\\ & & $A$ & {:.2f}".format(SOEfrictionless[0]) +" &{:.2f}".format(SOEsticky[0]) +" & {:.2f}".format(DSGEfrictionless[0]) +" & {:.2f}".format(DSGEsticky[0]) +" \n" main_table += "\\\\ & & $C$ & {:.2f}".format(SOEfrictionless[1]) +" &{:.2f}".format(SOEsticky[1]) +" & {:.2f}".format(DSGEfrictionless[1]) +" & {:.2f}".format(DSGEsticky[1]) +" \n" main_table += "\\\\ \\midrule \n" main_table += " \multicolumn{3}{l}{Standard Deviations} \n" main_table += "\\\\ & \multicolumn{4}{l}{Aggregate Time Series (`Macro')} \n" main_table += "%\\ & & $\Delta \log \mathbf{M}$ \n" main_table += "\\\\ & & $\log A $ & {:.3f}".format(SOEfrictionless[2]) +" & {:.3f}".format(SOEsticky[2]) +" & {:.3f}".format(DSGEfrictionless[2]) +" & {:.3f}".format(DSGEsticky[2]) +" \n" main_table += "\\\\ & & $\Delta \log \\CLevBF $ & {:.3f}".format(SOEfrictionless[3]) +" & {:.3f}".format(SOEsticky[3]) +" & {:.3f}".format(DSGEfrictionless[3]) +" & {:.3f}".format(DSGEsticky[3]) +" \n" main_table += "\\\\ & & $\Delta \log \\YLevBF $ & {:.3f}".format(SOEfrictionless[4]) +" & {:.3f}".format(SOEsticky[4]) +" & 
{:.3f}".format(DSGEfrictionless[4]) +" & {:.3f}".format(DSGEsticky[4]) +" \n" main_table += "\\\\ & \multicolumn{3}{l}{Individual Cross Sectional (`Micro')} \n" main_table += "\\\\ & & $\log \\aLevBF $ & {:.3f}".format(SOEfrictionless[6]) +" & {:.3f}".format(SOEsticky[6]) +" & {:.3f}".format(DSGEfrictionless[6]) +" & {:.3f}".format(DSGEsticky[6]) +" \n" main_table += "\\\\ & & $\log \\cLevBF $ & {:.3f}".format(SOEfrictionless[7]) +" & {:.3f}".format(SOEsticky[7]) +" & {:.3f}".format(DSGEfrictionless[7]) +" & {:.3f}".format(DSGEsticky[7]) +" \n" main_table += "\\\\ & & $\log p $ & {:.3f}".format(SOEfrictionless[8]) +" & {:.3f}".format(SOEsticky[8]) +" & {:.3f}".format(DSGEfrictionless[8]) +" & {:.3f}".format(DSGEsticky[8]) +" \n" main_table += "\\\\ & & $\log \\yLevBF | \\yLevBF > 0 $ & {:.3f}".format(SOEfrictionless[9]) +" & {:.3f}".format(SOEsticky[9]) +" & {:.3f}".format(DSGEfrictionless[9]) +" & {:.3f}".format(DSGEsticky[9]) +" \n" main_table += "\\\\ & & $\Delta \log \\cLevBF $ & {:.3f}".format(SOEfrictionless[11]) +" & {:.3f}".format(SOEsticky[11]) +" & {:.3f}".format(DSGEfrictionless[11]) +" & {:.3f}".format(DSGEsticky[11]) +" \n" main_table += " \n" main_table += " \n" main_table += "\\\\ \\midrule \multicolumn{3}{l}{Cost of Stickiness} \n" main_table += " & \multicolumn{2}{c}{" + mystr2(StickyCost_SOE) + "} \n" main_table += " & \multicolumn{2}{c}{" + mystr2(StickyCost_DSGE) + "} \n" main_table += "\\\\ \\bottomrule \n" main_table += " \end{tabular} \n" paper_bot = " } \n " paper_bot += "\\usebox{\EqbmBox} \n" paper_bot += "\ifthenelse{\\boolean{StandAlone}}{\\newlength\TableWidth}{} \n" paper_bot += "\settowidth\TableWidth{\\usebox{\EqbmBox}} % Calculate width of table so notes will match \n" paper_bot += "\medskip\medskip \\vspace{0.0cm} \parbox{\TableWidth}{\\footnotesize\n" paper_bot += "\\textbf{Notes}: The cost of stickiness is calculated as the proportion by which the permanent income of a newborn frictionless consumer would need to be reduced in 
order to achieve the same reduction of expected value associated with forcing them to become a sticky expectations consumer.} \n" paper_bot += "\end{table}\n" paper_bot += "\end{minipage}\n" paper_bot += "\ifthenelse{\\boolean{StandAlone}}{\end{document}}{} \n" slides_bot = '\\end{center} \n' paper_output = paper_top + main_table + paper_bot with open(tables_dir + out_filename + '.tex','w') as f: f.write(paper_output) f.close() slides_output = slides_top + main_table + slides_bot with open(tables_dir + out_filename + 'Slides.tex','w') as f: f.write(slides_output) f.close()
[ "def", "makeEquilibriumTable", "(", "out_filename", ",", "four_in_files", ",", "CRRA", ")", ":", "# Read in statistics from the four files", "SOEfrictionless", "=", "np", ".", "genfromtxt", "(", "results_dir", "+", "four_in_files", "[", "0", "]", "+", "'Results.csv'",...
65.36
43.34
def deliver_hook(target, payload, instance_id=None, hook_id=None, **kwargs): """ target: the url to receive the payload. payload: a python primitive data structure instance_id: a possibly None "trigger" instance ID hook_id: the ID of defining Hook object """ r = requests.post( url=target, data=json.dumps(payload), headers={ "Content-Type": "application/json", "Authorization": "Token %s" % settings.HOOK_AUTH_TOKEN, }, ) r.raise_for_status() return r.text
[ "def", "deliver_hook", "(", "target", ",", "payload", ",", "instance_id", "=", "None", ",", "hook_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "r", "=", "requests", ".", "post", "(", "url", "=", "target", ",", "data", "=", "json", ".", "dum...
33.647059
17.647059
def print(self, *args, end='\n', file=None): """Convenience function so you don't need to remember to put the \n at the end of the line. """ if file is None: file = self.stdout s = ' '.join(str(arg) for arg in args) + end file.write(s)
[ "def", "print", "(", "self", ",", "*", "args", ",", "end", "=", "'\\n'", ",", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "self", ".", "stdout", "s", "=", "' '", ".", "join", "(", "str", "(", "arg", ")", "for"...
36.375
8.375
def get_arp_table(self): """ Get arp table information. Return a list of dictionaries having the following set of keys: * interface (string) * mac (string) * ip (string) * age (float) For example:: [ { 'interface' : 'MgmtEth0/RSP0/CPU0/0', 'mac' : '5c:5e:ab:da:3c:f0', 'ip' : '172.17.17.1', 'age' : 1454496274.84 }, { 'interface': 'MgmtEth0/RSP0/CPU0/0', 'mac' : '66:0e:94:96:e0:ff', 'ip' : '172.17.17.2', 'age' : 1435641582.49 } ] """ arp_table = [] command = 'show arp | exclude Incomplete' output = self._send_command(command) # Skip the first line which is a header output = output.split('\n') output = output[1:] for line in output: if len(line) == 0: return {} if len(line.split()) == 5: # Static ARP entries have no interface # Internet 10.0.0.1 - 0010.2345.1cda ARPA interface = '' protocol, address, age, mac, eth_type = line.split() elif len(line.split()) == 6: protocol, address, age, mac, eth_type, interface = line.split() else: raise ValueError("Unexpected output from: {}".format(line.split())) try: if age == '-': age = 0 age = float(age) except ValueError: raise ValueError("Unable to convert age value to float: {}".format(age)) # Validate we matched correctly if not re.search(RE_IPADDR, address): raise ValueError("Invalid IP Address detected: {}".format(address)) if not re.search(RE_MAC, mac): raise ValueError("Invalid MAC Address detected: {}".format(mac)) entry = { 'interface': interface, 'mac': napalm_base.helpers.mac(mac), 'ip': address, 'age': age } arp_table.append(entry) return arp_table
[ "def", "get_arp_table", "(", "self", ")", ":", "arp_table", "=", "[", "]", "command", "=", "'show arp | exclude Incomplete'", "output", "=", "self", ".", "_send_command", "(", "command", ")", "# Skip the first line which is a header", "output", "=", "output", ".", ...
34.367647
18.279412
def _validate_entities(self, stages): """ Purpose: Validate whether the argument 'stages' is of list of Stage objects :argument: list of Stage objects """ if not stages: raise TypeError(expected_type=Stage, actual_type=type(stages)) if not isinstance(stages, list): stages = [stages] for value in stages: if not isinstance(value, Stage): raise TypeError(expected_type=Stage, actual_type=type(value)) return stages
[ "def", "_validate_entities", "(", "self", ",", "stages", ")", ":", "if", "not", "stages", ":", "raise", "TypeError", "(", "expected_type", "=", "Stage", ",", "actual_type", "=", "type", "(", "stages", ")", ")", "if", "not", "isinstance", "(", "stages", "...
30.411765
20.058824
def send(self, message, socket_): """ Sends a message (dict) to the socket. Message consists of a 8-byte len header followed by a msgpack-numpy encoded dict. Args: message: The message dict (e.g. {"cmd": "reset"}) socket_: The python socket object to use. """ if not socket_: raise TensorForceError("No socket given in call to `send`!") elif not isinstance(message, dict): raise TensorForceError("Message to be sent must be a dict!") message = msgpack.packb(message) len_ = len(message) # prepend 8-byte len field to all our messages socket_.send(bytes("{:08d}".format(len_), encoding="ascii") + message)
[ "def", "send", "(", "self", ",", "message", ",", "socket_", ")", ":", "if", "not", "socket_", ":", "raise", "TensorForceError", "(", "\"No socket given in call to `send`!\"", ")", "elif", "not", "isinstance", "(", "message", ",", "dict", ")", ":", "raise", "...
42.882353
20.294118
def _field_value_text(self, field): """Return the html representation of the value of the given field""" if field in self.fields: return unicode(self.get(field)) else: return self.get_timemachine_instance(field)._object_name_text()
[ "def", "_field_value_text", "(", "self", ",", "field", ")", ":", "if", "field", "in", "self", ".", "fields", ":", "return", "unicode", "(", "self", ".", "get", "(", "field", ")", ")", "else", ":", "return", "self", ".", "get_timemachine_instance", "(", ...
45.666667
13
def _onCompletionListItemSelected(self, index): """Item selected. Insert completion to editor """ model = self._widget.model() selectedWord = model.words[index] textToInsert = selectedWord[len(model.typedText()):] self._qpart.textCursor().insertText(textToInsert) self._closeCompletion()
[ "def", "_onCompletionListItemSelected", "(", "self", ",", "index", ")", ":", "model", "=", "self", ".", "_widget", ".", "model", "(", ")", "selectedWord", "=", "model", ".", "words", "[", "index", "]", "textToInsert", "=", "selectedWord", "[", "len", "(", ...
42
7.25
def _cleanup(self): """Cleanup the stored sessions""" current_time = time.time() timeout = self._config.timeout if current_time - self._last_cleanup_time > timeout: self.store.cleanup(timeout) self._last_cleanup_time = current_time
[ "def", "_cleanup", "(", "self", ")", ":", "current_time", "=", "time", ".", "time", "(", ")", "timeout", "=", "self", ".", "_config", ".", "timeout", "if", "current_time", "-", "self", ".", "_last_cleanup_time", ">", "timeout", ":", "self", ".", "store",...
40.142857
8.571429
def _read_linguas_from_files(env, linguas_files=None): """ Parse `LINGUAS` file and return list of extracted languages """ import SCons.Util import SCons.Environment global _re_comment global _re_lang if not SCons.Util.is_List(linguas_files) \ and not SCons.Util.is_String(linguas_files) \ and not isinstance(linguas_files, SCons.Node.FS.Base) \ and linguas_files: # If, linguas_files==True or such, then read 'LINGUAS' file. linguas_files = ['LINGUAS'] if linguas_files is None: return [] fnodes = env.arg2nodes(linguas_files) linguas = [] for fnode in fnodes: contents = _re_comment.sub("", fnode.get_text_contents()) ls = [l for l in _re_lang.findall(contents) if l] linguas.extend(ls) return linguas
[ "def", "_read_linguas_from_files", "(", "env", ",", "linguas_files", "=", "None", ")", ":", "import", "SCons", ".", "Util", "import", "SCons", ".", "Environment", "global", "_re_comment", "global", "_re_lang", "if", "not", "SCons", ".", "Util", ".", "is_List",...
38.619048
15.714286
def random_word(self, length, prefix=0, start=False, end=False, flatten=False): """ Generate a random word of length from this table. :param length: the length of the generated word; >= 1; :param prefix: if greater than 0, the maximum length of the prefix to consider to choose the next character; :param start: if True, the generated word starts as a word of table; :param end: if True, the generated word ends as a word of table; :param flatten: whether or not consider the table as flattened; :return: a random word of length generated from table. :raises GenerationError: if no word of length can be generated. """ if start: word = ">" length += 1 return self._extend_word(word, length, prefix=prefix, end=end, flatten=flatten)[1:] else: first_letters = list(k for k in self if len(k) == 1 and k != ">") while True: word = random.choice(first_letters) try: word = self._extend_word(word, length, prefix=prefix, end=end, flatten=flatten) return word except GenerationError: first_letters.remove(word[0])
[ "def", "random_word", "(", "self", ",", "length", ",", "prefix", "=", "0", ",", "start", "=", "False", ",", "end", "=", "False", ",", "flatten", "=", "False", ")", ":", "if", "start", ":", "word", "=", "\">\"", "length", "+=", "1", "return", "self"...
47.172414
21.37931
def build(self): """Builds the index, creating an instance of `lunr.Index`. This completes the indexing process and should only be called once all documents have been added to the index. """ self._calculate_average_field_lengths() self._create_field_vectors() self._create_token_set() return Index( inverted_index=self.inverted_index, field_vectors=self.field_vectors, token_set=self.token_set, fields=list(self._fields.keys()), pipeline=self.search_pipeline, )
[ "def", "build", "(", "self", ")", ":", "self", ".", "_calculate_average_field_lengths", "(", ")", "self", ".", "_create_field_vectors", "(", ")", "self", ".", "_create_token_set", "(", ")", "return", "Index", "(", "inverted_index", "=", "self", ".", "inverted_...
34.058824
14.117647
def predict_epitopes_from_args(args): """ Returns an epitope collection from the given commandline arguments. Parameters ---------- args : argparse.Namespace Parsed commandline arguments for Topiary """ mhc_model = mhc_binding_predictor_from_args(args) variants = variant_collection_from_args(args) gene_expression_dict = rna_gene_expression_dict_from_args(args) transcript_expression_dict = rna_transcript_expression_dict_from_args(args) predictor = TopiaryPredictor( mhc_model=mhc_model, padding_around_mutation=args.padding_around_mutation, ic50_cutoff=args.ic50_cutoff, percentile_cutoff=args.percentile_cutoff, min_transcript_expression=args.rna_min_transcript_expression, min_gene_expression=args.rna_min_gene_expression, only_novel_epitopes=args.only_novel_epitopes, raise_on_error=not args.skip_variant_errors) return predictor.predict_from_variants( variants=variants, transcript_expression_dict=transcript_expression_dict, gene_expression_dict=gene_expression_dict)
[ "def", "predict_epitopes_from_args", "(", "args", ")", ":", "mhc_model", "=", "mhc_binding_predictor_from_args", "(", "args", ")", "variants", "=", "variant_collection_from_args", "(", "args", ")", "gene_expression_dict", "=", "rna_gene_expression_dict_from_args", "(", "a...
40.555556
16.481481
def best_identities(self): """Returns identities of the best HSP in alignment. """ if len(self.hsp_list) > 0: return round(float(self.hsp_list[0].identities) / float(self.hsp_list[0].align_length) * 100, 1)
[ "def", "best_identities", "(", "self", ")", ":", "if", "len", "(", "self", ".", "hsp_list", ")", ">", "0", ":", "return", "round", "(", "float", "(", "self", ".", "hsp_list", "[", "0", "]", ".", "identities", ")", "/", "float", "(", "self", ".", ...
58
22
def get_gallery_favorites(self): """Get a list of the images in the gallery this user has favorited.""" url = (self._imgur._base_url + "/3/account/{0}/gallery_favorites".format( self.name)) resp = self._imgur._send_request(url) return [Image(img, self._imgur) for img in resp]
[ "def", "get_gallery_favorites", "(", "self", ")", ":", "url", "=", "(", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/account/{0}/gallery_favorites\"", ".", "format", "(", "self", ".", "name", ")", ")", "resp", "=", "self", ".", "_imgur", ".", "_send...
53
14
def compose_info(root_dir, files, hash_fn, aleph_record, urn_nbn=None): """ Compose `info` XML file. Info example:: <?xml version="1.0" encoding="UTF-8" standalone="yes" ?> <info> <created>2014-07-31T10:58:53</created> <metadataversion>1.0</metadataversion> <packageid>c88f5a50-7b34-11e2-b930-005056827e51</packageid> <mainmets>mets.xml</mainmets> <titleid type="ccnb">cnb001852189</titleid> <titleid type="isbn">978-80-85979-89-6</titleid> <collection>edeposit</collection> <institution>nakladatelství Altar</institution> <creator>ABA001</creator> <size>1530226</size> <itemlist itemtotal="1"> <item>\data\Denik_zajatce_Sramek_CZ_v30f-font.epub</item> </itemlist> <checksum type="MD5" checksum="ce076548eaade33888005de5d4634a0d"> \MD5.md5 </checksum> </info> Args: root_dir (str): Absolute path to the root directory. files (list): Absolute paths to all ebook and metadata files. hash_fn (str): Absolute path to the MD5 file. aleph_record (str): String with Aleph record with metadata. Returns: str: XML string. 
""" # compute hash for hashfile with open(hash_fn) as f: hash_file_md5 = hashlib.md5(f.read()).hexdigest() schema_location = "http://www.ndk.cz/standardy-digitalizace/info11.xsd" document = odict[ "info": odict[ "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance", "@xsi:noNamespaceSchemaLocation": schema_location, "created": time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()), "metadataversion": "1.0", "packageid": _path_to_id(root_dir), # not used in SIP # "mainmets": _get_localized_fn(metadata_fn, root_dir), "titleid": None, "collection": "edeposit", "institution": None, "creator": None, "size": _calc_dir_size(root_dir) / 1024, # size in kiB "itemlist": odict[ "@itemtotal": "2", "item": map( lambda x: _get_localized_fn(x, root_dir), files ) ], "checksum": odict[ "@type": "MD5", "@checksum": hash_file_md5, "#text": _get_localized_fn(hash_fn, root_dir) ], ] ] # get informations from MARC record record = MARCXMLRecord(aleph_record) # get publisher info publisher = unicode(record.get_publisher(), "utf-8") if record.get_publisher(None): document["info"]["institution"] = remove_hairs(publisher) # get <creator> info creator = record.getDataRecords("910", "a", False) alt_creator = record.getDataRecords("040", "d", False) document["info"]["creator"] = creator[0] if creator else alt_creator[-1] # collect informations for <titleid> tags isbns = record.get_ISBNs() ccnb = record.getDataRecords("015", "a", False) ccnb = ccnb[0] if ccnb else None if any([isbns, ccnb, urn_nbn]): # TODO: issn document["info"]["titleid"] = [] for isbn in isbns: document["info"]["titleid"].append({ "@type": "isbn", "#text": isbn }) if ccnb: document["info"]["titleid"].append({ "@type": "ccnb", "#text": ccnb }) if urn_nbn: document["info"]["titleid"].append({ "@type": "urnnbn", "#text": urn_nbn }) # TODO: later # if issn: # document["info"]["titleid"].append({ # "@type": "issn", # "#text": issn # }) # remove unset options unset_keys = [ key for key in document["info"] if key is None ] for key in 
unset_keys: del document[key] xml_document = xmltodict.unparse(document, pretty=True) return xml_document.encode("utf-8")
[ "def", "compose_info", "(", "root_dir", ",", "files", ",", "hash_fn", ",", "aleph_record", ",", "urn_nbn", "=", "None", ")", ":", "# compute hash for hashfile", "with", "open", "(", "hash_fn", ")", "as", "f", ":", "hash_file_md5", "=", "hashlib", ".", "md5",...
30.730769
20.130769
def tb2radiance(self, tb_, **kwargs): """Get the radiance from the brightness temperature (Tb) given the band name. Input: tb_: Brightness temperature of the band (self.band) Optional arguments: lut: If not none, this is a Look Up Table with tb and radiance values which will be used for the conversion. Default is None. normalized: If True, the derived radiance values are the spectral radiances for the band. If False the radiance is the band integrated radiance. Default is True. """ lut = kwargs.get('lut', None) normalized = kwargs.get('normalized', True) if self.wavespace == WAVE_NUMBER: if normalized: unit = 'W/m^2 sr^-1 (m^-1)^-1' else: unit = 'W/m^2 sr^-1' scale = 1.0 else: if normalized: unit = 'W/m^2 sr^-1 m^-1' else: unit = 'W/m^2 sr^-1' scale = 1.0 if lut: ntb = (tb_ * self.tb_scale).astype('int16') start = int(lut['tb'][0] * self.tb_scale) retv = {} bounds = 0, lut['radiance'].shape[0] - 1 index = np.clip(ntb - start, bounds[0], bounds[1]) retv['radiance'] = lut['radiance'][index] if retv['radiance'].ravel().shape[0] == 1: retv['radiance'] = retv['radiance'][0] retv['unit'] = unit retv['scale'] = scale return retv planck = self.blackbody_function(self.wavelength_or_wavenumber, tb_) * self.response if normalized: radiance = integrate.trapz(planck, self.wavelength_or_wavenumber) / self.rsr_integral else: radiance = integrate.trapz(planck, self.wavelength_or_wavenumber) return {'radiance': radiance, 'unit': unit, 'scale': scale}
[ "def", "tb2radiance", "(", "self", ",", "tb_", ",", "*", "*", "kwargs", ")", ":", "lut", "=", "kwargs", ".", "get", "(", "'lut'", ",", "None", ")", "normalized", "=", "kwargs", ".", "get", "(", "'normalized'", ",", "True", ")", "if", "self", ".", ...
35.981132
21.358491
def translate_gene(self, gene): """ Translate a gene with binary DNA into a base-10 floating point real number. Parses the DNA in this manner: 1. The first bit determines the sign of the integer portion of the result (0=positive, 1=negative) 2. The next ``significand_length`` bits of the DNA are converted into a base-10 integer, and given a positive/negative sign based on step (1). 3. The next bit determines the sign of the exponent portion of the result (0=positive, 1=negative) 4. The remaining bits in the DNA are converted into a base-10 integer, and given a positive/negative sign based on step (3). 5. The result of step (2) is multiplied by 10 raised to the power of the result of step (4). Example: let DNA="001111", significand_length=3 1. "0" indicates a positive sign for the integer portion 2. "011" is converted into the base-10 integer 3, its sign stays positive due to step (1) 3. "1" indicates a negative sign for the exponent portion 4. The remaining "1" bit is converted into the base-10 integer 1 and becomes -1 due to step (3) 5. The final result becomes: 3 * 10^-1 = 0.3 """ if self.signed: sign = 1 if gene.dna[0] == '0' else -1 base_start_idx = 1 else: sign = 1 base_start_idx = 0 base = sign * int(gene.dna[base_start_idx:base_start_idx + self.significand_length], base=2) exponent_sign = 1 if gene.dna[1 + self.significand_length] == '0' else -1 exponent = exponent_sign * int(gene.dna[self.significand_length + 2:], base=2) return float(base * 10 ** exponent)
[ "def", "translate_gene", "(", "self", ",", "gene", ")", ":", "if", "self", ".", "signed", ":", "sign", "=", "1", "if", "gene", ".", "dna", "[", "0", "]", "==", "'0'", "else", "-", "1", "base_start_idx", "=", "1", "else", ":", "sign", "=", "1", ...
53.333333
30.909091
def add_arguments(self, parser): """ Add arguments to the command parser. Uses argparse syntax. See documentation at https://docs.python.org/3/library/argparse.html. """ parser.add_argument( '--start', '-s', default=0, type=int, help=u"The Submission.id at which to begin updating rows. 0 by default." ) parser.add_argument( '--chunk', '-c', default=1000, type=int, help=u"Batch size, how many rows to update in a given transaction. Default 1000.", ) parser.add_argument( '--wait', '-w', default=2, type=int, help=u"Wait time between transactions, in seconds. Default 2.", )
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--start'", ",", "'-s'", ",", "default", "=", "0", ",", "type", "=", "int", ",", "help", "=", "u\"The Submission.id at which to begin updating rows. 0 by default.\""...
31.36
19.44
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the data encoding the ExtensionInformation object to a stream. Args: ostream (Stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. """ tstream = BytearrayStream() self.extension_name.write(tstream, kmip_version=kmip_version) if self.extension_tag is not None: self.extension_tag.write(tstream, kmip_version=kmip_version) if self.extension_type is not None: self.extension_type.write(tstream, kmip_version=kmip_version) self.length = tstream.length() super(ExtensionInformation, self).write( ostream, kmip_version=kmip_version ) ostream.write(tstream.buffer)
[ "def", "write", "(", "self", ",", "ostream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "tstream", "=", "BytearrayStream", "(", ")", "self", ".", "extension_name", ".", "write", "(", "tstream", ",", "kmip_version", "=...
39.692308
21.461538
def get_archives(self, offset=None, count=None, session_id=None): """Returns an ArchiveList, which is an array of archives that are completed and in-progress, for your API key. :param int: offset Optional. The index offset of the first archive. 0 is offset of the most recently started archive. 1 is the offset of the archive that started prior to the most recent archive. If you do not specify an offset, 0 is used. :param int: count Optional. The number of archives to be returned. The maximum number of archives returned is 1000. :param string: session_id Optional. Used to list archives for a specific session ID. :rtype: An ArchiveList object, which is an array of Archive objects. """ params = {} if offset is not None: params['offset'] = offset if count is not None: params['count'] = count if session_id is not None: params['sessionId'] = session_id endpoint = self.endpoints.archive_url() + "?" + urlencode(params) response = requests.get( endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code < 300: return ArchiveList(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 404: raise NotFoundError("Archive not found") else: raise RequestError("An unexpected error occurred", response.status_code)
[ "def", "get_archives", "(", "self", ",", "offset", "=", "None", ",", "count", "=", "None", ",", "session_id", "=", "None", ")", ":", "params", "=", "{", "}", "if", "offset", "is", "not", "None", ":", "params", "[", "'offset'", "]", "=", "offset", "...
44.342857
23.457143
def parse_line(self, line): """ For each line we are passed, call the XML parser. Returns the line if we are outside one of the ignored tables, otherwise returns the empty string. @param line: the line of the LIGO_LW XML file to be parsed @type line: string @return: the line of XML passed in or the null string @rtype: string """ self.__p.Parse(line) if self.__in_table: self.__silent = 1 if not self.__silent: ret = line else: ret = "" if not self.__in_table: self.__silent = 0 return ret
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "self", ".", "__p", ".", "Parse", "(", "line", ")", "if", "self", ".", "__in_table", ":", "self", ".", "__silent", "=", "1", "if", "not", "self", ".", "__silent", ":", "ret", "=", "line", "...
25.227273
19.681818
def filterAcceptsRow(self, source_row, source_parent): """Exclude items in `self.excludes`""" model = self.sourceModel() item = model.items[source_row] key = getattr(item, "filter", None) if key is not None: regex = self.filterRegExp() if regex.pattern(): match = regex.indexIn(key) return False if match == -1 else True # --- Check if any family assigned to the plugin is in allowed families for role, values in self.includes.items(): includes_list = [([x] if isinstance(x, (list, tuple)) else x) for x in getattr(item, role, None)] return any(include in values for include in includes_list) for role, values in self.excludes.items(): data = getattr(item, role, None) if data in values: return False return super(ProxyModel, self).filterAcceptsRow( source_row, source_parent)
[ "def", "filterAcceptsRow", "(", "self", ",", "source_row", ",", "source_parent", ")", ":", "model", "=", "self", ".", "sourceModel", "(", ")", "item", "=", "model", ".", "items", "[", "source_row", "]", "key", "=", "getattr", "(", "item", ",", "\"filter\...
41.208333
15.541667
def add_checker(self, checker): """walk to the checker's dir and collect visit and leave methods""" # XXX : should be possible to merge needed_checkers and add_checker vcids = set() lcids = set() visits = self.visit_events leaves = self.leave_events for member in dir(checker): cid = member[6:] if cid == "default": continue if member.startswith("visit_"): v_meth = getattr(checker, member) # don't use visit_methods with no activated message: if self._is_method_enabled(v_meth): visits[cid].append(v_meth) vcids.add(cid) elif member.startswith("leave_"): l_meth = getattr(checker, member) # don't use leave_methods with no activated message: if self._is_method_enabled(l_meth): leaves[cid].append(l_meth) lcids.add(cid) visit_default = getattr(checker, "visit_default", None) if visit_default: for cls in nodes.ALL_NODE_CLASSES: cid = cls.__name__.lower() if cid not in vcids: visits[cid].append(visit_default)
[ "def", "add_checker", "(", "self", ",", "checker", ")", ":", "# XXX : should be possible to merge needed_checkers and add_checker", "vcids", "=", "set", "(", ")", "lcids", "=", "set", "(", ")", "visits", "=", "self", ".", "visit_events", "leaves", "=", "self", "...
43.413793
11.241379
def statistics(self): """ Get the dictionary with the count of the check-statuses :return: dict(str -> int) """ result = {} for r in self.results: result.setdefault(r.status, 0) result[r.status] += 1 return result
[ "def", "statistics", "(", "self", ")", ":", "result", "=", "{", "}", "for", "r", "in", "self", ".", "results", ":", "result", ".", "setdefault", "(", "r", ".", "status", ",", "0", ")", "result", "[", "r", ".", "status", "]", "+=", "1", "return", ...
25.818182
13.454545
def get_info(handle): """Get information about this current console window (for Microsoft Windows only). Raises IOError if attempt to get information fails (if there is no console window). Don't forget to call _WindowsCSBI.initialize() once in your application before calling this method. Positional arguments: handle -- either _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.HANDLE_STDOUT. Returns: Dictionary with different integer values. Keys are: buffer_width -- width of the buffer (Screen Buffer Size in cmd.exe layout tab). buffer_height -- height of the buffer (Screen Buffer Size in cmd.exe layout tab). terminal_width -- width of the terminal window. terminal_height -- height of the terminal window. bg_color -- current background color (http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088). fg_color -- current text color code. """ # Query Win32 API. csbi = _WindowsCSBI.CSBI() try: if not _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo(handle, ctypes.byref(csbi)): raise IOError('Unable to get console screen buffer info from win32 API.') except ctypes.ArgumentError: raise IOError('Unable to get console screen buffer info from win32 API.') # Parse data. result = dict( buffer_width=int(csbi.dwSize.X - 1), buffer_height=int(csbi.dwSize.Y), terminal_width=int(csbi.srWindow.Right - csbi.srWindow.Left), terminal_height=int(csbi.srWindow.Bottom - csbi.srWindow.Top), bg_color=int(csbi.wAttributes & 240), fg_color=int(csbi.wAttributes % 16), ) return result
[ "def", "get_info", "(", "handle", ")", ":", "# Query Win32 API.", "csbi", "=", "_WindowsCSBI", ".", "CSBI", "(", ")", "try", ":", "if", "not", "_WindowsCSBI", ".", "WINDLL", ".", "kernel32", ".", "GetConsoleScreenBufferInfo", "(", "handle", ",", "ctypes", "....
47.810811
28.486486
def parse(readDataInstance, numberOfEntries): """ Returns a L{ImageBoundForwarderRef} array where every element is a L{ImageBoundForwarderRefEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object with the corresponding data to generate a new L{ImageBoundForwarderRef} object. @type numberOfEntries: int @param numberOfEntries: The number of C{IMAGE_BOUND_FORWARDER_REF} entries in the array. @rtype: L{ImageBoundForwarderRef} @return: A new L{ImageBoundForwarderRef} object. @raise DataLengthException: If the L{ReadData} instance has less data than C{NumberOfEntries} * sizeof L{ImageBoundForwarderRefEntry}. """ imageBoundForwarderRefsList = ImageBoundForwarderRef() dLength = len(readDataInstance) entryLength = ImageBoundForwarderRefEntry().sizeof() toRead = numberOfEntries * entryLength if dLength >= toRead: for i in range(numberOfEntries): entryData = readDataInstance.read(entryLength) rd = utils.ReadData(entryData) imageBoundForwarderRefsList.append(ImageBoundForwarderRefEntry.parse(rd)) else: raise excep.DataLengthException("Not enough bytes to read.") return imageBoundForwarderRefsList
[ "def", "parse", "(", "readDataInstance", ",", "numberOfEntries", ")", ":", "imageBoundForwarderRefsList", "=", "ImageBoundForwarderRef", "(", ")", "dLength", "=", "len", "(", "readDataInstance", ")", "entryLength", "=", "ImageBoundForwarderRefEntry", "(", ")", ".", ...
47.517241
25.862069
def _get_events(self, result): """"Internal method for being able to run unit tests.""" events = [] for event_data in result: event = Event.factory(event_data) if event is not None: events.append(event) if isinstance(event, DeviceStateChangedEvent): # change device state if self.__devices[event.device_url] is None: raise Exception( "Received device change " + "state for unknown device '" + event.device_url + "'") self.__devices[event.device_url].set_active_states( event.states) return events
[ "def", "_get_events", "(", "self", ",", "result", ")", ":", "events", "=", "[", "]", "for", "event_data", "in", "result", ":", "event", "=", "Event", ".", "factory", "(", "event_data", ")", "if", "event", "is", "not", "None", ":", "events", ".", "app...
34.545455
18.090909
def get_ptn(unit): """获取文本行的中文字符的个数 Keyword arguments: unit -- 文本行 Return: ptn -- 纯文本数 """ ptn = 0 match_re = re.findall(chinese, unit) if match_re: string = ''.join(match_re) ptn = len(string) return int(ptn)
[ "def", "get_ptn", "(", "unit", ")", ":", "ptn", "=", "0", "match_re", "=", "re", ".", "findall", "(", "chinese", ",", "unit", ")", "if", "match_re", ":", "string", "=", "''", ".", "join", "(", "match_re", ")", "ptn", "=", "len", "(", "string", ")...
20.857143
15.357143
def add_topic(self, topic): """Add a topic to the list of topics tracked via metadata. Arguments: topic (str): topic to track Returns: Future: resolves after metadata request/response """ if topic in self._topics: return Future().success(set(self._topics)) self._topics.add(topic) return self.cluster.request_update()
[ "def", "add_topic", "(", "self", ",", "topic", ")", ":", "if", "topic", "in", "self", ".", "_topics", ":", "return", "Future", "(", ")", ".", "success", "(", "set", "(", "self", ".", "_topics", ")", ")", "self", ".", "_topics", ".", "add", "(", "...
28.5
16.714286
def _connected(cm, nodes, connection): """Test connectivity for the connectivity matrix.""" if nodes is not None: cm = cm[np.ix_(nodes, nodes)] num_components, _ = connected_components(cm, connection=connection) return num_components < 2
[ "def", "_connected", "(", "cm", ",", "nodes", ",", "connection", ")", ":", "if", "nodes", "is", "not", "None", ":", "cm", "=", "cm", "[", "np", ".", "ix_", "(", "nodes", ",", "nodes", ")", "]", "num_components", ",", "_", "=", "connected_components",...
36.571429
14.571429
def adjustChildren(self, delta, secs=False): """ Shifts the children for this item by the inputed number of days. :param delta | <int> """ if self.adjustmentsBlocked('children'): return if self.itemStyle() != self.ItemStyle.Group: return if not delta: return for c in range(self.childCount()): child = self.child(c) child.blockAdjustments('range', True) if secs: dstart = child.dateTimeStart() dstart = dstart.addSecs(delta) child.setDateStart(dstart.date()) child.setTimeStart(dstart.time()) else: child.setDateStart(child.dateStart().addDays(delta)) child.blockAdjustments('range', False)
[ "def", "adjustChildren", "(", "self", ",", "delta", ",", "secs", "=", "False", ")", ":", "if", "self", ".", "adjustmentsBlocked", "(", "'children'", ")", ":", "return", "if", "self", ".", "itemStyle", "(", ")", "!=", "self", ".", "ItemStyle", ".", "Gro...
33.653846
15.346154
def project_path(cls, user, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( "users/{user}/projects/{project}", user=user, project=project )
[ "def", "project_path", "(", "cls", ",", "user", ",", "project", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"users/{user}/projects/{project}\"", ",", "user", "=", "user", ",", "project", "=", "project", ")" ]
45
15.8
def remove_api_key_from_groups(self, api_key, body, **kwargs): # noqa: E501 """Remove API key from groups. # noqa: E501 An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/{apikey-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_api_key_from_groups(api_key, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str api_key: The ID of the API key to be removed from the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.remove_api_key_from_groups_with_http_info(api_key, body, **kwargs) # noqa: E501 else: (data) = self.remove_api_key_from_groups_with_http_info(api_key, body, **kwargs) # noqa: E501 return data
[ "def", "remove_api_key_from_groups", "(", "self", ",", "api_key", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "retur...
62.772727
37.090909
def _slr_build_parser_table(productionset): """SLR method to build parser table""" result = ParserTable() statesset = build_states_sets(productionset) for itemindex, itemset in enumerate(statesset): LOG.debug("_slr_build_parser_table: Evaluating itemset:" + str(itemset)) for symbol in productionset.getSymbols() + [EndSymbol()]: numberoptions = 0 for lritem in itemset.itemlist: #if cursor is before a terminal, and there is a transition to another itemset with the following terminal, append shift rule if isinstance(symbol, TerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol): destinationstate = statesset.index(itemset.get_transition(symbol)) result.append(itemindex, symbol, "Shift", destinationstate) numberoptions += 1 if isinstance(symbol, NonTerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol): destinationstate = statesset.index(itemset.get_transition(symbol)) result.append_goto(itemindex, symbol, destinationstate) #if cursor is at the end of the rule, then append reduce rule and go transition if lritem.previous_symbol() == symbol and lritem.is_last_position() and symbol != Extended_S: for x in productionset.next_lookup(symbol): if isinstance(x, Grammar): result.append(itemindex, TerminalSymbol(x), "Reduce", None, lritem.rule) elif isinstance(x, Symbol): result.append(itemindex, x, "Reduce", None, lritem.rule) else: raise TypeError(x) numberoptions += 1 #if cursor is at the end of main rule, and current symbol is end, then append accept rule if symbol == EndSymbol() and lritem.previous_symbol() == productionset.initialsymbol and lritem.next_symbol() == EndSymbol(): result.append(itemindex, symbol, "Accept", None) numberoptions += 1 if not numberoptions: LOG.info("No rule found to generate a new parsertable entry ") LOG.debug("symbol: " + str(symbol)) LOG.debug("itemset: " + str(itemset)) elif numberoptions > 1: #FIXME can it count duplicated entries? 
raise Exception("LR Conflict %s" % symbol) return result
[ "def", "_slr_build_parser_table", "(", "productionset", ")", ":", "result", "=", "ParserTable", "(", ")", "statesset", "=", "build_states_sets", "(", "productionset", ")", "for", "itemindex", ",", "itemset", "in", "enumerate", "(", "statesset", ")", ":", "LOG", ...
67.473684
31.210526
def read_env(path=None, recurse=True, stream=None, verbose=False, override=False): """Read a .env file into os.environ. If .env is not found in the directory from which this method is called, the default behavior is to recurse up the directory tree until a .env file is found. If you do not wish to recurse up the tree, you may pass False as a second positional argument. """ # By default, start search from the same file this function is called if path is None: frame = inspect.currentframe().f_back caller_dir = os.path.dirname(frame.f_code.co_filename) start = os.path.join(os.path.abspath(caller_dir)) else: start = path if recurse: for dirname in _walk_to_root(start): check_path = os.path.join(dirname, ".env") if os.path.exists(check_path): return load_dotenv( check_path, stream=stream, verbose=verbose, override=override ) else: if path is None: start = os.path.join(start, ".env") return load_dotenv(start, stream=stream, verbose=verbose, override=override)
[ "def", "read_env", "(", "path", "=", "None", ",", "recurse", "=", "True", ",", "stream", "=", "None", ",", "verbose", "=", "False", ",", "override", "=", "False", ")", ":", "# By default, start search from the same file this function is called", "if", "path", "i...
47.192308
21.923077
def start(cls, settings=None): """ RUN ME FIRST TO SETUP THE THREADED LOGGING http://victorlin.me/2012/08/good-logging-practice-in-python/ log - LIST OF PARAMETERS FOR LOGGER(S) trace - SHOW MORE DETAILS IN EVERY LOG LINE (default False) cprofile - True==ENABLE THE C-PROFILER THAT COMES WITH PYTHON (default False) USE THE LONG FORM TO SET THE FILENAME {"enabled": True, "filename": "cprofile.tab"} profile - True==ENABLE pyLibrary SIMPLE PROFILING (default False) (eg with Profiler("some description"):) USE THE LONG FORM TO SET FILENAME {"enabled": True, "filename": "profile.tab"} constants - UPDATE MODULE CONSTANTS AT STARTUP (PRIMARILY INTENDED TO CHANGE DEBUG STATE) """ global _Thread if not settings: return settings = wrap(settings) Log.stop() cls.settings = settings cls.trace = coalesce(settings.trace, False) if cls.trace: from mo_threads import Thread as _Thread _ = _Thread # ENABLE CPROFILE if settings.cprofile is False: settings.cprofile = {"enabled": False} elif settings.cprofile is True: if isinstance(settings.cprofile, bool): settings.cprofile = {"enabled": True, "filename": "cprofile.tab"} if settings.cprofile.enabled: from mo_threads import profiles profiles.enable_profilers(settings.cprofile.filename) if settings.profile is True or (is_data(settings.profile) and settings.profile.enabled): Log.error("REMOVED 2018-09-02, Activedata revision 3f30ff46f5971776f8ba18") # from mo_logs import profiles # # if isinstance(settings.profile, bool): # profiles.ON = True # settings.profile = {"enabled": True, "filename": "profile.tab"} # # if settings.profile.enabled: # profiles.ON = True if settings.constants: constants.set(settings.constants) if settings.log: cls.logging_multi = StructuredLogger_usingMulti() for log in listwrap(settings.log): Log.add_log(Log.new_instance(log)) from mo_logs.log_usingThread import StructuredLogger_usingThread cls.main_log = StructuredLogger_usingThread(cls.logging_multi)
[ "def", "start", "(", "cls", ",", "settings", "=", "None", ")", ":", "global", "_Thread", "if", "not", "settings", ":", "return", "settings", "=", "wrap", "(", "settings", ")", "Log", ".", "stop", "(", ")", "cls", ".", "settings", "=", "settings", "cl...
42.421053
23.403509
def csch(x, context=None): """ Return the hyperbolic cosecant of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_csch, (BigFloat._implicit_convert(x),), context, )
[ "def", "csch", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_csch", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ...
20.727273
14.545455
def get_agents(self, addr=True, agent_cls=None, as_coro=False): """Get agents from the slave environments. :param bool addr: If ``True``, returns only addresses of the agents, otherwise returns a :class:`Proxy` object for each agent. :param agent_cls: If specified, returns only agents that are members of that particular class. :param bool as_coro: If ``True``, returns a coroutine, otherwise runs the method in an event loop. :returns: A coroutine or list of :class:`Proxy` objects or addresses as specified by the input parameters. Slave environment managers are excluded from the returned list by default. Essentially, this method calls each slave environment manager's :meth:`creamas.mp.EnvManager.get_agents` asynchronously. .. note:: Calling each slave environment's manager might be costly in some situations. Therefore, it is advisable to store the returned agent list if the agent sets in the slave environments are not bound to change. """ async def slave_task(mgr_addr, addr=True, agent_cls=None): r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT) return await r_manager.get_agents(addr=addr, agent_cls=agent_cls) tasks = create_tasks(slave_task, self.addrs, addr, agent_cls) return run_or_coro(tasks, as_coro)
[ "def", "get_agents", "(", "self", ",", "addr", "=", "True", ",", "agent_cls", "=", "None", ",", "as_coro", "=", "False", ")", ":", "async", "def", "slave_task", "(", "mgr_addr", ",", "addr", "=", "True", ",", "agent_cls", "=", "None", ")", ":", "r_ma...
41.083333
26.833333
def file_matches(filename, patterns): """Does this filename match any of the patterns?""" return any(fnmatch.fnmatch(filename, pat) or fnmatch.fnmatch(os.path.basename(filename), pat) for pat in patterns)
[ "def", "file_matches", "(", "filename", ",", "patterns", ")", ":", "return", "any", "(", "fnmatch", ".", "fnmatch", "(", "filename", ",", "pat", ")", "or", "fnmatch", ".", "fnmatch", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", ...
47.6
7.8
def confirm_deliveries(self): """Set the channel to confirm that each message has been successfully delivered. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ self._confirming_deliveries = True confirm_frame = specification.Confirm.Select() return self.rpc_request(confirm_frame)
[ "def", "confirm_deliveries", "(", "self", ")", ":", "self", ".", "_confirming_deliveries", "=", "True", "confirm_frame", "=", "specification", ".", "Confirm", ".", "Select", "(", ")", "return", "self", ".", "rpc_request", "(", "confirm_frame", ")" ]
37.615385
17.076923
def delete_os_dummy_rtr(self, tenant_id, fw_dict, is_fw_virt=False):
    """Delete the OpenStack dummy router and record the result in the DB.

    Looks up the router id from ``fw_dict``; when the firewall is not
    virtual, removes the dummy interface/router via
    ``_delete_dummy_intf_rtr``.  The outcome (success/failure constant) is
    written back through ``update_fw_db_result`` — except when the router
    id is missing, in which case the method returns early WITHOUT updating
    the DB result.

    :param tenant_id: tenant whose dummy router is being deleted
    :param fw_dict: firewall attributes; 'tenant_name' and 'router_id'
        keys are read here
    :param is_fw_virt: if True, skip the actual interface/router deletion
    :return: True on success (or when deletion was skipped), False on error
    """
    ret = True
    tenant_name = fw_dict.get('tenant_name')
    try:
        rtr_id = fw_dict.get('router_id')
        if not rtr_id:
            # No router recorded for this firewall — nothing to delete.
            # NOTE(review): this early return skips update_fw_db_result,
            # so no DEL_FAIL status is stored — confirm that is intended.
            LOG.error("Invalid router id, deleting dummy interface"
                      " failed")
            return False
        if not is_fw_virt:
            ret = self._delete_dummy_intf_rtr(tenant_id, tenant_name,
                                              rtr_id)
    except Exception as exc:
        # The delete helper (_delete_dummy_intf_rtr) is presumed to clean
        # up after itself on error — the original comment referenced
        # _attach_dummy_intf_rtr, likely a copy-paste leftover; verify.
        LOG.error("Deletion of Openstack Router failed tenant "
                  "%(tenant)s, Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        ret = False
    # Persist the deletion outcome for this tenant.
    if ret:
        res = fw_const.OS_DUMMY_RTR_DEL_SUCCESS
    else:
        res = fw_const.OS_DUMMY_RTR_DEL_FAIL
    self.update_fw_db_result(tenant_id, os_status=res)
    return ret
[ "def", "delete_os_dummy_rtr", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", "=", "False", ")", ":", "ret", "=", "True", "tenant_name", "=", "fw_dict", ".", "get", "(", "'tenant_name'", ")", "try", ":", "rtr_id", "=", "fw_dict", ".", "...
43.461538
16.846154
def parse_trips(self, xml, requested_time):
    """Parse the NS API xml result into Trip objects"""
    parsed = xmltodict.parse(xml)
    if 'error' in parsed:
        print('Error in trips: ' + parsed['error']['message'])
        return None
    try:
        # Build one Trip per travel option in the response.
        return [Trip(option, requested_time)
                for option in parsed['ReisMogelijkheden']['ReisMogelijkheid']]
    except TypeError:
        # No options found: parsed['ReisMogelijkheden'] is None.
        return None
[ "def", "parse_trips", "(", "self", ",", "xml", ",", "requested_time", ")", ":", "obj", "=", "xmltodict", ".", "parse", "(", "xml", ")", "trips", "=", "[", "]", "if", "'error'", "in", "obj", ":", "print", "(", "'Error in trips: '", "+", "obj", "[", "'...
29.55
18.65
def contains_empty(features):
    """Check features data are not empty

    :param features: The features data to check.
    :type features: list of numpy arrays.
    :return: True if the list itself is empty/falsy or any array has a
        zero-length first dimension, False otherwise.
    """
    if not features:
        return True
    return any(feature.shape[0] == 0 for feature in features)
[ "def", "contains_empty", "(", "features", ")", ":", "if", "not", "features", ":", "return", "True", "for", "feature", "in", "features", ":", "if", "feature", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "True", "return", "False" ]
24.2
17.333333
def max_runs_reached(self):
    """
    :return: whether all file paths have been processed max_runs times
    """
    limit = self._max_runs
    if limit == -1:
        # Unlimited runs.
        return False
    counts = self._run_count
    if any(counts[file_path] < limit for file_path in self._file_paths):
        return False
    # All files are done; also require enough heartbeats.
    return counts[self._heart_beat_key] >= limit
[ "def", "max_runs_reached", "(", "self", ")", ":", "if", "self", ".", "_max_runs", "==", "-", "1", ":", "# Unlimited runs.", "return", "False", "for", "file_path", "in", "self", ".", "_file_paths", ":", "if", "self", ".", "_run_count", "[", "file_path", "]"...
36.333333
14.166667
def main(args=None):
    """Plot rate/time-course traces evaluated from NeuroML2 channel files.

    For each ComponentType found in the listed channel files, evaluates the
    component over a sweep of membrane voltages (-100 mV .. +99 mV, in V)
    and plots whichever of the 'x' / 't' / 'r' variables the evaluation
    yields.

    :param args: unused; kept for a conventional CLI entry-point signature.
    """
    # Voltage sweep: -0.100 V to +0.099 V in 1 mV steps.
    vs = [(v - 100) * 0.001 for v in range(200)]

    for f in ['IM.channel.nml', 'Kd.channel.nml']:
        nml_doc = pynml.read_neuroml2_file(f)
        for ct in nml_doc.ComponentType:
            ys = []
            for v in vs:
                req_variables = {'v': '%sV' % v, 'vShift': '10mV'}
                vals = pynml.evaluate_component(ct, req_variables=req_variables)
                print(vals)  # was a Python-2 print statement; now valid Py3
                # Collect whichever output variables this component exposes.
                if 'x' in vals:
                    ys.append(vals['x'])
                if 't' in vals:
                    ys.append(vals['t'])
                if 'r' in vals:
                    ys.append(vals['r'])
            # Unused return value (axes) intentionally discarded.
            pynml.generate_plot([vs], [ys],
                                "Some traces from %s in %s" % (ct.name, f),
                                show_plot_already=False)
            print(vals)
    plt.show()
[ "def", "main", "(", "args", "=", "None", ")", ":", "vs", "=", "[", "(", "v", "-", "100", ")", "*", "0.001", "for", "v", "in", "range", "(", "200", ")", "]", "for", "f", "in", "[", "'IM.channel.nml'", ",", "'Kd.channel.nml'", "]", ":", "nml_doc", ...
30.103448
19
def ivorn_prefix_present(session, ivorn_prefix):
    """
    Predicate, returns whether there is an entry in the database with matching
    IVORN prefix.
    """
    pattern = '{}%'.format(ivorn_prefix)
    matching = session.query(Voevent.ivorn).filter(Voevent.ivorn.like(pattern))
    return matching.count() > 0
[ "def", "ivorn_prefix_present", "(", "session", ",", "ivorn_prefix", ")", ":", "n_matches", "=", "session", ".", "query", "(", "Voevent", ".", "ivorn", ")", ".", "filter", "(", "Voevent", ".", "ivorn", ".", "like", "(", "'{}%'", ".", "format", "(", "ivorn...
37.75
15.25
def push_concurrency_history_item(self, state, number_concurrent_threads):
    """Adds a new concurrency-history-item to the history item list

    A concurrent history item stores information about the point in time where a certain number
    of states is launched concurrently
    (e.g. in a barrier concurrency state).

    :param state: the state that launches the state group
    :param number_concurrent_threads: the number of states that are launched
    :return: the result of pushing the new item onto the history
    """
    last_history_item = self.get_last_history_item()
    # Reuse the already-fetched item rather than calling
    # get_last_history_item() a second time: this avoids a redundant lookup
    # and guarantees the ConcurrencyItem's predecessor and the push target
    # are the SAME item even if the history changes between the two calls.
    return_item = ConcurrencyItem(state, last_history_item,
                                  number_concurrent_threads,
                                  state.run_id,
                                  self.execution_history_storage)
    return self._push_item(last_history_item, return_item)
[ "def", "push_concurrency_history_item", "(", "self", ",", "state", ",", "number_concurrent_threads", ")", ":", "last_history_item", "=", "self", ".", "get_last_history_item", "(", ")", "return_item", "=", "ConcurrencyItem", "(", "state", ",", "self", ".", "get_last_...
54.866667
26.866667
def _add_process_guards(self, engine):
    """Add multiprocessing guards.

    Forces a connection to be reconnected if it is detected
    as having been shared to a sub-process.

    Registers two SQLAlchemy event listeners on ``engine``:

    * ``connect`` — stamps each new DBAPI connection with the pid of the
      process that created it.
    * ``checkout`` — on every pool checkout, compares the stamped pid with
      the current pid; a mismatch means the connection was inherited across
      a fork, so it is discarded and a ``DisconnectionError`` is raised
      (SQLAlchemy's pool treats that error as "retry with a fresh
      connection").
    """
    @sqlalchemy.event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        # Remember which process opened this connection.
        connection_record.info['pid'] = os.getpid()

    @sqlalchemy.event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        pid = os.getpid()
        if connection_record.info['pid'] != pid:
            self.logger.debug(
                "Parent process %(orig)s forked (%(newproc)s) with an open database connection, "
                "which is being discarded and recreated." %
                {"newproc": pid, "orig": connection_record.info['pid']})
            # Drop the shared connection so the pool cannot hand it out again.
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                "Connection record belongs to pid %s, attempting to check out in pid %s" % (
                    connection_record.info['pid'], pid)
            )
[ "def", "_add_process_guards", "(", "self", ",", "engine", ")", ":", "@", "sqlalchemy", ".", "event", ".", "listens_for", "(", "engine", ",", "\"connect\"", ")", "def", "connect", "(", "dbapi_connection", ",", "connection_record", ")", ":", "connection_record", ...
47.166667
25.291667
def is_resource_protected(self, request, **kwargs):
    """
    Returns true if and only if the resource's URL is *not* exempt and *is*
    protected.
    """
    exempt_patterns = self.get_exempt_url_patterns()
    protected_patterns = self.get_protected_url_patterns()
    path = request.path_info.lstrip('/')
    # Exemption wins over protection.
    if any(pattern.match(path) for pattern in exempt_patterns):
        return False
    return any(pattern.match(path) for pattern in protected_patterns)
[ "def", "is_resource_protected", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "exempt_urls", "=", "self", ".", "get_exempt_url_patterns", "(", ")", "protected_urls", "=", "self", ".", "get_protected_url_patterns", "(", ")", "path", "=", "requ...
33.705882
20.411765
def move(self, entries, directory):
    """
    Move one or more entries (file or directory) to the destination directory

    :param list entries: a list of source entries (:class:`.BaseFile` object)
    :param directory: destination directory
    :return: whether the action is successful
    :raise: :class:`.APIError` if something bad happened
    """
    # Collect the id of every source entry, validating each one.
    source_ids = []
    for item in entries:
        if isinstance(item, File):
            source_ids.append(item.fid)
        elif isinstance(item, Directory):
            source_ids.append(item.cid)
        else:
            raise APIError('Invalid BaseFile instance for an entry.')
    if not isinstance(directory, Directory):
        raise APIError('Invalid destination directory.')
    if not self._req_files_move(directory.cid, source_ids):
        raise APIError('Error moving entries.')
    # The move succeeded: update cached state of the moved files.
    for item in entries:
        if isinstance(item, File):
            item.cid = directory.cid
            item.reload()
    return True
[ "def", "move", "(", "self", ",", "entries", ",", "directory", ")", ":", "fcids", "=", "[", "]", "for", "entry", "in", "entries", ":", "if", "isinstance", "(", "entry", ",", "File", ")", ":", "fcid", "=", "entry", ".", "fid", "elif", "isinstance", "...
36.966667
14.433333
def value(self, node, parent=None):
    """Return the *weighted* size (contribution) of this node.

    The contribution is the node's ``totsize`` split evenly across its
    parents (or taken whole when it has none), cached on the node under
    the 'contribution' key so repeated calls are cheap.

    :param node: mapping with optional 'totsize', 'parents' and cached
        'contribution' keys
    :param parent: unused; kept for interface compatibility
    :return: integer contribution of the node
    """
    try:
        return node['contribution']
    except KeyError:
        # `except KeyError, err` was Python-2-only syntax (SyntaxError on
        # Py3) and the bound name was never used.
        parent_count = len(node.get('parents', ())) or 1
        contribution = int(node.get('totsize', 0) / float(parent_count))
        node['contribution'] = contribution  # cache for next time
        return contribution
[ "def", "value", "(", "self", ",", "node", ",", "parent", "=", "None", ")", ":", "# this is the *weighted* size/contribution of the node ", "try", ":", "return", "node", "[", "'contribution'", "]", "except", "KeyError", ",", "err", ":", "contribution", "=", "int"...
45.777778
15.111111
def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed,
                                     trigger_id, duration, author, comment):
    """Schedule a downtime for each host of a hostgroup

    Format of the line that triggers function call::

    SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>;
    <fixed>;<trigger_id>;<duration>;<author>;<comment>

    :param hostgroup: hostgroup to schedule
    :type hostgroup: alignak.objects.hostgroup.Hostgroup
    :param start_time: downtime start time
    :type start_time:
    :param end_time: downtime end time
    :type end_time:
    :param fixed: is downtime fixed
    :type fixed:
    :param trigger_id: downtime id that triggered this one
    :type trigger_id: str
    :param duration: downtime duration
    :type duration: int
    :param author: downtime author
    :type author: str
    :param comment: downtime comment
    :type comment: str
    :return: None
    """
    known_hosts = self.daemon.hosts
    for host_uuid in hostgroup.get_hosts():
        # Silently skip member ids the daemon does not know about.
        if host_uuid not in known_hosts:
            continue
        self.schedule_host_downtime(known_hosts[host_uuid], start_time, end_time,
                                    fixed, trigger_id, duration, author, comment)
[ "def", "schedule_hostgroup_host_downtime", "(", "self", ",", "hostgroup", ",", "start_time", ",", "end_time", ",", "fixed", ",", "trigger_id", ",", "duration", ",", "author", ",", "comment", ")", ":", "for", "host_id", "in", "hostgroup", ".", "get_hosts", "(",...
43.16129
16.83871
def requires_authentication(fn):
    """
    Requires that the calling Subject be authenticated before allowing access.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        current_subject = WebYosai.get_current_subject()
        if not current_subject.authenticated:
            # Not authenticated: hand off to the web registry, which raises
            # the framework-appropriate unauthorized response.
            msg = "The current Subject is not authenticated. ACCESS DENIED."
            raise WebYosai.get_current_webregistry().raise_unauthorized(msg)
        return fn(*args, **kwargs)
    return wrapper
[ "def", "requires_authentication", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "subject", "=", "WebYosai", ".", "get_current_subject", "(", ")", "if", "not", ...
33.933333
20.333333