text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def parameters(self) -> List['Parameter']: """Return a list of parameter objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ Parameter(_lststr, _type_to_spans, span, 'Parameter') for span in self._subspans('Parameter')]
[ "def", "parameters", "(", "self", ")", "->", "List", "[", "'Parameter'", "]", ":", "_lststr", "=", "self", ".", "_lststr", "_type_to_spans", "=", "self", ".", "_type_to_spans", "return", "[", "Parameter", "(", "_lststr", ",", "_type_to_spans", ",", "span", ...
42.571429
11
def get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code): """ Return a cache key for a placeholder. This key is used to cache the entire output of a placeholder. """ parent_type = ContentType.objects.get_for_model(parent_object) return _get_placeholder_cache_key_for_id( parent_type.id, parent_object.pk, placeholder_name, language_code )
[ "def", "get_placeholder_cache_key_for_parent", "(", "parent_object", ",", "placeholder_name", ",", "language_code", ")", ":", "parent_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "parent_object", ")", "return", "_get_placeholder_cache_key_for_id", ...
32.230769
19
def get_distribution_names(self): """ Return all the distribution names known to this locator. """ result = set() page = self.get_page(self.base_url) if not page: raise DistlibException('Unable to get %s' % self.base_url) for match in self._distname_re.finditer(page.data): result.add(match.group(1)) return result
[ "def", "get_distribution_names", "(", "self", ")", ":", "result", "=", "set", "(", ")", "page", "=", "self", ".", "get_page", "(", "self", ".", "base_url", ")", "if", "not", "page", ":", "raise", "DistlibException", "(", "'Unable to get %s'", "%", "self", ...
35.636364
12.909091
def _parseDeclaration(self, src): """declaration : ident S* ':' S* expr prio? | /* empty */ ; """ # property propertyName, src = self._getIdent(src) if propertyName is not None: src = src.lstrip() # S* : S* if src[:1] in (':', '='): # Note: we are being fairly flexable here... technically, the # ":" is *required*, but in the name of flexibility we # suppor a null transition, as well as an "=" transition src = src[1:].lstrip() src, property = self._parseDeclarationProperty(src, propertyName) else: property = None return src, property
[ "def", "_parseDeclaration", "(", "self", ",", "src", ")", ":", "# property", "propertyName", ",", "src", "=", "self", ".", "_getIdent", "(", "src", ")", "if", "propertyName", "is", "not", "None", ":", "src", "=", "src", ".", "lstrip", "(", ")", "# S* :...
31.478261
19
def sslv2_derive_keys(self, key_material): """ There is actually only one key, the CLIENT-READ-KEY or -WRITE-KEY. Note that skip_first is opposite from the one with SSLv3 derivation. Also, if needed, the IV should be set elsewhere. """ skip_first = True if ((self.connection_end == "client" and self.row == "read") or (self.connection_end == "server" and self.row == "write")): skip_first = False cipher_alg = self.ciphersuite.cipher_alg start = 0 if skip_first: start += cipher_alg.key_len end = start + cipher_alg.key_len cipher_secret = key_material[start:end] self.cipher = cipher_alg(cipher_secret) self.debug_repr("cipher_secret", cipher_secret)
[ "def", "sslv2_derive_keys", "(", "self", ",", "key_material", ")", ":", "skip_first", "=", "True", "if", "(", "(", "self", ".", "connection_end", "==", "\"client\"", "and", "self", ".", "row", "==", "\"read\"", ")", "or", "(", "self", ".", "connection_end"...
35.727273
19
def grid_1d(self): """ The arc second-grid of (y,x) coordinates of every pixel. This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \ value y value in arc seconds. """ return grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=self.shape, pixel_scales=self.pixel_scales, origin=self.origin)
[ "def", "grid_1d", "(", "self", ")", ":", "return", "grid_util", ".", "regular_grid_1d_from_shape_pixel_scales_and_origin", "(", "shape", "=", "self", ".", "shape", ",", "pixel_scales", "=", "self", ".", "pixel_scales", ",", "origin", "=", "self", ".", "origin", ...
60.888889
35.444444
def dist_dir(self): '''The dist dir at which to place the finished distribution.''' if self.distribution is None: warning('Tried to access {}.dist_dir, but {}.distribution ' 'is None'.format(self, self)) exit(1) return self.distribution.dist_dir
[ "def", "dist_dir", "(", "self", ")", ":", "if", "self", ".", "distribution", "is", "None", ":", "warning", "(", "'Tried to access {}.dist_dir, but {}.distribution '", "'is None'", ".", "format", "(", "self", ",", "self", ")", ")", "exit", "(", "1", ")", "ret...
43.857143
16.714286
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None): """Save ID3v2 data to the AIFF file""" fileobj = filething.fileobj iff_file = IFFFile(fileobj) if u'ID3' not in iff_file: iff_file.insert_chunk(u'ID3') chunk = iff_file[u'ID3'] try: data = self._prepare_data( fileobj, chunk.data_offset, chunk.data_size, v2_version, v23_sep, padding) except ID3Error as e: reraise(error, e, sys.exc_info()[2]) new_size = len(data) new_size += new_size % 2 # pad byte assert new_size % 2 == 0 chunk.resize(new_size) data += (new_size - len(data)) * b'\x00' assert new_size == len(data) chunk.write(data)
[ "def", "save", "(", "self", ",", "filething", "=", "None", ",", "v2_version", "=", "4", ",", "v23_sep", "=", "'/'", ",", "padding", "=", "None", ")", ":", "fileobj", "=", "filething", ".", "fileobj", "iff_file", "=", "IFFFile", "(", "fileobj", ")", "...
29.615385
17.153846
def update_input(filelist, ivmlist=None, removed_files=None): """ Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present. """ newfilelist = [] if removed_files == []: return filelist, ivmlist else: sci_ivm = list(zip(filelist, ivmlist)) for f in removed_files: result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ] ivmlist = [el[1] for el in sci_ivm] newfilelist = [el[0] for el in sci_ivm] return newfilelist, ivmlist
[ "def", "update_input", "(", "filelist", ",", "ivmlist", "=", "None", ",", "removed_files", "=", "None", ")", ":", "newfilelist", "=", "[", "]", "if", "removed_files", "==", "[", "]", ":", "return", "filelist", ",", "ivmlist", "else", ":", "sci_ivm", "=",...
34.3125
14.1875
def magic_read_dict(path, data=None, sort_by_this_name=None, return_keys=False): """ Read a magic-formatted tab-delimited file and return a dictionary of dictionaries, with this format: {'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....} return data, file_type, and keys (if return_keys is true) """ DATA = {} #fin = open(path, 'r') #first_line = fin.readline() lines = open_file(path) if not lines: if return_keys: return {}, 'empty_file', None else: return {}, 'empty_file' first_line = lines.pop(0) if first_line[0] == "s" or first_line[1] == "s": delim = ' ' elif first_line[0] == "t" or first_line[1] == "t": delim = '\t' else: print('-W- error reading ', path) if return_keys: return {}, 'bad_file', None else: return {}, 'bad_file' file_type = first_line.strip('\n').strip('\r').split(delim)[1] item_type = file_type #item_type = file_type.split('_')[1][:-1] if sort_by_this_name: pass elif item_type == 'age': sort_by_this_name = "by_line_number" else: sort_by_this_name = item_type line = lines.pop(0) header = line.strip('\n').strip('\r').split(delim) counter = 0 for line in lines: tmp_data = {} tmp_line = line.strip('\n').strip('\r').split(delim) for i in range(len(header)): if i < len(tmp_line): tmp_data[header[i]] = tmp_line[i].strip() else: tmp_data[header[i]] = "" if sort_by_this_name == "by_line_number": DATA[counter] = tmp_data counter += 1 else: if tmp_data[sort_by_this_name] != "": DATA[tmp_data[sort_by_this_name]] = tmp_data if return_keys: return DATA, file_type, header else: return DATA, file_type
[ "def", "magic_read_dict", "(", "path", ",", "data", "=", "None", ",", "sort_by_this_name", "=", "None", ",", "return_keys", "=", "False", ")", ":", "DATA", "=", "{", "}", "#fin = open(path, 'r')", "#first_line = fin.readline()", "lines", "=", "open_file", "(", ...
35.644068
19.881356
def loads(astring): """Decompress and deserialize string into Python object via pickle.""" try: return pickle.loads(zlib.decompress(astring)) except zlib.error as e: raise SerializerError( 'Cannot decompress object ("{}")'.format(str(e)) ) except pickle.UnpicklingError as e: raise SerializerError( 'Cannot restore object ("{}")'.format(str(e)) )
[ "def", "loads", "(", "astring", ")", ":", "try", ":", "return", "pickle", ".", "loads", "(", "zlib", ".", "decompress", "(", "astring", ")", ")", "except", "zlib", ".", "error", "as", "e", ":", "raise", "SerializerError", "(", "'Cannot decompress object (\...
38.25
15.75
def _delete_network(self, network_info): """Send network delete request to DCNM. :param network_info: contains network info to be deleted. """ org_name = network_info.get('organizationName', '') part_name = network_info.get('partitionName', '') segment_id = network_info['segmentId'] if 'mobDomainName' in network_info: vlan_id = network_info['vlanId'] mob_dom_name = network_info['mobDomainName'] url = self._network_mob_url % (org_name, part_name, vlan_id, mob_dom_name) else: url = self._network_url % (org_name, part_name, segment_id) return self._send_request('DELETE', url, '', 'network')
[ "def", "_delete_network", "(", "self", ",", "network_info", ")", ":", "org_name", "=", "network_info", ".", "get", "(", "'organizationName'", ",", "''", ")", "part_name", "=", "network_info", ".", "get", "(", "'partitionName'", ",", "''", ")", "segment_id", ...
46.4375
16.1875
def fit(self, X, y): """Build an accelerated failure time model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix. y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self """ X, event, time = check_arrays_survival(X, y) weights = ipc_weights(event, time) super().fit(X, numpy.log(time), sample_weight=weights) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "X", ",", "event", ",", "time", "=", "check_arrays_survival", "(", "X", ",", "y", ")", "weights", "=", "ipc_weights", "(", "event", ",", "time", ")", "super", "(", ")", ".", "fit", "(", "...
27.217391
22.26087
def transform_describe(self, node, describes, context_variable): """ Transform a describe node into a ``TestCase``. ``node`` is the node object. ``describes`` is the name of the object being described. ``context_variable`` is the name bound in the context manager (usually "it"). """ body = self.transform_describe_body(node.body, context_variable) return ast.ClassDef( name="Test" + describes.title(), bases=[ast.Name(id="TestCase", ctx=ast.Load())], keywords=[], starargs=None, kwargs=None, body=list(body), decorator_list=[], )
[ "def", "transform_describe", "(", "self", ",", "node", ",", "describes", ",", "context_variable", ")", ":", "body", "=", "self", ".", "transform_describe_body", "(", "node", ".", "body", ",", "context_variable", ")", "return", "ast", ".", "ClassDef", "(", "n...
32.238095
19.857143
def FDMT_iteration(datain, maxDT, nchan0, f_min, f_max, iteration_num, dataType): """ Input: Input - 3d array, with dimensions [nint, N_d0, nbl, nchan, npol] f_min,f_max - are the base-band begin and end frequencies. The frequencies can be entered in both MHz and GHz, units are factored out in all uses. maxDT - the maximal delay (in time bins) of the maximal dispersion. Appears in the paper as N_{\Delta} A typical input is maxDT = N_f dataType - To naively use FFT, one must use floating point types. Due to casting, use either complex64 or complex128. iteration num - Algorithm works in log2(Nf) iterations, each iteration changes all the sizes (like in FFT) Output: 5d array, with dimensions [nint, N_d1, nbl, nchan/2, npol] where N_d1 is the maximal number of bins the dispersion curve travels at one output frequency band For details, see algorithm 1 in Zackay & Ofek (2014) """ nint, dT, nbl, nchan, npol = datain.shape # output_dims = list(input_dims) deltaF = 2**(iteration_num) * (f_max - f_min)/float(nchan0) dF = (f_max - f_min)/float(nchan0) # the maximum deltaT needed to calculate at the i'th iteration deltaT = int(np.ceil((maxDT-1) *(1./f_min**2 - 1./(f_min + deltaF)**2) / (1./f_min**2 - 1./f_max**2))) logger.debug("deltaT = {0}".format(deltaT)) logger.debug("N_f = {0}".format(nchan0/2**(iteration_num))) dataout = np.zeros((nint, deltaT+1, nbl, nchan/2, npol), dataType) logger.debug('input_dims = {0}'.format(datain.shape)) logger.debug('output_dims = {0}'.format(dataout.shape)) # No negative D's are calculated => no shift is needed # If you want negative dispersions, this will have to change to 1+deltaT,1+deltaTOld # Might want to calculate negative dispersions when using coherent dedispersion, to reduce the number of trial dispersions by a factor of 2 (reducing the complexity of the coherent part of the hybrid) ShiftOutput = 0 ShiftInput = 0 F_jumps = nchan/2 # For some situations, it is beneficial to play with this correction. 
# When applied to real data, one should carefully analyze and understand the effect of # this correction on the pulse he is looking for (especially if convolving with a specific pulse profile) if iteration_num>0: correction = dF/2. else: correction = 0 for i_F in range(F_jumps): f_start = (f_max - f_min)/float(F_jumps) * (i_F) + f_min f_end = (f_max - f_min)/float(F_jumps) *(i_F+1) + f_min f_middle = (f_end - f_start)/2. + f_start - correction # it turned out in the end, that putting the correction +dF to f_middle_larger (or -dF/2 to f_middle, and +dF/2 to f_middle larger) # is less sensitive than doing nothing when dedispersing a coherently dispersed pulse. # The confusing part is that the hitting efficiency is better with the corrections (!?!). f_middle_larger = (f_end - f_start)/2 + f_start + correction deltaTLocal = int(np.ceil((maxDT-1) *(1./f_start**2 - 1./(f_end)**2) / (1./f_min**2 - 1./f_max**2))) logger.debug('deltaT {0} deltaTLocal {1}'.format(deltaT, deltaTLocal)) for i_dT in range(deltaTLocal+1): dT_middle = int(round(i_dT * (1./f_middle**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2))) dT_middle_index = dT_middle + ShiftInput dT_middle_larger = int(round(i_dT * (1./f_middle_larger**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2))) dT_rest = i_dT - dT_middle_larger dT_rest_index = dT_rest + ShiftInput logger.debug('{0}:{1}, {2}+{3}, {4} <= {5}, {6}'.format(i_T_min, i_T_max, i_dT, ShiftOutput, i_F, dT_middle_index, 2*i_F)) # out of bounds data? i_T_min = 0 i_T_max = dT_middle_larger dataout[i_T_min:i_T_max, i_dT + ShiftOutput, :, i_F, :] = datain[i_T_min:i_T_max, dT_middle_index, :, 2*i_F, :] # fully dedispersed data i_T_min = dT_middle_larger i_T_max = nint dataout[i_T_min:i_T_max, i_dT + ShiftOutput, :, i_F, :] = datain[i_T_min:i_T_max, dT_middle_index, :, 2*i_F, :] + datain[i_T_min - dT_middle_larger:i_T_max-dT_middle_larger, dT_rest_index, :, 2*i_F+1, :] return dataout
[ "def", "FDMT_iteration", "(", "datain", ",", "maxDT", ",", "nchan0", ",", "f_min", ",", "f_max", ",", "iteration_num", ",", "dataType", ")", ":", "nint", ",", "dT", ",", "nbl", ",", "nchan", ",", "npol", "=", "datain", ".", "shape", "# output_dims = l...
51.941176
34.741176
def rejection_sample(self, evidence=None, size=1, return_type="dataframe"): """ Generates sample(s) from joint distribution of the bayesian network, given the evidence. Parameters ---------- evidence: list of `pgmpy.factor.State` namedtuples None if no evidence size: int size of sample to be generated return_type: string (dataframe | recarray) Return type for samples, either of 'dataframe' or 'recarray'. Defaults to 'dataframe' Returns ------- sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument the generated samples Examples -------- >>> from pgmpy.models.BayesianModel import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import State >>> from pgmpy.sampling import BayesianModelSampling >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]]) >>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]]) >>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25, ... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]], ... ['intel', 'diff'], [2, 2]) >>> student.add_cpds(cpd_d, cpd_i, cpd_g) >>> inference = BayesianModelSampling(student) >>> evidence = [State(var='diff', state=0)] >>> inference.rejection_sample(evidence=evidence, size=2, return_type='dataframe') intel diff grade 0 0 0 1 1 0 0 1 """ if evidence is None: return self.forward_sample(size) types = [(var_name, 'int') for var_name in self.topological_order] sampled = np.zeros(0, dtype=types).view(np.recarray) prob = 1 i = 0 while i < size: _size = int(((size - i) / prob) * 1.5) _sampled = self.forward_sample(_size, 'recarray') for evid in evidence: _sampled = _sampled[_sampled[evid[0]] == evid[1]] prob = max(len(_sampled) / _size, 0.01) sampled = np.append(sampled, _sampled)[:size] i += len(_sampled) return _return_samples(return_type, sampled)
[ "def", "rejection_sample", "(", "self", ",", "evidence", "=", "None", ",", "size", "=", "1", ",", "return_type", "=", "\"dataframe\"", ")", ":", "if", "evidence", "is", "None", ":", "return", "self", ".", "forward_sample", "(", "size", ")", "types", "=",...
40.322034
20.830508
def is_active_trail(self, start, end, observed=None): """ Returns True if there is any active trail between start and end node Parameters ---------- start : Graph Node end : Graph Node observed : List of nodes (optional) If given the active trail would be computed assuming these nodes to be observed. additional_observed : List of nodes (optional) If given the active trail would be computed assuming these nodes to be observed along with the nodes marked as observed in the model. Examples -------- >>> from pgmpy.base import DAG >>> student = DAG() >>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'), ... ('intel', 'sat')]) >>> student.is_active_trail('diff', 'intel') False >>> student.is_active_trail('grades', 'sat') True """ if end in self.active_trail_nodes(start, observed)[start]: return True else: return False
[ "def", "is_active_trail", "(", "self", ",", "start", ",", "end", ",", "observed", "=", "None", ")", ":", "if", "end", "in", "self", ".", "active_trail_nodes", "(", "start", ",", "observed", ")", "[", "start", "]", ":", "return", "True", "else", ":", ...
41.642857
21.714286
def add_edge(self, start, end, **kwargs): """ Add an edge between two nodes. The nodes will be automatically added if they are not present in the network. Parameters ---------- start: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. end: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> model = DBN() >>> model.add_nodes_from(['D', 'I']) >>> model.add_edge(('D',0), ('I',0)) >>> sorted(model.edges()) [(('D', 0), ('I', 0)), (('D', 1), ('I', 1))] """ try: if len(start) != 2 or len(end) != 2: raise ValueError('Nodes must be of type (node, time_slice).') elif not isinstance(start[1], int) or not isinstance(end[1], int): raise ValueError('Nodes must be of type (node, time_slice).') elif start[1] == end[1]: start = (start[0], 0) end = (end[0], 0) elif start[1] == end[1] - 1: start = (start[0], 0) end = (end[0], 1) elif start[1] > end[1]: raise NotImplementedError('Edges in backward direction are not allowed.') elif start[1] != end[1]: raise ValueError("Edges over multiple time slices is not currently supported") except TypeError: raise ValueError('Nodes must be of type (node, time_slice).') if start == end: raise ValueError('Self Loops are not allowed') elif start in super(DynamicBayesianNetwork, self).nodes() and end \ in super(DynamicBayesianNetwork, self).nodes() and \ nx.has_path(self, end, start): raise ValueError('Loops are not allowed. 
Adding the edge from ({start} --> {end}) forms a loop.'.format( start=str(start), end=str(end))) super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs) if start[1] == end[1]: super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1])) else: super(DynamicBayesianNetwork, self).add_node((end[0], 1 - end[1]))
[ "def", "add_edge", "(", "self", ",", "start", ",", "end", ",", "*", "*", "kwargs", ")", ":", "try", ":", "if", "len", "(", "start", ")", "!=", "2", "or", "len", "(", "end", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Nodes must be of type (n...
44.754098
24.295082
def skip_regex(lines, options): """ Optionally exclude lines that match '--skip-requirements-regex' """ skip_regex = options.skip_requirements_regex if options else None if skip_regex: lines = filterfalse(re.compile(skip_regex).search, lines) return lines
[ "def", "skip_regex", "(", "lines", ",", "options", ")", ":", "skip_regex", "=", "options", ".", "skip_requirements_regex", "if", "options", "else", "None", "if", "skip_regex", ":", "lines", "=", "filterfalse", "(", "re", ".", "compile", "(", "skip_regex", ")...
35
17
def load_path_with_default(self, path, default_constructor): ''' Same as `load_path(path)', except uses default_constructor on import errors, or if loaded a auto-generated namespace package (e.g. bare directory). ''' try: imported_obj = self.load_path(path) except (ImportError, ConfigurationError): imported_obj = default_constructor(path) else: # Ugly but seemingly expedient way to check a module was an # namespace type module if (isinstance(imported_obj, ModuleType) and imported_obj.__spec__.origin == 'namespace'): imported_obj = default_constructor(path) return imported_obj
[ "def", "load_path_with_default", "(", "self", ",", "path", ",", "default_constructor", ")", ":", "try", ":", "imported_obj", "=", "self", ".", "load_path", "(", "path", ")", "except", "(", "ImportError", ",", "ConfigurationError", ")", ":", "imported_obj", "="...
43.176471
21.058824
def set_branch_capacity(network, args): """ Set branch capacity factor of lines and transformers, different factors for HV (110kV) and eHV (220kV, 380kV). Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA args: dict Settings in appl.py """ network.lines["s_nom_total"] = network.lines.s_nom.copy() network.transformers["s_nom_total"] = network.transformers.s_nom.copy() network.lines["v_nom"] = network.lines.bus0.map( network.buses.v_nom) network.transformers["v_nom0"] = network.transformers.bus0.map( network.buses.v_nom) network.lines.s_nom[network.lines.v_nom == 110] = \ network.lines.s_nom * args['branch_capacity_factor']['HV'] network.lines.s_nom[network.lines.v_nom > 110] = \ network.lines.s_nom * args['branch_capacity_factor']['eHV'] network.transformers.s_nom[network.transformers.v_nom0 == 110]\ = network.transformers.s_nom * args['branch_capacity_factor']['HV'] network.transformers.s_nom[network.transformers.v_nom0 > 110]\ = network.transformers.s_nom * args['branch_capacity_factor']['eHV']
[ "def", "set_branch_capacity", "(", "network", ",", "args", ")", ":", "network", ".", "lines", "[", "\"s_nom_total\"", "]", "=", "network", ".", "lines", ".", "s_nom", ".", "copy", "(", ")", "network", ".", "transformers", "[", "\"s_nom_total\"", "]", "=", ...
32.714286
24.828571
def execute_command(self, command, tab=None): # TODO DBUS_ONLY """Execute the `command' in the `tab'. If tab is None, the command will be executed in the currently selected tab. Command should end with '\n', otherwise it will be appended to the string. """ # TODO CONTEXTMENU this has to be rewriten and only serves the # dbus interface, maybe this should be moved to dbusinterface.py if not self.get_notebook().has_page(): self.add_tab() if command[-1] != '\n': command += '\n' terminal = self.get_notebook().get_current_terminal() terminal.feed_child(command)
[ "def", "execute_command", "(", "self", ",", "command", ",", "tab", "=", "None", ")", ":", "# TODO DBUS_ONLY", "# TODO CONTEXTMENU this has to be rewriten and only serves the", "# dbus interface, maybe this should be moved to dbusinterface.py", "if", "not", "self", ".", "get_not...
39.235294
16.470588
def get_data_files_tuple(*rel_path, **kwargs): """Return a tuple which can be used for setup.py's data_files :param tuple path: List of path elements pointing to a file or a directory of files :param dict kwargs: Set path_to_file to True is `path` points to a file :return: tuple of install directory and list of source files :rtype: tuple(str, [str]) """ rel_path = os.path.join(*rel_path) target_path = os.path.join("share", *rel_path.split(os.sep)[1:]) # remove source/ (package_dir) if "path_to_file" in kwargs and kwargs["path_to_file"]: source_files = [rel_path] target_path = os.path.dirname(target_path) else: source_files = [os.path.join(rel_path, filename) for filename in os.listdir(rel_path)] return target_path, source_files
[ "def", "get_data_files_tuple", "(", "*", "rel_path", ",", "*", "*", "kwargs", ")", ":", "rel_path", "=", "os", ".", "path", ".", "join", "(", "*", "rel_path", ")", "target_path", "=", "os", ".", "path", ".", "join", "(", "\"share\"", ",", "*", "rel_p...
49.5
21.875
def _create_dir(path): '''Creates necessary directories for the given path or does nothing if the directories already exist. ''' try: os.makedirs(path) except OSError, exc: if exc.errno == errno.EEXIST: pass else: raise
[ "def", "_create_dir", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", ",", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "EEXIST", ":", "pass", "else", ":", "raise" ]
25.181818
20.454545
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True): """ Performs clustering analysis and returns a list of cluster labels Parameters ---------- x : A Numpy array, Pandas Dataframe or list of arrays/dfs The data to be clustered. You can pass a single array/df or a list. If a list is passed, the arrays will be stacked and the clustering will be performed across all lists (i.e. not within each list). cluster : str or dict Model to use to discover clusters. Support algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. n_clusters : int Number of clusters to discover. Not required for HDBSCAN. format_data : bool Whether or not to first call the format_data function (default: True). ndims : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- cluster_labels : list An list of cluster labels """ if cluster == None: return x elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \ (isinstance(cluster, dict) and cluster['model']=='HDBSCAN'): if not _has_hdbscan: raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11') if ndims != None: warnings.warn('The ndims argument is now deprecated. 
Ignoring dimensionality reduction step.') if format_data: x = formatter(x, ppca=True) # if reduce is a string, find the corresponding model if isinstance(cluster, six.string_types): model = models[cluster] if cluster != 'HDBSCAN': model_params = { 'n_clusters' : n_clusters } else: model_params = {} # if its a dict, use custom params elif type(cluster) is dict: if isinstance(cluster['model'], six.string_types): model = models[cluster['model']] model_params = cluster['params'] # initialize model model = model(**model_params) # fit the model model.fit(np.vstack(x)) # return the labels return list(model.labels_)
[ "def", "cluster", "(", "x", ",", "cluster", "=", "'KMeans'", ",", "n_clusters", "=", "3", ",", "ndims", "=", "None", ",", "format_data", "=", "True", ")", ":", "if", "cluster", "==", "None", ":", "return", "x", "elif", "(", "isinstance", "(", "cluste...
34.369863
24.780822
async def StartUnitCompletion(self, entities, message): ''' entities : typing.Sequence[~Entity] message : str Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='UpgradeSeries', request='StartUnitCompletion', version=1, params=_params) _params['entities'] = entities _params['message'] = message reply = await self.rpc(msg) return reply
[ "async", "def", "StartUnitCompletion", "(", "self", ",", "entities", ",", "message", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'UpgradeSeries'", ",", "request", "=", "'StartUnitCompletion'"...
33.125
11.25
async def client_event_handler(self, client_id, event_tuple, user_data): """Method called to actually send an event to a client. Users of this class should override this method to actually forward device events to their clients. It is called with the client_id passed to (or returned from) :meth:`setup_client` as well as the user_data object that was included there. The event tuple is a 3-tuple of: - connection string - event name - event object If you override this to be acoroutine, it will be awaited. The default implementation just logs the event. Args: client_id (str): The client_id that this event should be forwarded to. event_tuple (tuple): The connection_string, event_name and event_object that should be forwarded. user_data (object): Any user data that was passed to setup_client. """ conn_string, event_name, _event = event_tuple self._logger.debug("Ignoring event %s from device %s forwarded for client %s", event_name, conn_string, client_id) return None
[ "async", "def", "client_event_handler", "(", "self", ",", "client_id", ",", "event_tuple", ",", "user_data", ")", ":", "conn_string", ",", "event_name", ",", "_event", "=", "event_tuple", "self", ".", "_logger", ".", "debug", "(", "\"Ignoring event %s from device ...
39.233333
26.1
def focusOutEvent(self, event): """Reimplement Qt method to send focus change notification""" self.focus_changed.emit() return super(PageControlWidget, self).focusOutEvent(event)
[ "def", "focusOutEvent", "(", "self", ",", "event", ")", ":", "self", ".", "focus_changed", ".", "emit", "(", ")", "return", "super", "(", "PageControlWidget", ",", "self", ")", ".", "focusOutEvent", "(", "event", ")" ]
49.75
10.5
def _check_useless_super_delegation(self, function):
    """Check if the given function node is an useless method override

    We consider it *useless* if it uses the super() builtin, but having
    nothing additional whatsoever than not implementing the method at all.
    If the method uses super() to delegate an operation to the rest of the MRO,
    and if the method called is the same as the current one, the arguments
    passed to super() are the same as the parameters that were passed to
    this method, then the method could be removed altogether, by letting
    other implementation to take precedence.
    """

    # Only undecorated methods qualify: a decorator changes the use.
    if (
        not function.is_method()
        # With decorators is a change of use
        or function.decorators
    ):
        return

    body = function.body
    if len(body) != 1:
        # Multiple statements, which means this overridden method
        # could do multiple things we are not aware of.
        return

    statement = body[0]
    if not isinstance(statement, (astroid.Expr, astroid.Return)):
        # Doing something else than what we are interested into.
        return

    call = statement.value
    if (
        not isinstance(call, astroid.Call)
        # Not a super() attribute access.
        or not isinstance(call.func, astroid.Attribute)
    ):
        return

    # Should be a super call.
    try:
        super_call = next(call.func.expr.infer())
    except astroid.InferenceError:
        return
    else:
        if not isinstance(super_call, objects.Super):
            return

    # The name should be the same.
    if call.func.attrname != function.name:
        return

    # Should be a super call with the MRO pointer being the
    # current class and the type being the current instance.
    current_scope = function.parent.scope()
    if (
        super_call.mro_pointer != current_scope
        or not isinstance(super_call.type, astroid.Instance)
        or super_call.type.name != current_scope.name
    ):
        return

    #  Check values of default args
    klass = function.parent.frame()
    meth_node = None
    # Walk ancestors to find the overridden definition so we can compare
    # default values and annotations against it.
    for overridden in klass.local_attr_ancestors(function.name):
        # get astroid for the searched method
        try:
            meth_node = overridden[function.name]
        except KeyError:
            # we have found the method but it's not in the local
            # dictionary.
            # This may happen with astroid build from living objects
            continue
        if (
            not isinstance(meth_node, astroid.FunctionDef)
            # If the method have an ancestor which is not a
            # function then it is legitimate to redefine it
            or _has_different_parameters_default_value(
                meth_node.args, function.args
            )
        ):
            return
        break

    # Detect if the parameters are the same as the call's arguments.
    params = _signature_from_arguments(function.args)
    args = _signature_from_call(call)

    if meth_node is not None:

        def form_annotations(annotations):
            # Render non-None annotation nodes to comparable strings.
            return [
                annotation.as_string() for annotation in filter(None, annotations)
            ]

        # Differing annotations mean the override narrows/changes the
        # contract, so it is not useless.
        called_annotations = form_annotations(function.args.annotations)
        overridden_annotations = form_annotations(meth_node.args.annotations)
        if called_annotations and overridden_annotations:
            if called_annotations != overridden_annotations:
                return

    if _definition_equivalent_to_call(params, args):
        self.add_message(
            "useless-super-delegation", node=function, args=(function.name,)
        )
[ "def", "_check_useless_super_delegation", "(", "self", ",", "function", ")", ":", "if", "(", "not", "function", ".", "is_method", "(", ")", "# With decorators is a change of use", "or", "function", ".", "decorators", ")", ":", "return", "body", "=", "function", ...
37.12381
22.304762
def moveaxis(a, source, destination):
    """Move axes of an array to new positions.

    Other axes remain in their original order. This function is a
    backport of `numpy.moveaxis` introduced in NumPy 1.11.

    See Also
    --------
    numpy.moveaxis
    """
    import numpy
    if hasattr(numpy, 'moveaxis'):
        # Modern NumPy ships this natively; delegate.
        return numpy.moveaxis(a, source, destination)

    def _as_axis_list(axes):
        # Accept a single axis or any iterable of axes.
        try:
            return list(axes)
        except TypeError:
            return [axes]

    # Normalise negative axis indices against the array's rank.
    src = [axis + a.ndim if axis < 0 else axis
           for axis in _as_axis_list(source)]
    dst = [axis + a.ndim if axis < 0 else axis
           for axis in _as_axis_list(destination)]

    order = [n for n in range(a.ndim) if n not in src]
    for target_pos, original_pos in sorted(zip(dst, src)):
        order.insert(target_pos, original_pos)
    return a.transpose(order)
[ "def", "moveaxis", "(", "a", ",", "source", ",", "destination", ")", ":", "import", "numpy", "if", "hasattr", "(", "numpy", ",", "'moveaxis'", ")", ":", "return", "numpy", ".", "moveaxis", "(", "a", ",", "source", ",", "destination", ")", "try", ":", ...
25
21.676471
def update_or_create_candidate(
    self, candidate, aggregable=True, uncontested=False
):
    """Update or create the CandidateElection linking ``candidate`` to this election.

    The original docstring said "Create", but ``update_or_create`` also
    refreshes the flags on an existing record.

    Args:
        candidate: the Candidate to attach to this election.
        aggregable (bool): whether this candidate's results are aggregable.
        uncontested (bool): whether the candidate runs uncontested.

    Returns:
        CandidateElection: the updated or newly created through-record.
    """
    # ``_created`` replaces the unused local ``c`` from the original.
    candidate_election, _created = CandidateElection.objects.update_or_create(
        candidate=candidate,
        election=self,
        defaults={"aggregable": aggregable, "uncontested": uncontested},
    )
    return candidate_election
[ "def", "update_or_create_candidate", "(", "self", ",", "candidate", ",", "aggregable", "=", "True", ",", "uncontested", "=", "False", ")", ":", "candidate_election", ",", "c", "=", "CandidateElection", ".", "objects", ".", "update_or_create", "(", "candidate", "...
35.272727
21.181818
def invert(self, output_directory=None, catch_output=True, **kwargs):
    """Invert this instance, and import the result files

    No directories/files will be overwritten. Raise an IOError if the
    output directory exists.

    Parameters
    ----------
    output_directory: string, optional
        use this directory as output directory for the generated
        tomodir. If None, then a temporary directory will be used that
        is deleted after import.
    catch_output: bool, optional
        Do not show CRTomo output
    cores: int, optional
        how many cores to use for CRTomo

    Returns
    -------
    return_code: bool
        Return 0 if the inversion completed successfully. Return 1 if
        no measurements are present.
    """
    self._check_state()
    if self.can_invert:
        if output_directory is not None:
            if not os.path.isdir(output_directory):
                # Create the requested directory and invert into it.
                os.makedirs(output_directory)
                tempdir = output_directory
                self._invert(tempdir, catch_output, **kwargs)
            else:
                # Refuse to overwrite an existing directory.
                raise IOError(
                    'output directory already exists: {0}'.format(
                        output_directory
                    )
                )
        else:
            # No explicit target: invert inside a throwaway temp
            # directory that is removed when the context exits.
            with tempfile.TemporaryDirectory(dir=self.tempdir) as tempdir:
                self._invert(tempdir, catch_output, **kwargs)
        return 0
    else:
        print(
            'Sorry, no measurements present, cannot model yet'
        )
        return 1
[ "def", "invert", "(", "self", ",", "output_directory", "=", "None", ",", "catch_output", "=", "True", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_state", "(", ")", "if", "self", ".", "can_invert", ":", "if", "output_directory", "is", "not", ...
35.913043
19.5
def evalall(self, loc=None, defaults=None):
    """Evaluate every stored option value in environment `loc`.

    :See: `eval()`
    """
    self.check()
    if defaults is None:
        defaults = cma_default_options
    # TODO: this needs rather the parameter N instead of loc
    if 'N' in loc:  # TODO: __init__ of CMA can be simplified
        popsize = self('popsize', defaults['popsize'], loc)
        evaluation_env = {'N': loc['N'], 'popsize': popsize}
        for raw_key in list(self.keys()):
            key = self.corrected_key(raw_key)
            self.eval(key, defaults[key], evaluation_env)
    self._lock_setting = True
    return self
[ "def", "evalall", "(", "self", ",", "loc", "=", "None", ",", "defaults", "=", "None", ")", ":", "self", ".", "check", "(", ")", "if", "defaults", "is", "None", ":", "defaults", "=", "cma_default_options", "# TODO: this needs rather the parameter N instead of loc...
36.166667
14.333333
def filter_queryset(self, request, queryset, view):
    """
    Override of the standard filter_queryset hook.

    Detail requests (the lookup field is present in the view kwargs) are
    returned untouched.  List requests are dispatched either to
    ``filter_list_queryset`` or, when ``action_routing`` is enabled, to
    ``filter_<action>_queryset`` for the view's current action.
    """
    # Detail-style request: leave the queryset alone.
    if view.lookup_field in view.kwargs:
        return queryset
    if self.action_routing:
        handler_name = "filter_{action}_queryset".format(action=view.action)
        handler = getattr(self, handler_name)
        return handler(request, queryset, view)
    return self.filter_list_queryset(request, queryset, view)
[ "def", "filter_queryset", "(", "self", ",", "request", ",", "queryset", ",", "view", ")", ":", "# Check if this is a list type request", "if", "view", ".", "lookup_field", "not", "in", "view", ".", "kwargs", ":", "if", "not", "self", ".", "action_routing", ":"...
49.066667
18.4
def send_signal(self, signal):
    """ Send signal from this node to all connected receivers unless the node
    is in spectator mode.

    signal -- (hashable) signal value, see `dispatcher` connect for details

    Return a list of tuple pairs [(receiver, response), ... ]
    or None if the node is in spectator mode.

    if any receiver raises an error, the error propagates back through send,
    terminating the dispatch loop, so it is quite possible to not have all
    receivers called if a raises an error.
    """
    if self.in_spectator_mode:
        return None
    logger.debug("Node %s broadcasts signal %s" % (self, signal))
    # BUG FIX: the docstring promises the [(receiver, response), ...] list
    # from dispatcher.send, but the original discarded it (implicit None).
    return dispatcher.send(signal=signal, sender=self)
[ "def", "send_signal", "(", "self", ",", "signal", ")", ":", "if", "self", ".", "in_spectator_mode", ":", "return", "None", "logger", ".", "debug", "(", "\"Node %s broadcasts signal %s\"", "%", "(", "self", ",", "signal", ")", ")", "dispatcher", ".", "send", ...
44.625
24.5
def plot_elbo(self, figsize=(15, 7)):
    """Plot the recorded ELBO values across optimization iterations.

    figsize -- (width, height) tuple forwarded to matplotlib.
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=figsize)
    plt.plot(self.elbo_records)
    plt.xlabel("Iterations")
    plt.ylabel("ELBO")
    plt.show()
[ "def", "plot_elbo", "(", "self", ",", "figsize", "=", "(", "15", ",", "7", ")", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "plt", ".", "plot", "(", "self", ".", "elbo_rec...
26.090909
9.363636
def _format(color, style=''):
    """Return a QTextCharFormat with the given attributes.

    color -- any name accepted by QColor.setNamedColor
    style -- free-form string; 'bold' / 'italic' substrings toggle those traits
    """
    text_color = QColor()
    text_color.setNamedColor(color)

    char_format = QTextCharFormat()
    char_format.setForeground(text_color)
    if 'bold' in style:
        char_format.setFontWeight(QFont.Bold)
    if 'italic' in style:
        char_format.setFontItalic(True)

    return char_format
[ "def", "_format", "(", "color", ",", "style", "=", "''", ")", ":", "_color", "=", "QColor", "(", ")", "_color", ".", "setNamedColor", "(", "color", ")", "_format", "=", "QTextCharFormat", "(", ")", "_format", ".", "setForeground", "(", "_color", ")", "...
26.071429
13.285714
def ls_remote(cwd=None,
              remote='origin',
              ref=None,
              opts='',
              git_opts='',
              user=None,
              password=None,
              identity=None,
              https_user=None,
              https_pass=None,
              ignore_retcode=False,
              output_encoding=None,
              saltenv='base'):
    '''
    Interface to `git-ls-remote(1)`_. Returns the upstream hash for a remote
    reference.

    cwd
        The path to the git checkout. Optional (and ignored if present) when
        ``remote`` is set to a URL instead of a remote name.

    remote : origin
        The name of the remote to query. Can be the name of a git remote
        (which exists in the git checkout defined by the ``cwd`` parameter),
        or the URL of a remote repository.

        .. versionchanged:: 2015.8.0
            Argument renamed from ``repository`` to ``remote``

    ref
        The name of the ref to query. Optional, if not specified, all refs are
        returned. Can be a branch or tag name, or the full name of the
        reference (for example, to get the hash for a Github pull request number
        1234, ``ref`` can be set to ``refs/pull/1234/head``

        .. versionchanged:: 2015.8.0
            Argument renamed from ``branch`` to ``ref``

        .. versionchanged:: 2015.8.4
            Defaults to returning all refs instead of master.

    opts
        Any additional options to add to the command line, in a single string

        .. versionadded:: 2015.8.0

    git_opts
        Any additional options to add to git command itself (not the
        ``ls-remote`` subcommand), in a single string. This is useful for
        passing ``-c`` to run git with temporary changes to the git
        configuration.

        .. versionadded:: 2017.7.0

        .. note::
            This is only supported in git 1.7.2 and newer.

    user
        User under which to run the git command. By default, the command is run
        by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    identity
        Path to a private key to use for ssh URLs

        .. warning::

            Unless Salt is invoked from the minion using ``salt-call``, the
            key(s) must be passphraseless. For greater security with
            passphraseless private keys, see the `sshd(8)`_ manpage for
            information on securing the keypair from the remote side in the
            ``authorized_keys`` file.

            .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT

        .. versionchanged:: 2015.8.7

            Salt will no longer attempt to use passphrase-protected keys unless
            invoked from the minion using ``salt-call``, to prevent blocking
            waiting for user input.

        Key can also be specified as a SaltStack file server URL, eg.
        salt://location/identity_file

        .. versionchanged:: 2016.3.0

    https_user
        Set HTTP Basic Auth username. Only accepted for HTTPS URLs.

        .. versionadded:: 2015.5.0

    https_pass
        Set HTTP Basic Auth password. Only accepted for HTTPS URLs.

        .. versionadded:: 2015.5.0

    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.

        .. versionadded:: 2015.8.0

    saltenv
        The default salt environment to pull sls files from

        .. versionadded:: 2016.3.1

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-ls-remote(1)`: http://git-scm.com/docs/git-ls-remote

    CLI Example:

    .. code-block:: bash

        salt myminion git.ls_remote /path/to/repo origin master
        salt myminion git.ls_remote remote=https://mydomain.tld/repo.git ref=mytag opts='--tags'
    '''
    if cwd is not None:
        cwd = _expand_path(cwd, user)
    try:
        remote = salt.utils.url.add_http_basic_auth(remote,
                                                    https_user,
                                                    https_pass,
                                                    https_only=True)
    except ValueError as exc:
        raise SaltInvocationError(exc.__str__())
    command = ['git'] + _format_git_opts(git_opts)
    command.append('ls-remote')
    command.extend(_format_opts(opts))
    command.append(remote)
    if ref:
        command.append(ref)
    output = _git_run(command,
                      cwd=cwd,
                      user=user,
                      password=password,
                      identity=identity,
                      ignore_retcode=ignore_retcode,
                      saltenv=saltenv,
                      output_encoding=output_encoding)['stdout']
    ret = {}
    for line in output.splitlines():
        try:
            ref_sha1, ref_name = line.split(None, 1)
        except ValueError:
            # BUG FIX: unpacking too few values raises ValueError, not
            # IndexError -- the original 'except IndexError' never fired,
            # so a malformed line crashed the whole call.
            continue
        ret[ref_name] = ref_sha1
    return ret
[ "def", "ls_remote", "(", "cwd", "=", "None", ",", "remote", "=", "'origin'", ",", "ref", "=", "None", ",", "opts", "=", "''", ",", "git_opts", "=", "''", ",", "user", "=", "None", ",", "password", "=", "None", ",", "identity", "=", "None", ",", "...
32.865031
24.840491
def set_cognitive_process(self, grade_id=None):
    """Sets the cognitive process.

    arg:    gradeId (osid.id.Id): the new cognitive process
    raise:  INVALID_ARGUMENT - gradeId is invalid
    raise:  NoAccess - gradeId cannot be modified
    raise:  NullArgument - gradeId is null
    compliance: mandatory - This method must be implemented.
    """
    if grade_id is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['cognitive_process_id'])
    if metadata.is_read_only():
        raise NoAccess()
    # Guard-clause form of the original validate-then-assign branch.
    if not self._is_valid_input(grade_id, metadata, array=False):
        raise InvalidArgument()
    self._my_map['cognitiveProcessId'] = str(grade_id)
[ "def", "set_cognitive_process", "(", "self", ",", "grade_id", "=", "None", ")", ":", "if", "grade_id", "is", "None", ":", "raise", "NullArgument", "(", ")", "metadata", "=", "Metadata", "(", "*", "*", "settings", ".", "METADATA", "[", "'cognitive_process_id'...
39.105263
16.631579
def terminate(self):
    """Override of PantsService.terminate() that cleans up when the Pailgun
    server is terminated."""
    # Close the Pailgun TCPServer's listening socket first, then run the
    # generic service teardown.
    if self.pailgun:
        self.pailgun.server_close()
    super(PailgunService, self).terminate()
[ "def", "terminate", "(", "self", ")", ":", "# Tear down the Pailgun TCPServer.", "if", "self", ".", "pailgun", ":", "self", ".", "pailgun", ".", "server_close", "(", ")", "super", "(", "PailgunService", ",", "self", ")", ".", "terminate", "(", ")" ]
36.285714
13.142857
def _adjacency_to_edges(adjacency): """determine from an adjacency the list of edges if (u, v) in edges, then (v, u) should not be""" edges = set() for u in adjacency: for v in adjacency[u]: try: edge = (u, v) if u <= v else (v, u) except TypeError: # Py3 does not allow sorting of unlike types if (v, u) in edges: continue edge = (u, v) edges.add(edge) return edges
[ "def", "_adjacency_to_edges", "(", "adjacency", ")", ":", "edges", "=", "set", "(", ")", "for", "u", "in", "adjacency", ":", "for", "v", "in", "adjacency", "[", "u", "]", ":", "try", ":", "edge", "=", "(", "u", ",", "v", ")", "if", "u", "<=", "...
31.25
14.125
def logWrite(self, string):
    """Only write text to the log file, do not print.

    Appends ``string`` plus a trailing newline to ``self.logFile``.
    """
    # 'with' guarantees the handle is closed even if write() raises;
    # the original leaked the open file on error.
    with open(self.logFile, 'at') as log_file:
        log_file.write(string + '\n')
[ "def", "logWrite", "(", "self", ",", "string", ")", ":", "logFile", "=", "open", "(", "self", ".", "logFile", ",", "'at'", ")", "logFile", ".", "write", "(", "string", "+", "'\\n'", ")", "logFile", ".", "close", "(", ")" ]
32.6
10
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
    """
    Check the gradient of the model by comparing to a numerical
    estimate.  If the verbose flag is passed, individual
    components are tested (and printed)

    :param verbose: If True, print a "full" checking of each parameter
    :type verbose: bool
    :param step: The size of the step around which to linearise the objective
    :type step: float (default 1e-6)
    :param tolerance: the tolerance allowed (see note)
    :type tolerance: float (default 1e-3)

    Note:-
       The gradient is considered correct if the ratio of the analytical
       and numerical gradients is within <tolerance> of unity.

       The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
       If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
       not accurate enough for the tests (shown with blue).
    """
    if not self._model_initialized_:
        import warnings
        warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
        return False

    # Work on a copy of the flattened, transformed parameter vector.
    x = self.optimizer_array.copy()

    if not verbose:
        # make sure only to test the selected parameters
        if target_param is None:
            transformed_index = np.arange(len(x))
        else:
            transformed_index = self._raveled_index_for_transformed(target_param)

            if transformed_index.size == 0:
                print("No free parameters to check")
                return True

        # just check the global ratio
        # Random-signed perturbation along all selected parameters at once.
        dx = np.zeros(x.shape)
        dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)

        # evaulate around the point x
        f1 = self._objective(x + dx)
        f2 = self._objective(x - dx)
        gradient = self._grads(x)

        dx = dx[transformed_index]
        gradient = gradient[transformed_index]

        # Central-difference ratio; the np.where guards a zero denominator.
        denominator = (2 * np.dot(dx, gradient))
        global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
        global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
        # NOTE(review): 'is np.nan' is an identity check and is never True
        # for a computed float -- likely intended np.isnan(global_ratio).
        if global_ratio is np.nan:  # pragma: no cover
            global_ratio = 0
        return np.abs(1. - global_ratio) < tolerance or global_diff
    else:
        # check the gradient of each parameter individually, and do some pretty printing
        try:
            names = self.parameter_names_flat()
        except NotImplementedError:
            names = ['Variable %i' % i for i in range(len(x))]
        # Prepare for pretty-printing
        header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
        max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
        float_len = 10
        cols = [max_names]
        cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
        cols = np.array(cols) + 5
        header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
        header_string = list(map(lambda x: '|'.join(x), [header_string]))
        separator = '-' * len(header_string[0])
        print('\n'.join([header_string[0], separator]))
        if target_param is None:
            target_param = self
        transformed_index = self._raveled_index_for_transformed(target_param)

        if transformed_index.size == 0:
            print("No free parameters to check")
            return True

        gradient = self._grads(x).copy()
        # NOTE(review): the result of this np.where is discarded, so the
        # line has no effect -- possibly intended
        # gradient = np.where(gradient == 0, 1e-312, gradient).
        np.where(gradient == 0, 1e-312, gradient)
        ret = True
        for xind in zip(transformed_index):
            # Central difference for this single coordinate.
            xx = x.copy()
            xx[xind] += step
            f1 = float(self._objective(xx))
            xx[xind] -= 2.*step
            f2 = float(self._objective(xx))
            #Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentiall
            #the same
            if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
                df_ratio = np.abs((f1 - f2) / min(f1, f2))
            else: # pragma: no cover
                df_ratio = 1.0
            df_unstable = df_ratio < df_tolerance
            numerical_gradient = (f1 - f2) / (2. * step)
            if np.all(gradient[xind] == 0): # pragma: no cover
                ratio = (f1 - f2) == gradient[xind]
            else:
                ratio = (f1 - f2) / (2. * step * gradient[xind])
            difference = np.abs(numerical_gradient - gradient[xind])

            # Colour the row: green = pass, red = fail, blue = numerically
            # unstable (ANSI escape codes).
            if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
                formatted_name = "\033[92m {0} \033[0m".format(names[xind])
                ret &= True
            else: # pragma: no cover
                formatted_name = "\033[91m {0} \033[0m".format(names[xind])
                ret &= False
            if df_unstable: # pragma: no cover
                formatted_name = "\033[94m {0} \033[0m".format(names[xind])

            r = '%.6f' % float(ratio)
            d = '%.6f' % float(difference)
            g = '%.6f' % gradient[xind]
            ng = '%.6f' % float(numerical_gradient)
            df = '%1.e' % float(df_ratio)
            grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
            print(grad_string)

        self.optimizer_array = x
        return ret
[ "def", "_checkgrad", "(", "self", ",", "target_param", "=", "None", ",", "verbose", "=", "False", ",", "step", "=", "1e-6", ",", "tolerance", "=", "1e-3", ",", "df_tolerance", "=", "1e-12", ")", ":", "if", "not", "self", ".", "_model_initialized_", ":", ...
47.016
23.624
def update_parameters(url, parameters, encoding='utf8'): """ Updates a URL's existing GET parameters. :param url: a base URL to which to add additional parameters. :param parameters: a dictionary of parameters, any mix of unicode and string objects as the parameters and the values. :parameter encoding: the byte encoding to use when passed unicode for the base URL or for keys and values of the parameters dict. This isnecessary because `urllib.urlencode` calls the `str()` function on all of its inputs. This raises a `UnicodeDecodeError` when it encounters a unicode string with characters outside of the default ASCII charset. :rtype: a string URL. """ # Convert the base URL to the default encoding. if isinstance(url, unicode): url = url.encode(encoding) parsed_url = urlparse.urlparse(url) existing_query_parameters = urlparse.parse_qsl(parsed_url.query) # Convert unicode parameters to the default encoding. byte_parameters = [] for key, value in (existing_query_parameters + parameters.items()): if isinstance(key, unicode): key = key.encode(encoding) if isinstance(value, unicode): value = value.encode(encoding) byte_parameters.append((key, value)) # Generate the final URL with all of the updated parameters. Read # http://docs.python.org/2/library/urlparse.html#urlparse.urlparse if this is # confusing. return urlparse.urlunparse(( parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, urlencode(byte_parameters), parsed_url.fragment ))
[ "def", "update_parameters", "(", "url", ",", "parameters", ",", "encoding", "=", "'utf8'", ")", ":", "# Convert the base URL to the default encoding.", "if", "isinstance", "(", "url", ",", "unicode", ")", ":", "url", "=", "url", ".", "encode", "(", "encoding", ...
38.975
19.8
def setup_dashboard_panels_visibility_registry(section_name):
    """
    Initializes the values for panels visibility in registry_records. By
    default, only users with LabManager or Manager roles can see the panels.
    :param section_name:
    :return: An string like: "role1,yes,role2,no,rol3,no"
    """
    registry_info = get_dashboard_registry_record()

    # Collect every role id defined in the portal.
    acl_users = get_tool("acl_users")
    portal_roles = [role for role in acl_users.portal_role_manager.listRoleIds()]

    # Build "role,yes|no" pairs; only lab managers/managers see panels.
    pairs = []
    for role in portal_roles:
        pairs.append(role)
        pairs.append('yes' if role in ['LabManager', 'Manager'] else 'no')
    role_permissions = ','.join(pairs)

    # Store permissions string and persist the registry record.
    registry_info[get_unicode(section_name)] = get_unicode(role_permissions)
    set_dashboard_registry_record(registry_info)
    return registry_info
[ "def", "setup_dashboard_panels_visibility_registry", "(", "section_name", ")", ":", "registry_info", "=", "get_dashboard_registry_record", "(", ")", "role_permissions_list", "=", "[", "]", "# Getting roles defined in the system", "roles", "=", "[", "]", "acl_users", "=", ...
38.5
13.766667
def set_lic_id(self, doc, lic_id):
    """Adds a new extracted license to the document.

    Raises SPDXValueError if data format is incorrect.
    """
    # FIXME: this state does not make sense
    self.reset_extr_lics()
    # Guard-clause form: reject malformed ids up front.
    if not validations.validate_extracted_lic_id(lic_id):
        raise SPDXValueError('ExtractedLicense::id')
    doc.add_extr_lic(document.ExtractedLicense(lic_id))
    return True
[ "def", "set_lic_id", "(", "self", ",", "doc", ",", "lic_id", ")", ":", "# FIXME: this state does not make sense", "self", ".", "reset_extr_lics", "(", ")", "if", "validations", ".", "validate_extracted_lic_id", "(", "lic_id", ")", ":", "doc", ".", "add_extr_lic", ...
40.727273
12.818182
def control_surface_encode(self, target, idSurface, mControl, bControl):
    '''
    Control for surface; pending and order to origin.

    target     : The system setting the commands (uint8_t)
    idSurface  : ID control surface send 0: throttle 1: aileron 2: elevator 3: rudder (uint8_t)
    mControl   : Pending (float)
    bControl   : Order to origin (float)
    '''
    message = MAVLink_control_surface_message(target, idSurface,
                                              mControl, bControl)
    return message
[ "def", "control_surface_encode", "(", "self", ",", "target", ",", "idSurface", ",", "mControl", ",", "bControl", ")", ":", "return", "MAVLink_control_surface_message", "(", "target", ",", "idSurface", ",", "mControl", ",", "bControl", ")" ]
54.636364
36.818182
def _run(command, quiet=False, timeout=None):
    """Run a command, returns command output."""
    try:
        with _spawn(command, quiet, timeout) as child:
            raw_output = child.read()
        # Normalise CRLF line endings and trim surrounding whitespace.
        return raw_output.strip().replace("\r\n", "\n")
    except pexpect.TIMEOUT:
        logger.info(f"command {command} timed out")
        raise Error()
[ "def", "_run", "(", "command", ",", "quiet", "=", "False", ",", "timeout", "=", "None", ")", ":", "try", ":", "with", "_spawn", "(", "command", ",", "quiet", ",", "timeout", ")", "as", "child", ":", "command_output", "=", "child", ".", "read", "(", ...
35
18
def is_valid_mac_oui(mac_block):
    """checks whether mac block is in format of 00-11-22 or 00:11:22.

    Exactly three hexadecimal pairs joined by a single, consistent
    separator (':' or '-').  The original implementation accepted any
    8-character string without a separator (e.g. 'zzzzzzzz') and never
    verified the octets were hex; both are rejected now.

    :param mac_block: candidate OUI string
    :return: int -- 1 if valid, 0 otherwise
    """
    import re
    if not isinstance(mac_block, str) or len(mac_block) != 8:
        return 0
    # Backreference \1 forces the same separator between both pairs.
    if re.match(r'^[0-9A-Fa-f]{2}([:-])[0-9A-Fa-f]{2}\1[0-9A-Fa-f]{2}$',
                mac_block):
        return 1
    return 0
[ "def", "is_valid_mac_oui", "(", "mac_block", ")", ":", "if", "len", "(", "mac_block", ")", "!=", "8", ":", "return", "0", "if", "':'", "in", "mac_block", ":", "if", "len", "(", "mac_block", ".", "split", "(", "':'", ")", ")", "!=", "3", ":", "retur...
29.142857
11.285714
def vertical_line(self,
                  x: Union[int, float],
                  y1: Union[int, float],
                  y2: Union[int, float],
                  emphasize: bool = False
                  ) -> None:
    """Adds a line from (x, y1) to (x, y2)."""
    # Normalise the endpoints so the stored segment runs bottom-to-top.
    low, high = min(y1, y2), max(y1, y2)
    self.vertical_lines.append(_VerticalLine(x, low, high, emphasize))
[ "def", "vertical_line", "(", "self", ",", "x", ":", "Union", "[", "int", ",", "float", "]", ",", "y1", ":", "Union", "[", "int", ",", "float", "]", ",", "y2", ":", "Union", "[", "int", ",", "float", "]", ",", "emphasize", ":", "bool", "=", "Fal...
42.777778
8.777778
def count(self, field='*'):
    """
    Returns a COUNT of the query by wrapping the query and performing a COUNT
    aggregate of the specified field
    :param field: the field to pass to the COUNT aggregate. Defaults to '*'
    :type field: str
    :return: The number of rows that the query will return
    :rtype: int
    """
    # NOTE(review): ``field`` is accepted but never forwarded to
    # get_count_query() -- confirm whether the aggregate honours it.
    result_rows = self.get_count_query().select(bypass_safe_limit=True)
    first_row = result_rows[0]
    return list(first_row.values())[0]
[ "def", "count", "(", "self", ",", "field", "=", "'*'", ")", ":", "rows", "=", "self", ".", "get_count_query", "(", ")", ".", "select", "(", "bypass_safe_limit", "=", "True", ")", "return", "list", "(", "rows", "[", "0", "]", ".", "values", "(", ")"...
35.538462
20
def add_manual_segmentation_to_data_frame(self, data_frame, segmentation_dictionary):
    """ Utility method to store manual segmentation of gait time series.

    :param data_frame: The data frame. It should have x, y, and z columns.
    :type data_frame: pandas.DataFrame
    :param segmentation_dictionary: A dictionary of the form
        {'signal_type': [(from, to), ...], ...}. The from and to can either
        be of type numpy.datetime64 or int, depending on how you are
        segmenting the time series.
    :type segmentation_dictionary: dict
    :return: The data_frame with a new column named 'segmentation'.
    :rtype: pandas.DataFrame
    """
    # add some checks to see if dictionary is in the right format!
    data_frame['segmentation'] = 'unknown'

    for label, windows in segmentation_dictionary.items():
        for start, end in windows:
            if type(start) != np.datetime64:
                # Positional bounds: clamp into the valid row range.
                # BUG FIX: the original compared against DataFrame.size
                # (rows * columns), which let out-of-range positions
                # through and crashed on index.values[end].
                if start < 0:
                    start = 0
                if end >= len(data_frame):
                    end = len(data_frame) - 1
                start = data_frame.index.values[start]
                end = data_frame.index.values[end]
            # .loc slicing is label-based and end-inclusive.
            data_frame.loc[start: end, 'segmentation'] = label

    return data_frame
[ "def", "add_manual_segmentation_to_data_frame", "(", "self", ",", "data_frame", ",", "segmentation_dictionary", ")", ":", "# add some checks to see if dictionary is in the right format!", "data_frame", "[", "'segmentation'", "]", "=", "'unknown'", "for", "i", ",", "(", "k",...
50.142857
28.392857
def compareValues( self, a, b ):
    """
    Compares two values based on the notches and values for this ruler.

    :param      a | <variant>
                b | <variant>

    :return     <int> 1 || 0 || -1
    """
    # Only Custom/Monthly rulers order by notch position; everything
    # else falls back to a plain value comparison.  (Python 2: cmp.)
    ruler_type = self.rulerType()
    if ruler_type not in (XChartRuler.Type.Custom, XChartRuler.Type.Monthly):
        return cmp(a, b)

    try:
        first = self._notches.index(a)
    except ValueError:
        return -1

    try:
        second = self._notches.index(b)
    except ValueError:
        return 1

    return cmp(first, second)
[ "def", "compareValues", "(", "self", ",", "a", ",", "b", ")", ":", "if", "(", "self", ".", "rulerType", "(", ")", "in", "(", "XChartRuler", ".", "Type", ".", "Custom", ",", "XChartRuler", ".", "Type", ".", "Monthly", ")", ")", ":", "try", ":", "a...
29.08
15.96
def _to_parent_frame(self, *args, **kwargs):
    """Conversion from Topocentric Frame to parent frame
    """
    lat, lon, _ = self.latlonalt
    # Rotation: undo the longitude, tilt by (lat - 90 deg), then apply
    # the station heading.
    rotation = rot3(-lon) @ rot2(lat - np.pi / 2.) @ rot3(self.heading)

    offset = np.zeros(6)
    offset[:3] = self.coordinates
    return self._convert(rotation, rotation), offset
[ "def", "_to_parent_frame", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lat", ",", "lon", ",", "_", "=", "self", ".", "latlonalt", "m", "=", "rot3", "(", "-", "lon", ")", "@", "rot2", "(", "lat", "-", "np", ".", "pi", "/...
40.75
6.625
def import_legislators(src):
    """
    Read the legislators from the csv files into a single Dataframe. Intended
    for importing new data.

    :param src: root directory containing the LEGISLATOR_DIR tree.
    :return: pandas.DataFrame with current legislators followed by historic
        ones (original indexes preserved).
    """
    logger.info("Importing Legislators From: {0}".format(src))
    current = pd.read_csv("{0}/{1}/legislators-current.csv".format(
        src, LEGISLATOR_DIR))
    historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format(
        src, LEGISLATOR_DIR))
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
    # in 2.0; pd.concat preserves the same row order and indexes.
    legislators = pd.concat([current, historic])
    return legislators
[ "def", "import_legislators", "(", "src", ")", ":", "logger", ".", "info", "(", "\"Importing Legislators From: {0}\"", ".", "format", "(", "src", ")", ")", "current", "=", "pd", ".", "read_csv", "(", "\"{0}/{1}/legislators-current.csv\"", ".", "format", "(", "src...
35.846154
17.076923
def get_property_name_from_attribute_name(attribute): """ Returns property name from attribute name :param attribute: Attribute name, may contain upper and lower case and spaces :return: string """ if isinstance(attribute, str) or isinstance(attribute, unicode): attribute_name = attribute elif hasattr(attribute, 'Name'): attribute_name = attribute.Name else: raise Exception('Attribute type {0} is not supported'.format(str(type(attribute)))) return attribute_name.lower().replace(' ', '_')
[ "def", "get_property_name_from_attribute_name", "(", "attribute", ")", ":", "if", "isinstance", "(", "attribute", ",", "str", ")", "or", "isinstance", "(", "attribute", ",", "unicode", ")", ":", "attribute_name", "=", "attribute", "elif", "hasattr", "(", "attrib...
42
18.428571
def hash256(msg_bytes): ''' byte-like -> bytes ''' if 'decred' in riemann.get_current_network_name(): return blake256(blake256(msg_bytes)) return hashlib.sha256(hashlib.sha256(msg_bytes).digest()).digest()
[ "def", "hash256", "(", "msg_bytes", ")", ":", "if", "'decred'", "in", "riemann", ".", "get_current_network_name", "(", ")", ":", "return", "blake256", "(", "blake256", "(", "msg_bytes", ")", ")", "return", "hashlib", ".", "sha256", "(", "hashlib", ".", "sh...
32.428571
21.285714
def log_to_file(filename, level=DEBUG): """send paramiko logs to a logfile, if they're not already going somewhere""" l = logging.getLogger("paramiko") if len(l.handlers) > 0: return l.setLevel(level) f = open(filename, 'w') lh = logging.StreamHandler(f) lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s', '%Y%m%d-%H:%M:%S')) l.addHandler(lh)
[ "def", "log_to_file", "(", "filename", ",", "level", "=", "DEBUG", ")", ":", "l", "=", "logging", ".", "getLogger", "(", "\"paramiko\"", ")", "if", "len", "(", "l", ".", "handlers", ")", ">", "0", ":", "return", "l", ".", "setLevel", "(", "level", ...
43.727273
18.545455
def prepare(self): ''' Run the preparation sequence required to start a salt minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Minion, self).prepare() try: if self.config['verify_env']: confd = self.config.get('default_include') if confd: # If 'default_include' is specified in config, then use it if '*' in confd: # Value is of the form "minion.d/*.conf" confd = os.path.dirname(confd) if not os.path.isabs(confd): # If configured 'default_include' is not an absolute # path, consider it relative to folder of 'conf_file' # (/etc/salt by default) confd = os.path.join( os.path.dirname(self.config['conf_file']), confd ) else: confd = os.path.join( os.path.dirname(self.config['conf_file']), 'minion.d' ) v_dirs = [ self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], confd, ] verify_env( v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], pki_dir=self.config['pki_dir'], ) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) log.info('Setting up the Salt Minion "%s"', self.config['id']) migrations.migrate_paths(self.config) # Bail out if we find a process running and it matches out pidfile if self.check_running(): self.action_log_info('An instance is already running. Exiting') self.shutdown(1) transport = self.config.get('transport').lower() # TODO: AIO core is separate from transport if transport in ('zeromq', 'tcp', 'detect'): # Late import so logging works correctly import salt.minion # If the minion key has not been accepted, then Salt enters a loop # waiting for it, if we daemonize later then the minion could halt # the boot process waiting for a key to be accepted on the master. 
# This is the latest safe place to daemonize self.daemonize_if_required() self.set_pidfile() if self.config.get('master_type') == 'func': salt.minion.eval_master_func(self.config) self.minion = salt.minion.MinionManager(self.config) else: log.error( 'The transport \'%s\' is not supported. Please use one of ' 'the following: tcp, zeromq, or detect.', transport ) self.shutdown(1)
[ "def", "prepare", "(", "self", ")", ":", "super", "(", "Minion", ",", "self", ")", ".", "prepare", "(", ")", "try", ":", "if", "self", ".", "config", "[", "'verify_env'", "]", ":", "confd", "=", "self", ".", "config", ".", "get", "(", "'default_inc...
40.379747
20.151899
def _extract_subscription_url(url): """Extract the first part of the URL, just after subscription: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/ """ match = re.match(r".*/subscriptions/[a-f0-9-]+/", url, re.IGNORECASE) if not match: raise ValueError("Unable to extract subscription ID from URL") return match.group(0)
[ "def", "_extract_subscription_url", "(", "url", ")", ":", "match", "=", "re", ".", "match", "(", "r\".*/subscriptions/[a-f0-9-]+/\"", ",", "url", ",", "re", ".", "IGNORECASE", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Unable to extract subsc...
47.125
18.75
def summary(self, featuresCol, weightCol=None): """ Returns an aggregate object that contains the summary of the column with the requested metrics. :param featuresCol: a column that contains features Vector object. :param weightCol: a column that contains weight value. Default weight is 1.0. :return: an aggregate column that contains the statistics. The exact content of this structure is determined during the creation of the builder. """ featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol) return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
[ "def", "summary", "(", "self", ",", "featuresCol", ",", "weightCol", "=", "None", ")", ":", "featuresCol", ",", "weightCol", "=", "Summarizer", ".", "_check_param", "(", "featuresCol", ",", "weightCol", ")", "return", "Column", "(", "self", ".", "_java_obj",...
45.266667
24.6
def _get_macd(df): """ Moving Average Convergence Divergence This function will initialize all following columns. MACD Line (macd): (12-day EMA - 26-day EMA) Signal Line (macds): 9-day EMA of MACD Line MACD Histogram (macdh): MACD Line - Signal Line :param df: data :return: None """ fast = df['close_12_ema'] slow = df['close_26_ema'] df['macd'] = fast - slow df['macds'] = df['macd_9_ema'] df['macdh'] = (df['macd'] - df['macds']) log.critical("NOTE: Behavior of MACDH calculation has changed as of " "July 2017 - it is now 1/2 of previous calculated values") del df['macd_9_ema'] del fast del slow
[ "def", "_get_macd", "(", "df", ")", ":", "fast", "=", "df", "[", "'close_12_ema'", "]", "slow", "=", "df", "[", "'close_26_ema'", "]", "df", "[", "'macd'", "]", "=", "fast", "-", "slow", "df", "[", "'macds'", "]", "=", "df", "[", "'macd_9_ema'", "]...
36.142857
17.095238
def get_user(self, user_id, **params): """https://developers.coinbase.com/api/v2#show-a-user""" response = self._get('v2', 'users', user_id, params=params) return self._make_api_object(response, User)
[ "def", "get_user", "(", "self", ",", "user_id", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'users'", ",", "user_id", ",", "params", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "...
55.25
10.25
def call_or_cast(self, method, args={}, nowait=False, **kwargs): """Apply remote `method` asynchronously or synchronously depending on the value of `nowait`. :param method: The name of the remote method to perform. :param args: Dictionary of arguments for the method. :keyword nowait: If false the call will block until the result is available and return it (default), if true the call will be non-blocking and no result will be returned. :keyword retry: If set to true then message sending will be retried in the event of connection failures. Default is decided by the :attr:`retry` attributed. :keyword retry_policy: Override retry policies. See :attr:`retry_policy`. This must be a dictionary, and keys will be merged with the default retry policy. :keyword timeout: Timeout to wait for replies in seconds as a float (**only relevant in blocking mode**). :keyword limit: Limit number of replies to wait for (**only relevant in blocking mode**). :keyword callback: If provided, this callback will be called for every reply received (**only relevant in blocking mode**). :keyword \*\*props: Additional message properties. See :meth:`kombu.Producer.publish`. """ return (nowait and self.cast or self.call)(method, args, **kwargs)
[ "def", "call_or_cast", "(", "self", ",", "method", ",", "args", "=", "{", "}", ",", "nowait", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "(", "nowait", "and", "self", ".", "cast", "or", "self", ".", "call", ")", "(", "method", ","...
54.538462
21.423077
async def _request( self, method: str, url: str, *, headers: dict = None, params: dict = None, json: dict = None) -> dict: """Make a request against the RainMachine device.""" if not headers: headers = {} try: async with self._websession.request(method, url, headers=headers, params=params, json=json) as resp: resp.raise_for_status() data = await resp.json(content_type=None) return data except ClientError as err: raise RequestError( 'Error requesting data from {}: {}'.format(url, err))
[ "async", "def", "_request", "(", "self", ",", "method", ":", "str", ",", "url", ":", "str", ",", "*", ",", "headers", ":", "dict", "=", "None", ",", "params", ":", "dict", "=", "None", ",", "json", ":", "dict", "=", "None", ")", "->", "dict", "...
35.5
17.409091
def get_payload(request): """ Extracts the request's payload information. This method will merge the URL parameter information and the JSON body of the request together to generate a dictionary of key<->value pairings. This method assumes that the JSON body being provided is also a key-value map, if it is not, then an HTTPBadRequest exception will be raised. :param request: <pyramid.request.Request> :return: <dict> """ # always extract values from the URL payload = dict(request.params.mixed()) # provide override capability from the JSON body try: json_data = request.json_body # no JSON body was found, pyramid raises an error # in this situation except StandardError: pass else: if not isinstance(json_data, dict): raise HTTPBadRequest('JSON body must be a key=value pairing') else: payload.update(json_data) return payload
[ "def", "get_payload", "(", "request", ")", ":", "# always extract values from the URL", "payload", "=", "dict", "(", "request", ".", "params", ".", "mixed", "(", ")", ")", "# provide override capability from the JSON body", "try", ":", "json_data", "=", "request", "...
27.647059
19.294118
def compact(self, include=None): """ Return compact views - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/views#list-views---compact>`__ """ return self._get(self._build_url(self.endpoint.compact(include=include)))
[ "def", "compact", "(", "self", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "compact", "(", "include", "=", "include", ")", ")", ")" ]
46.833333
19.166667
def EI(inc): """ Given a mean inclination value of a distribution of directions, this function calculates the expected elongation of this distribution using a best-fit polynomial of the TK03 GAD secular variation model (Tauxe and Kent, 2004). Parameters ---------- inc : inclination in degrees (int or float) Returns --------- elongation : float Examples --------- >>> pmag.EI(20) 2.4863973732 >>> pmag.EI(90) 1.0241570135500004 """ poly_tk03 = [3.15976125e-06, -3.52459817e-04, - 1.46641090e-02, 2.89538539e+00] return poly_tk03[0] * inc**3 + poly_tk03[1] * inc**2 + poly_tk03[2] * inc + poly_tk03[3]
[ "def", "EI", "(", "inc", ")", ":", "poly_tk03", "=", "[", "3.15976125e-06", ",", "-", "3.52459817e-04", ",", "-", "1.46641090e-02", ",", "2.89538539e+00", "]", "return", "poly_tk03", "[", "0", "]", "*", "inc", "**", "3", "+", "poly_tk03", "[", "1", "]"...
27.24
24.84
def _filenames_from_arg(filename): """Utility function to deal with polymorphic filenames argument.""" if isinstance(filename, string_types): filenames = [filename] elif isinstance(filename, (list, tuple)): filenames = filename else: raise Exception('filename argument must be string, list or tuple') for fn in filenames: if not os.path.exists(fn): raise ValueError('file not found: %s' % fn) if not os.path.isfile(fn): raise ValueError('not a file: %s' % fn) return filenames
[ "def", "_filenames_from_arg", "(", "filename", ")", ":", "if", "isinstance", "(", "filename", ",", "string_types", ")", ":", "filenames", "=", "[", "filename", "]", "elif", "isinstance", "(", "filename", ",", "(", "list", ",", "tuple", ")", ")", ":", "fi...
39.357143
12.428571
def _get_type(cls, ptr): """Get the subtype class for a pointer""" # fall back to the base class if unknown return cls.__types.get(lib.g_base_info_get_type(ptr), cls)
[ "def", "_get_type", "(", "cls", ",", "ptr", ")", ":", "# fall back to the base class if unknown", "return", "cls", ".", "__types", ".", "get", "(", "lib", ".", "g_base_info_get_type", "(", "ptr", ")", ",", "cls", ")" ]
37.4
18
def check_perms(obj_name, obj_type='file', ret=None, owner=None, grant_perms=None, deny_perms=None, inheritance=True, reset=False): ''' Check owner and permissions for the passed directory. This function checks the permissions and sets them, returning the changes made. .. versionadded:: 2019.2.0 Args: obj_name (str): The name or full path to the object obj_type (Optional[str]): The type of object for which to check permissions. Default is 'file' ret (dict): A dictionary to append changes to and return. If not passed, will create a new dictionary to return. owner (str): The owner to set for the directory. grant_perms (dict): A dictionary containing the user/group and the basic permissions to check/grant, ie: ``{'user': {'perms': 'basic_permission'}}``. Default is ``None``. deny_perms (dict): A dictionary containing the user/group and permissions to check/deny. Default is ``None``. inheritance (bool): ``True`` will enable inheritance from the parent object. ``False`` will disable inheritance. Default is ``True``. reset (bool): ``True`` will clear the DACL and set only the permissions defined in ``grant_perms`` and ``deny_perms``. ``False`` append permissions to the existing DACL. Default is ``False``. This does NOT affect inherited permissions. Returns: dict: A dictionary of changes that have been made Usage: .. code-block:: bash # You have to use __utils__ in order for __opts__ to be available # To see changes to ``C:\\Temp`` if the 'Users' group is given 'read & execute' permissions. 
__utils__['dacl.check_perms'](obj_name='C:\\Temp', obj_type='file', owner='Administrators', grant_perms={ 'Users': { 'perms': 'read_execute' } }) # Specify advanced attributes with a list __utils__['dacl.check_perms'](obj_name='C:\\Temp', obj_type='file', owner='Administrators', grant_perms={ 'jsnuffy': { 'perms': [ 'read_attributes', 'read_ea' ], 'applies_to': 'files_only' } }) ''' # Validate obj_type if obj_type.lower() not in flags().obj_type: raise SaltInvocationError( 'Invalid "obj_type" passed: {0}'.format(obj_type)) obj_type = obj_type.lower() if not ret: ret = {'name': obj_name, 'changes': {}, 'comment': [], 'result': True} orig_comment = '' else: orig_comment = ret['comment'] ret['comment'] = [] # Check owner if owner: owner = get_name(principal=owner) current_owner = get_owner(obj_name=obj_name, obj_type=obj_type) if owner != current_owner: if __opts__['test'] is True: ret['changes']['owner'] = owner else: try: set_owner(obj_name=obj_name, principal=owner, obj_type=obj_type) log.debug('Owner set to %s', owner) ret['changes']['owner'] = owner except CommandExecutionError: ret['result'] = False ret['comment'].append( 'Failed to change owner to "{0}"'.format(owner)) # Check inheritance if inheritance is not None: if not inheritance == get_inheritance(obj_name=obj_name, obj_type=obj_type): if __opts__['test'] is True: ret['changes']['inheritance'] = inheritance else: try: set_inheritance( obj_name=obj_name, enabled=inheritance, obj_type=obj_type) log.debug('%s inheritance', 'Enabling' if inheritance else 'Disabling') ret['changes']['inheritance'] = inheritance except CommandExecutionError: ret['result'] = False ret['comment'].append( 'Failed to set inheritance for "{0}" to {1}' ''.format(obj_name, inheritance)) # Check permissions log.debug('Getting current permissions for %s', obj_name) cur_perms = get_permissions(obj_name=obj_name, obj_type=obj_type) # Verify Deny Permissions if deny_perms is not None: ret = _check_perms(obj_name=obj_name, 
obj_type=obj_type, new_perms=deny_perms, cur_perms=cur_perms, access_mode='deny', ret=ret) # Verify Grant Permissions if grant_perms is not None: ret = _check_perms(obj_name=obj_name, obj_type=obj_type, new_perms=grant_perms, cur_perms=cur_perms, access_mode='grant', ret=ret) # Check reset # If reset=True, which users will be removed as a result if reset: log.debug('Resetting permissions for %s', obj_name) cur_perms = get_permissions(obj_name=obj_name, obj_type=obj_type) for user_name in cur_perms['Not Inherited']: # case insensitive dictionary search if grant_perms is not None and \ user_name.lower() not in set(k.lower() for k in grant_perms): if 'grant' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: if 'remove_perms' not in ret['changes']: ret['changes']['remove_perms'] = {} ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: ret['changes']['remove_perms'] = {} rm_permissions( obj_name=obj_name, principal=user_name, ace_type='grant', obj_type=obj_type) ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) # case insensitive dictionary search if deny_perms is not None and \ user_name.lower() not in set(k.lower() for k in deny_perms): if 'deny' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: if 'remove_perms' not in ret['changes']: ret['changes']['remove_perms'] = {} ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: ret['changes']['remove_perms'] = {} rm_permissions( obj_name=obj_name, principal=user_name, ace_type='deny', obj_type=obj_type) ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) # Re-add the Original Comment if defined if isinstance(orig_comment, six.string_types): if orig_comment: ret['comment'].insert(0, orig_comment) else: if orig_comment: ret['comment'] = 
orig_comment.extend(ret['comment']) ret['comment'] = '\n'.join(ret['comment']) # Set result for test = True if __opts__['test'] and ret['changes']: ret['result'] = None return ret
[ "def", "check_perms", "(", "obj_name", ",", "obj_type", "=", "'file'", ",", "ret", "=", "None", ",", "owner", "=", "None", ",", "grant_perms", "=", "None", ",", "deny_perms", "=", "None", ",", "inheritance", "=", "True", ",", "reset", "=", "False", ")"...
40.009132
19.296804
def template_heron_tools_hcl(cl_args, masters, zookeepers): ''' template heron tools ''' heron_tools_hcl_template = "%s/standalone/templates/heron_tools.template.hcl" \ % cl_args["config_path"] heron_tools_hcl_actual = "%s/standalone/resources/heron_tools.hcl" \ % cl_args["config_path"] single_master = masters[0] template_file(heron_tools_hcl_template, heron_tools_hcl_actual, { "<zookeeper_host:zookeeper_port>": ",".join( ['%s' % zk if ":" in zk else '%s:2181' % zk for zk in zookeepers]), "<heron_tracker_executable>": '"%s/heron-tracker"' % config.get_heron_bin_dir(), "<heron_tools_hostname>": '"%s"' % get_hostname(single_master, cl_args), "<heron_ui_executable>": '"%s/heron-ui"' % config.get_heron_bin_dir() })
[ "def", "template_heron_tools_hcl", "(", "cl_args", ",", "masters", ",", "zookeepers", ")", ":", "heron_tools_hcl_template", "=", "\"%s/standalone/templates/heron_tools.template.hcl\"", "%", "cl_args", "[", "\"config_path\"", "]", "heron_tools_hcl_actual", "=", "\"%s/standalon...
50.666667
31.222222
def auto_index(mcs): """Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front. """ for index in mcs._meta.indices: index.ensure(mcs.collection)
[ "def", "auto_index", "(", "mcs", ")", ":", "for", "index", "in", "mcs", ".", "_meta", ".", "indices", ":", "index", ".", "ensure", "(", "mcs", ".", "collection", ")" ]
33.875
13.4375
def get_parent_book_nodes(self): """Gets the parents of this book. return: (osid.commenting.BookNodeList) - the parents of this book *compliance: mandatory -- This method must be implemented.* """ parent_book_nodes = [] for node in self._my_map['parentNodes']: parent_book_nodes.append(BookNode( node._my_map, runtime=self._runtime, proxy=self._proxy, lookup_session=self._lookup_session)) return BookNodeList(parent_book_nodes)
[ "def", "get_parent_book_nodes", "(", "self", ")", ":", "parent_book_nodes", "=", "[", "]", "for", "node", "in", "self", ".", "_my_map", "[", "'parentNodes'", "]", ":", "parent_book_nodes", ".", "append", "(", "BookNode", "(", "node", ".", "_my_map", ",", "...
35.1875
14.0625
def references_by_element(self, element_href): """ Return all references to element specified. :param str element_href: element reference :return: list of references where element is used :rtype: list(dict) """ result = self.make_request( method='create', resource='references_by_element', json={ 'value': element_href}) return result
[ "def", "references_by_element", "(", "self", ",", "element_href", ")", ":", "result", "=", "self", ".", "make_request", "(", "method", "=", "'create'", ",", "resource", "=", "'references_by_element'", ",", "json", "=", "{", "'value'", ":", "element_href", "}",...
31.285714
11.571429
def __potential_connection_failure(self, e): """ OperationalError's are emitted by the _mysql library for almost every error code emitted by MySQL. Because of this we verify that the error is actually a connection error before terminating the connection and firing off a PoolConnectionException """ try: self._conn.query('SELECT 1') except (IOError, _mysql.OperationalError): # ok, it's actually an issue. self.__handle_connection_failure(e) else: # seems ok, probably programmer error raise _mysql.DatabaseError(*e.args)
[ "def", "__potential_connection_failure", "(", "self", ",", "e", ")", ":", "try", ":", "self", ".", "_conn", ".", "query", "(", "'SELECT 1'", ")", "except", "(", "IOError", ",", "_mysql", ".", "OperationalError", ")", ":", "# ok, it's actually an issue.", "self...
45.214286
13.142857
def basic_addresses_write(self, cycles, last_op_address, address, word): """ 0113 0019 TXTTAB RMB 2 *PV BEGINNING OF BASIC PROGRAM 0114 001B VARTAB RMB 2 *PV START OF VARIABLES 0115 001D ARYTAB RMB 2 *PV START OF ARRAYS 0116 001F ARYEND RMB 2 *PV END OF ARRAYS (+1) 0117 0021 FRETOP RMB 2 *PV START OF STRING STORAGE (TOP OF FREE RAM) 0118 0023 STRTAB RMB 2 *PV START OF STRING VARIABLES 0119 0025 FRESPC RMB 2 UTILITY STRING POINTER 0120 0027 MEMSIZ RMB 2 *PV TOP OF STRING SPACE """ log.critical("%04x| write $%04x to $%04x", last_op_address, word, address) return word
[ "def", "basic_addresses_write", "(", "self", ",", "cycles", ",", "last_op_address", ",", "address", ",", "word", ")", ":", "log", ".", "critical", "(", "\"%04x| write $%04x to $%04x\"", ",", "last_op_address", ",", "word", ",", "address", ")", "return", "word" ]
50.384615
18.076923
def serveInBackground(port, serverName, prefix='/status/'): """Convenience function: spawn a background server thread that will serve HTTP requests to get the status. Returns the thread.""" import flask, threading from wsgiref.simple_server import make_server app = flask.Flask(__name__) registerStatsHandler(app, serverName, prefix) server = threading.Thread(target=make_server('', port, app).serve_forever) server.daemon = True server.start() return server
[ "def", "serveInBackground", "(", "port", ",", "serverName", ",", "prefix", "=", "'/status/'", ")", ":", "import", "flask", ",", "threading", "from", "wsgiref", ".", "simple_server", "import", "make_server", "app", "=", "flask", ".", "Flask", "(", "__name__", ...
42.545455
14.727273
def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) if not os.path.exists(in_path): raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) cmd = self._password_cmd() if C.DEFAULT_SCP_IF_SSH: cmd += ["scp"] + self.common_args cmd += [in_path,self.host + ":" + out_path] indata = None else: cmd += ["sftp"] + self.common_args + [self.host] indata = "put %s %s\n" % (in_path, out_path) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self._send_password() stdout, stderr = p.communicate(indata) if p.returncode != 0: raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
[ "def", "put_file", "(", "self", ",", "in_path", ",", "out_path", ")", ":", "vvv", "(", "\"PUT %s TO %s\"", "%", "(", "in_path", ",", "out_path", ")", ",", "host", "=", "self", ".", "host", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "i...
43.136364
21.409091
def perform(self, event): """ Perform the action. """ wizard = NewDotGraphWizard(parent=self.window.control, window=self.window, title="New Graph") # Open the wizard if wizard.open() == OK: wizard.finished = True
[ "def", "perform", "(", "self", ",", "event", ")", ":", "wizard", "=", "NewDotGraphWizard", "(", "parent", "=", "self", ".", "window", ".", "control", ",", "window", "=", "self", ".", "window", ",", "title", "=", "\"New Graph\"", ")", "# Open the wizard", ...
29.888889
13
def git_clean(ctx): """ Delete all files untracked by git. :param ctx: Context object. :return: None. """ # Get command parts cmd_part_s = [ # Program path 'git', # Clean untracked files 'clean', # Remove all untracked files '-x', # Remove untracked directories too '-d', # Force to remove '-f', # Give two `-f` flags to remove sub-repositories too '-f', ] # Print title print_title('git_clean') # Print the command in multi-line format print_text(_format_multi_line_command(cmd_part_s)) # Create subprocess to run the command in top directory proc = subprocess.Popen(cmd_part_s, cwd=ctx.top_dir) # Wait the subprocess to finish proc.wait() # Print end title print_title('git_clean', is_end=True)
[ "def", "git_clean", "(", "ctx", ")", ":", "# Get command parts", "cmd_part_s", "=", "[", "# Program path", "'git'", ",", "# Clean untracked files", "'clean'", ",", "# Remove all untracked files", "'-x'", ",", "# Remove untracked directories too", "'-d'", ",", "# Force to ...
19.395349
22.604651
def lazy_val(func, with_del_hook=False): '''A memoize decorator for class properties. Return a cached property that is calculated by function `func` on first access. ''' def hook_for(that): try: orig_del = that.__del__ except AttributeError: orig_del = None def del_hook(*args, **kwargs): del that._cache[id(that)] del that._del_hook_cache[id(that)] if orig_del is not None: orig_del(that, *args, **kwargs) try: if orig_del is not None: that.__del__ = del_hook except AttributeError: # that.__del__ is a class property and cannot be changed by instance orig_del = None return del_hook def add_to_del_hook_cache(that): if with_del_hook: try: that._del_hook_cache[id(that)] = hook_for(that) except AttributeError: # when that._del_hook_cache not exists, it means it is not a # class property. Then, we don't need a del_hook(). pass @functools.wraps(func) def get(self): try: return self._cache[id(self)][func] except AttributeError: self._cache = {id(self): {}, } add_to_del_hook_cache(self) except KeyError: try: self._cache[id(self)] except KeyError: self._cache[id(self)] = {} add_to_del_hook_cache(self) val = self._cache[id(self)][func] = func(self) return val return property(get)
[ "def", "lazy_val", "(", "func", ",", "with_del_hook", "=", "False", ")", ":", "def", "hook_for", "(", "that", ")", ":", "try", ":", "orig_del", "=", "that", ".", "__del__", "except", "AttributeError", ":", "orig_del", "=", "None", "def", "del_hook", "(",...
30.113208
17.811321
def subclass(cls, *bases, **kwargs): """ Add bases to class (late subclassing) Annoyingly we cannot yet modify __bases__ of an existing class, instead we must create another subclass, see here; http://bugs.python.org/issue672115 >>> class A(object): pass >>> class B(object): pass >>> class C(object): pass >>> issubclass(B, A) False >>> D = subclass(B, A) >>> issubclass(D, A) True >>> issubclass(D, B) True """ last = kwargs.get('last', False) bases = tuple(bases) for base in bases: assert inspect.isclass(base), "bases must be classes" new_bases = (cls,)+bases if last else bases+(cls,) new_cls = type(cls.__name__, tuple(new_bases), {}) return new_cls
[ "def", "subclass", "(", "cls", ",", "*", "bases", ",", "*", "*", "kwargs", ")", ":", "last", "=", "kwargs", ".", "get", "(", "'last'", ",", "False", ")", "bases", "=", "tuple", "(", "bases", ")", "for", "base", "in", "bases", ":", "assert", "insp...
28.038462
16.423077
def call_lights(*args, **kwargs): ''' Get info about all available lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.lights salt '*' hue.lights id=1 salt '*' hue.lights id=1,2,3 ''' res = dict() lights = _get_lights() for dev_id in 'id' in kwargs and _get_devices(kwargs) or sorted(lights.keys()): if lights.get(six.text_type(dev_id)): res[dev_id] = lights[six.text_type(dev_id)] return res or False
[ "def", "call_lights", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "res", "=", "dict", "(", ")", "lights", "=", "_get_lights", "(", ")", "for", "dev_id", "in", "'id'", "in", "kwargs", "and", "_get_devices", "(", "kwargs", ")", "or", "sorted",...
24.652174
24.826087
def find_log_files(self, sp_key, filecontents=True, filehandles=False):
    """
    Return matches log files of interest.
    :param sp_key: Search pattern key specified in config
    :param filecontents: Set to true (default) to slurp and yield file contents
    :param filehandles: Set to true to return a file handle instead of slurped file contents
    :return: Yields a dict with filename (fn), root directory (root), cleaned sample name
             generated from the filename (s_name) and either the file contents or file handle
             for the current matched file (f).
             As yield is used, the results can be iterated over without loading all files at once
    """
    # Pick up path filters if specified.
    # Allows modules to be called multiple times with different sets of files
    path_filters = getattr(self, 'mod_cust_config', {}).get('path_filters')
    path_filters_exclude = getattr(self, 'mod_cust_config', {}).get('path_filters_exclude')

    # Old, deprecated syntax support (sp_key given as a search-pattern dict).
    # Likely to be removed in a future version.
    if isinstance(sp_key, dict):
        report.files[self.name] = list()
        for sf in report.searchfiles:
            if report.search_file(sp_key, {'fn': sf[0], 'root': sf[1]}):
                report.files[self.name].append({'fn': sf[0], 'root': sf[1]})
        sp_key = self.name
        logwarn = "Depreciation Warning: {} - Please use new style for find_log_files()".format(self.name)
        if len(report.files[self.name]) > 0:
            logger.warn(logwarn)
        else:
            logger.debug(logwarn)
    elif not isinstance(sp_key, str):
        logger.warn("Did not understand find_log_files() search key")
        return

    for f in report.files[sp_key]:
        # Make a note of the filename so that we can report it if something crashes
        report.last_found_file = os.path.join(f['root'], f['fn'])

        # Filter out files based on exclusion patterns
        if path_filters_exclude and len(path_filters_exclude) > 0:
            exlusion_hits = (fnmatch.fnmatch(report.last_found_file, pfe) for pfe in path_filters_exclude)
            if any(exlusion_hits):
                logger.debug("{} - Skipping '{}' as it matched the path_filters_exclude for '{}'".format(sp_key, f['fn'], self.name))
                continue

        # Filter out files based on inclusion patterns
        if path_filters and len(path_filters) > 0:
            inclusion_hits = (fnmatch.fnmatch(report.last_found_file, pf) for pf in path_filters)
            if not any(inclusion_hits):
                logger.debug("{} - Skipping '{}' as it didn't match the path_filters for '{}'".format(sp_key, f['fn'], self.name))
                continue
            else:
                logger.debug("{} - Selecting '{}' as it matched the path_filters for '{}'".format(sp_key, f['fn'], self.name))

        # Make a sample name from the filename
        f['s_name'] = self.clean_s_name(f['fn'], f['root'])
        if filehandles or filecontents:
            try:
                # Custom content module can now handle image files
                (ftype, encoding) = mimetypes.guess_type(os.path.join(f['root'], f['fn']))
                if ftype is not None and ftype.startswith('image'):
                    # Images: always return file handles, opened in binary mode
                    with io.open(os.path.join(f['root'], f['fn']), "rb") as fh:
                        # always return file handles
                        f['f'] = fh
                        yield f
                else:
                    # Everything else - should be all text files
                    with io.open(os.path.join(f['root'], f['fn']), "r", encoding='utf-8') as fh:
                        if filehandles:
                            f['f'] = fh
                            yield f
                        elif filecontents:
                            f['f'] = fh.read()
                            yield f
            except (IOError, OSError, ValueError, UnicodeDecodeError) as e:
                if config.report_readerrors:
                    logger.debug("Couldn't open filehandle when returning file: {}\n{}".format(f['fn'], e))
                    f['f'] = None
        else:
            # Neither handle nor contents requested: yield metadata only
            yield f
[ "def", "find_log_files", "(", "self", ",", "sp_key", ",", "filecontents", "=", "True", ",", "filehandles", "=", "False", ")", ":", "# Pick up path filters if specified.", "# Allows modules to be called multiple times with different sets of files", "path_filters", "=", "getatt...
55.153846
27.153846
def get_group_list(user, include_default=True):
    '''
    Returns a list of all of the system group names of which the user
    is a member.

    :param user: login name of the user to look up
    :param include_default: when False, omit the user's default (primary)
        group from the result
    :return: sorted list of group names; empty list when grp/pwd are
        unavailable on this platform
    '''
    if HAS_GRP is False or HAS_PWD is False:
        # Non-POSIX platform (e.g. Windows): grp/pwd modules unavailable
        return []
    group_names = None
    ugroups = set()
    if hasattr(os, 'getgrouplist'):
        # Try os.getgrouplist, available in python >= 3.3
        log.trace('Trying os.getgrouplist for \'%s\'', user)
        try:
            group_names = [
                grp.getgrgid(grpid).gr_name for grpid in
                os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
            ]
        except Exception:
            # Deliberate best-effort: fall through to the next lookup method
            pass
    elif HAS_PYSSS:
        # Try pysss.getgrouplist
        log.trace('Trying pysss.getgrouplist for \'%s\'', user)
        try:
            group_names = list(pysss.getgrouplist(user))
        except Exception:
            # Deliberate best-effort: fall through to the generic scan
            pass

    if group_names is None:
        # Fall back to generic code
        # Include the user's default group to match behavior of
        # os.getgrouplist() and pysss.getgrouplist()
        log.trace('Trying generic group list for \'%s\'', user)
        group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
        try:
            default_group = get_default_group(user)
            if default_group not in group_names:
                group_names.append(default_group)
        except KeyError:
            # If for some reason the user does not have a default group
            pass

    if group_names is not None:
        ugroups.update(group_names)

    if include_default is False:
        # Historically, saltstack code for getting group lists did not
        # include the default group. Some things may only want
        # supplemental groups, so include_default=False omits the users
        # default group.
        try:
            default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
            ugroups.remove(default_group)
        except KeyError:
            # If for some reason the user does not have a default group
            pass
    log.trace('Group list for user \'%s\': %s', user, sorted(ugroups))
    return sorted(ugroups)
[ "def", "get_group_list", "(", "user", ",", "include_default", "=", "True", ")", ":", "if", "HAS_GRP", "is", "False", "or", "HAS_PWD", "is", "False", ":", "return", "[", "]", "group_names", "=", "None", "ugroups", "=", "set", "(", ")", "if", "hasattr", ...
36.561404
20.631579
def project_community(index, start, end):
    """Compute the metrics for the project community section of the
    enriched git index.

    Returns a dictionary containing "author_metrics", "people_top_metrics"
    and "orgs_top_metrics" as the keys and the related Metrics as the
    values.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data upto
    :return: dictionary with the value of the metrics
    """
    community = {}
    # Author and people-top sections are both driven by the Authors metric.
    community["author_metrics"] = [Authors(index, start, end)]
    community["people_top_metrics"] = [Authors(index, start, end)]
    community["orgs_top_metrics"] = [Organizations(index, start, end)]
    return community
[ "def", "project_community", "(", "index", ",", "start", ",", "end", ")", ":", "results", "=", "{", "\"author_metrics\"", ":", "[", "Authors", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"people_top_metrics\"", ":", "[", "Authors", "(", "inde...
34.05
21.7
def rotate_around(self, axis, theta):
    """Return this vector rotated about `axis` through angle `theta`.

    The right-hand rule applies; `axis` need not be normalized.
    """
    # Adapted from equations published by Glenn Murray.
    # http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
    px, py, pz = self.x, self.y, self.z
    ax, ay, az = axis.x, axis.y, axis.z
    # Hoist common factors: squared norm / norm of the axis, and the
    # trig terms pre-scaled so the formulas below stay compact.
    norm_sq = ax**2 + ay**2 + az**2
    norm = math.sqrt(norm_sq)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta) / norm
    dot_t = (ax * px + ay * py + az * pz) * (1 - cos_t) / norm_sq
    return Vector3((ax * dot_t + px * cos_t + (-az * py + ay * pz) * sin_t),
                   (ay * dot_t + py * cos_t + (az * px - ax * pz) * sin_t),
                   (az * dot_t + pz * cos_t + (-ay * px + ax * py) * sin_t))
[ "def", "rotate_around", "(", "self", ",", "axis", ",", "theta", ")", ":", "# Adapted from equations published by Glenn Murray.", "# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html", "x", ",", "y", ",", "z", "=", "self", ".", "x", ",", "se...
40.25
17.75
def get_comments_by_genus_type(self, comment_genus_type):
    """Gets a ``CommentList`` corresponding to the given comment genus
    ``Type`` which does not include comments of genus types derived from
    the specified ``Type``.

    arg:    comment_genus_type (osid.type.Type): a comment genus type
    return: (osid.commenting.CommentList) - the returned ``Comment`` list
    raise:  NullArgument - ``comment_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_genus_type
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('commenting',
                                     collection='Comment',
                                     runtime=self._runtime)
    # Merge the genus-type predicate with the session's view filter,
    # newest records first.
    query = dict({'genusTypeId': str(comment_genus_type)}, **self._view_filter())
    matches = collection.find(query).sort('_id', DESCENDING)
    return objects.CommentList(matches, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_comments_by_genus_type", "(", "self", ",", "comment_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidat...
54.73913
21.608696
def header(*msg, level='h1', separator=" ", print_out=print):
    '''Print a header block in text mode.

    Joins *msg* with *separator* and renders it at the requested level:
    'h0' (boxed), 'h1' (blank line + underline), 'h2' (tab-indented) or
    anything else (double-tab-indented). Output goes through *print_out*.
    '''
    text = separator.join(str(part) for part in msg)
    if level == 'h0':
        width = 80
        border = '+' + '-' * (width + 2)
        print_out(border)
        print_out("| %s" % text)
        print_out(border)
    elif level == 'h1':
        print_out("")
        print_out(text)
        print_out('-' * 60)
    elif level == 'h2':
        print_out('\t%s' % text)
        print_out('\t' + ('-' * 40))
    else:
        print_out('\t\t%s' % text)
        print_out('\t\t' + ('-' * 20))
[ "def", "header", "(", "*", "msg", ",", "level", "=", "'h1'", ",", "separator", "=", "\" \"", ",", "print_out", "=", "print", ")", ":", "out_string", "=", "separator", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "msg", ")", "if", "le...
33.15
12.25
def redo(self):
    """Perform the top group on the redo stack, if present.

    Wraps the replayed group in a new undo group with the same name.
    Raises RuntimeError when an undo or redo is already in progress;
    silently returns when the redo stack is empty.
    """
    # Guard against re-entrant undo/redo.
    if self._undoing or self._redoing:
        raise RuntimeError
    if not self._redo:
        return
    top = self._redo.pop()
    self._redoing = True
    self.begin_grouping()
    top.perform()
    self.set_action_name(top.name)
    self.end_grouping()
    self._redoing = False
    self.notify()
[ "def", "redo", "(", "self", ")", ":", "if", "self", ".", "_undoing", "or", "self", ".", "_redoing", ":", "raise", "RuntimeError", "if", "not", "self", ".", "_redo", ":", "return", "group", "=", "self", ".", "_redo", ".", "pop", "(", ")", "self", "....
25.571429
19
def process_files(manager):
    """
    Process a random number of files on a random number of systems across
    multiple data centers, driving nested progress counters on *manager*.
    """
    # Top-level progress bar across all data centers
    overall = manager.counter(total=DATACENTERS, desc='Processing:', unit='datacenters')

    for dc_idx in range(1, DATACENTERS + 1):
        system_count = random.randint(*SYSTEMS)  # Random number of systems
        # Child bar; leave=False so it is replaced when finished
        dc_bar = manager.counter(total=system_count, desc='  Datacenter %d:' % dc_idx,
                                 unit='systems', leave=False)

        for sys_idx in range(1, system_count + 1):
            # No total given, so this acts as a plain counter
            sys_bar = manager.counter(desc='    System %d:' % sys_idx, unit='files', leave=False)
            file_count = random.randint(*FILES)  # Random file count

            for _ in range(file_count):
                sys_bar.update()  # Count one file
                time.sleep(random.uniform(0.0001, 0.0005))  # Simulated processing time

            sys_bar.close()  # Remove the per-system counter
            LOGGER.info('Updated %d files on System %d in Datacenter %d',
                        file_count, sys_idx, dc_idx)
            dc_bar.update()

        dc_bar.close()
        overall.update()

    overall.close()
[ "def", "process_files", "(", "manager", ")", ":", "# Get a top level progress bar", "enterprise", "=", "manager", ".", "counter", "(", "total", "=", "DATACENTERS", ",", "desc", "=", "'Processing:'", ",", "unit", "=", "'datacenters'", ")", "# Iterate through data cen...
40.918919
26
def _get_client(self, project_id): """ Provides a client for interacting with the Cloud Spanner API. :param project_id: The ID of the GCP project. :type project_id: str :return: google.cloud.spanner_v1.client.Client :rtype: object """ if not self._client: self._client = Client(project=project_id, credentials=self._get_credentials()) return self._client
[ "def", "_get_client", "(", "self", ",", "project_id", ")", ":", "if", "not", "self", ".", "_client", ":", "self", ".", "_client", "=", "Client", "(", "project", "=", "project_id", ",", "credentials", "=", "self", ".", "_get_credentials", "(", ")", ")", ...
35.75
17.25
def delete(self, uri, default_response=None):
    """
    Call DELETE on the Gitlab server

    >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False)
    >>> gitlab.login(user='root', password='5iveL!fe')
    >>> gitlab.delete('/users/5')

    :param uri: String with the URI you wish to delete
    :param default_response: Return value if JSONDecodeError
    :return: Dictionary containing response data
    :raise: HttpError: If invalid response returned
    """
    target = self.api_url + uri
    # Session-level auth/SSL/timeout settings apply to every request.
    response = requests.delete(target,
                               headers=self.headers,
                               verify=self.verify_ssl,
                               auth=self.auth,
                               timeout=self.timeout)
    return self.success_or_raise(response, default_response=default_response)
[ "def", "delete", "(", "self", ",", "uri", ",", "default_response", "=", "None", ")", ":", "url", "=", "self", ".", "api_url", "+", "uri", "response", "=", "requests", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "headers", ",", "verif...
40.315789
17.684211
def list_all_categories(cls, **kwargs):
    """List Categories

    Return a list of Categories.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_categories(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Category]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper with
    # identical arguments; when async=True the helper yields the request
    # thread instead of the response data.
    return cls._list_all_categories_with_http_info(**kwargs)
[ "def", "list_all_categories", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_categories_with_http_info", "(", ...
36.73913
14.695652
def do_check(pool, request, models, include_children_for, modelgb):
    """Run per-key checks over *request* after filling in missing children.

    *request* is the output of translate_check; *models* is a dict of
    {(model_name, pkey_tuple): model}. *include_children_for* is a
    {model_name: fields_list} for which nulls are added in *request* for
    missing children (see add_missing_children / AMC); the caller should
    have applied the same ICF logic when looking up models so the arg has
    all the refs the DB knows. *modelgb* is a misc.GetterBy<ModelInfo>
    used by AMC for resolution.
    """
    add_missing_children(models, request, include_children_for, modelgb)
    checked = {}
    for key, value in request.items():
        checked[key] = fkapply(models, pool, process_check, None, key, value)
    return checked
[ "def", "do_check", "(", "pool", ",", "request", ",", "models", ",", "include_children_for", ",", "modelgb", ")", ":", "add_missing_children", "(", "models", ",", "request", ",", "include_children_for", ",", "modelgb", ")", "return", "{", "k", ":", "fkapply", ...
89.142857
49.142857
def admin_confirm_sign_up(self, username=None):
    """
    Confirms user registration as an admin without using a confirmation
    code. Works on any user.

    :param username: User's username; falls back to ``self.username``
        when omitted.
    :return: None
    """
    target = username or self.username
    self.client.admin_confirm_sign_up(
        UserPoolId=self.user_pool_id,
        Username=target,
    )
[ "def", "admin_confirm_sign_up", "(", "self", ",", "username", "=", "None", ")", ":", "if", "not", "username", ":", "username", "=", "self", ".", "username", "self", ".", "client", ".", "admin_confirm_sign_up", "(", "UserPoolId", "=", "self", ".", "user_pool_...
31.846154
10.615385
def circos_radius(n_nodes, node_r):
    """
    Automatically computes the origin-to-node centre radius of the Circos
    plot using the triangle equality sine rule:

        a / sin(A) = b / sin(B) = c / sin(C)

    :param n_nodes: the number of nodes in the plot.
    :type n_nodes: int
    :param node_r: the radius of each node.
    :type node_r: float
    :returns: Origin-to-node centre radius.
    """
    central_angle = 2 * np.pi / n_nodes        # angle A at the origin
    base_angle = (np.pi - central_angle) / 2   # angle B at each node centre
    chord = 2 * node_r                         # side a: adjacent nodes touch
    return chord * np.sin(base_angle) / np.sin(central_angle)
[ "def", "circos_radius", "(", "n_nodes", ",", "node_r", ")", ":", "A", "=", "2", "*", "np", ".", "pi", "/", "n_nodes", "# noqa", "B", "=", "(", "np", ".", "pi", "-", "A", ")", "/", "2", "# noqa", "a", "=", "2", "*", "node_r", "return", "a", "*...
30.117647
12.823529
def names(self):
    """
    Returns a list of queues available, ``None`` if no such queues found.
    Remember this will only shows queues with at least one item enqueued.
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')
    try:
        raw_keys = self.rdb.keys("retaskqueue-*")
    except redis.exceptions.ConnectionError as err:
        raise ConnectionError(str(err))
    # Strip the 12-character "retaskqueue-" prefix from each key.
    return [key[12:] for key in raw_keys]
[ "def", "names", "(", "self", ")", ":", "data", "=", "None", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "try", ":", "data", "=", "self", ".", "rdb", ".", "keys", "(", "\"retaskqueue-*\"", ...
31.8125
16.4375
def _init_io(self):
    """!
    GPIO initialization.
    Set GPIO into BCM mode and init other IOs mode
    """
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    # Configure every pin we drive as an output (currently just the SPI
    # data/command line).
    output_pins = [self._spi_dc]
    for output_pin in output_pins:
        GPIO.setup(output_pin, GPIO.OUT)
[ "def", "_init_io", "(", "self", ")", ":", "GPIO", ".", "setwarnings", "(", "False", ")", "GPIO", ".", "setmode", "(", "GPIO", ".", "BCM", ")", "pins", "=", "[", "self", ".", "_spi_dc", "]", "for", "pin", "in", "pins", ":", "GPIO", ".", "setup", "...
28.1
9