text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def gen_anytext(*args):
    """Build a space-separated bag of words for the anytext property.

    Accepts any mix of scalar terms and lists of terms; ``None`` entries
    (at either level) are skipped.
    """
    words = []
    for value in args:
        if value is None:
            continue
        if isinstance(value, list):
            # Flatten one level, dropping None members.
            words.extend(item for item in value if item is not None)
        else:
            words.append(value)
    return ' '.join(words)
[ "def", "gen_anytext", "(", "*", "args", ")", ":", "bag", "=", "[", "]", "for", "term", "in", "args", ":", "if", "term", "is", "not", "None", ":", "if", "isinstance", "(", "term", ",", "list", ")", ":", "for", "term2", "in", "term", ":", "if", "...
24.5625
15.0625
def get_sequences(self, pdb_id = None):
    '''Create Sequence objects for each FASTA sequence.

    When `pdb_id` is given, returns {chain_id: Sequence} for that entry
    only; otherwise returns {pdb_id: {chain_id: Sequence}} for all
    entries. (Python 2 code: uses dict.iteritems().)
    '''
    sequences = {}
    if pdb_id:
        # Single entry: flat mapping of chain id -> Sequence.
        for chain_id, seq in self.get(pdb_id, {}).iteritems():
            sequences[chain_id] = Sequence.from_sequence(chain_id, seq)
    else:
        # All entries: nested mapping pdb id -> chain id -> Sequence.
        for entry_id, chains in self.iteritems():
            entry = {}
            for chain_id, seq in chains.iteritems():
                entry[chain_id] = Sequence.from_sequence(chain_id, seq)
            sequences[entry_id] = entry
    return sequences
[ "def", "get_sequences", "(", "self", ",", "pdb_id", "=", "None", ")", ":", "sequences", "=", "{", "}", "if", "pdb_id", ":", "for", "chain_id", ",", "sequence", "in", "self", ".", "get", "(", "pdb_id", ",", "{", "}", ")", ".", "iteritems", "(", ")",...
46.75
21.083333
def _get_v_angle(self, case, B, v_angle_guess, p_businj, iref):
    """ Calculates the voltage phase angles.

    Solves the linearised power-flow system B * theta = P for the
    non-reference (PV and PQ) buses and re-inserts the slack-bus angle.

    NOTE(review): assumes `B` is a sparse susceptance matrix that
    supports fancy indexing and `iref` is the slack-bus index — confirm
    against the caller.
    """
    buses = case.connected_buses
    # Partition bus indices by type; the solve excludes the slack bus.
    pv_idxs = [bus._i for bus in buses if bus.type == PV]
    pq_idxs = [bus._i for bus in buses if bus.type == PQ]
    pvpq_idxs = pv_idxs + pq_idxs
    # Column-vector form of the indices for 2-D fancy indexing.
    pvpq_rows = [[i] for i in pvpq_idxs]
    # Get the susceptance matrix with the column and row corresponding to
    # the reference bus removed.
    Bpvpq = B[pvpq_rows, pvpq_idxs]
    Bref = B[pvpq_rows, [iref]]
    # Bus active power injections (generation - load) adjusted for phase
    # shifters and real shunts.
    p_surplus = array([case.s_surplus(v).real for v in buses])
    g_shunt = array([bus.g_shunt for bus in buses])
    Pbus = (p_surplus - p_businj - g_shunt) / case.base_mva
    Pbus.shape = len(Pbus), 1
    A = Bpvpq
    b = Pbus[pvpq_idxs] - Bref * v_angle_guess[iref]
    # x, res, rank, s = linalg.lstsq(A.todense(), b)
    x = spsolve(A, b)
    # Insert the reference voltage angle of the slack bus.
    v_angle = r_[x[:iref], v_angle_guess[iref], x[iref:]]
    return v_angle, Pbus[iref]
[ "def", "_get_v_angle", "(", "self", ",", "case", ",", "B", ",", "v_angle_guess", ",", "p_businj", ",", "iref", ")", ":", "buses", "=", "case", ".", "connected_buses", "pv_idxs", "=", "[", "bus", ".", "_i", "for", "bus", "in", "buses", "if", "bus", "....
34.882353
20.970588
def get_waiting_list(self, force=False):
    """ Add lines for any waiting fields that can be completed now. """
    lines = []
    skip_autofield = self.options['skip_autofield']

    # Iterate over a snapshot so entries can be removed while looping.
    for field in list(self.waiting_list):
        try:
            value = get_attribute_value(self.instance, field, self.context,
                                        force=force,
                                        skip_autofield=skip_autofield)
        except SkipValue:
            # Field should not be rendered at all; drop it from the queue.
            self.waiting_list.remove(field)
        except DoLater:
            # Not resolvable yet; leave it queued for a later pass.
            pass
        else:
            lines.append('%s.%s = %s' % (self.variable_name, field.name, value))
            self.waiting_list.remove(field)

    return lines
[ "def", "get_waiting_list", "(", "self", ",", "force", "=", "False", ")", ":", "code_lines", "=", "[", "]", "skip_autofield", "=", "self", ".", "options", "[", "'skip_autofield'", "]", "# Process normal fields", "for", "field", "in", "list", "(", "self", ".",...
41.090909
21.409091
def migrate_config_file(
    self,
    config_file_path,
    always_update=False,
    current_file_type=None,
    output_file_name=None,
    output_file_type=None,
    create=True,
    update_defaults=True,
    dump_kwargs=None,
    include_bootstrap=True,
):
    """Migrate a configuration file to the current specification.

    Loads the existing configuration (optionally creating the file),
    lets every spec item migrate its own value, and writes the result
    back out. E.g. values matching an item's ``previous_defaults`` can
    be replaced by the item's current default.

    Args:
        config_file_path (str): Path to the current config file.
        always_update (bool): Always update values (even to None).
        current_file_type (str): Defaults to ``self._file_type``.
        output_file_name (str): Defaults to ``config_file_path``.
        output_file_type (str): Defaults to ``self._file_type``.
        create (bool): Create the file if it doesn't exist (otherwise
            error if the file does not exist).
        update_defaults (bool): Update values currently set to one of
            an item's ``previous_defaults``.
        dump_kwargs (dict): Keyword arguments passed through to dump.
        include_bootstrap (bool): Include bootstrap items in the output.

    Returns:
        box.Box: The newly migrated configuration.
    """
    current_file_type = current_file_type or self._file_type
    output_file_type = output_file_type or self._file_type
    output_file_name = output_file_name or config_file_path

    existing = self._get_config_if_exists(config_file_path, create,
                                          current_file_type)

    migrated = {}
    items = self._yapconf_items.values()
    if not include_bootstrap:
        items = [item for item in items if not item.bootstrap]

    for item in items:
        item.migrate_config(existing, migrated,
                            always_update, update_defaults)

    if create:
        yapconf.dump_data(migrated,
                          filename=output_file_name,
                          file_type=output_file_type,
                          klazz=YapconfLoadError,
                          dump_kwargs=dump_kwargs)

    return Box(migrated)
[ "def", "migrate_config_file", "(", "self", ",", "config_file_path", ",", "always_update", "=", "False", ",", "current_file_type", "=", "None", ",", "output_file_name", "=", "None", ",", "output_file_type", "=", "None", ",", "create", "=", "True", ",", "update_de...
37.295455
22.25
def _parse_curves(block, **kwargs):
    """Parse nonlinear curves block."""
    curve_count = int(block.pop(0))

    curves = []
    for _ in range(curve_count):
        # Each curve set carries a modulus-reduction and a damping curve.
        for param in ['mod_reduc', 'damping']:
            length, name = parse_fixed_width([(5, int), (65, to_str)], block)
            strains = parse_fixed_width(length * [(10, float)], block)
            values = parse_fixed_width(length * [(10, float)], block)
            curves.append(site.NonlinearProperty(name, strains, values, param))

    length = int(block[0][:5])
    # First parsed integer is the count itself; keep only the ids.
    soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:]

    # Group soil type number and curves together
    return {(soil_types[i // 2], c.param): c for i, c in enumerate(curves)}
[ "def", "_parse_curves", "(", "block", ",", "*", "*", "kwargs", ")", ":", "count", "=", "int", "(", "block", ".", "pop", "(", "0", ")", ")", "curves", "=", "[", "]", "for", "i", "in", "range", "(", "count", ")", ":", "for", "param", "in", "[", ...
38.315789
21
def get_extension(filepath, check_if_exists=False):
    """Return the extension of `filepath`.

    Parameters
    ----------
    filepath: str
        File name or path.
    check_if_exists: bool
        If True, raise IOError when `filepath` does not exist.

    Returns
    -------
    str
        The extension (including the leading dot), or '' if none.

    Raises
    ------
    IOError
        If `check_if_exists` is True and the file does not exist.
    """
    if check_if_exists and not os.path.exists(filepath):
        err = 'File not found: ' + filepath
        log.error(err)
        raise IOError(err)
    # os.path.splitext does not raise for str input; the previous
    # try/except that merely re-raised was dead weight.
    return os.path.splitext(filepath)[1]
[ "def", "get_extension", "(", "filepath", ",", "check_if_exists", "=", "False", ")", ":", "if", "check_if_exists", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "err", "=", "'File not found: '", "+", "filepath", "log", ".", ...
19.555556
21.037037
def label_list_parser(self, url):
    """
    Extract comma-separated tag=value pairs from a string.

    Assumes all characters other than / and , are valid.
    Returns the unique pairs as a set.
    """
    pairs = set(re.findall('([^/,]+=[^/,]+)', url))
    # Defensive: drop an empty match if one ever appears.
    pairs.discard('')
    return pairs
[ "def", "label_list_parser", "(", "self", ",", "url", ")", ":", "labels", "=", "re", ".", "findall", "(", "'([^/,]+=[^/,]+)'", ",", "url", ")", "slabels", "=", "set", "(", "labels", ")", "if", "''", "in", "slabels", ":", "slabels", ".", "remove", "(", ...
33.2
11.2
def as_wires(val, bitwidth=None, truncating=True, block=None):
    """ Return wires from val which may be wires, integers, strings, or bools.

    :param val: a wirevector-like object or something that can be converted into
      a Const
    :param bitwidth: The bitwidth the resulting wire should be
    :param bool truncating: determines whether bits will be dropped to achieve
     the desired bitwidth if it is too long (if true, the most-significant bits
     will be dropped)
    :param Block block: block to use for wire

    This function is mainly used to coerce values into WireVectors (for
    example, operations such as "x+1" where "1" needs to be converted to
    a Const WireVector). An example:

    :: 

        def myhardware(input_a, input_b):
            a = as_wires(input_a)
            b = as_wires(input_b)
        myhardware(3, x)

    The function as_wires will covert the 3 to Const but keep `x` unchanged
    assuming it is a WireVector.
    """
    from .memory import _MemIndexed
    block = working_block(block)

    if isinstance(val, (int, six.string_types)):
        # note that this case captures bool as well (as bools are instances of ints)
        return Const(val, bitwidth=bitwidth, block=block)
    elif isinstance(val, _MemIndexed):
        # convert to a memory read when the value is actually used
        if val.wire is None:
            val.wire = as_wires(val.mem._readaccess(val.index), bitwidth, truncating, block)
        return val.wire
    elif not isinstance(val, WireVector):
        raise PyrtlError('error, expecting a wirevector, int, or verilog-style '
                         'const string got %s instead' % repr(val))
    elif bitwidth is not None and bitwidth < 1:
        # BUG FIX: this previously compared against the *string* '0', which an
        # integer bitwidth can never equal, so bitwidth=0 was silently ignored.
        raise PyrtlError('error, bitwidth must be >= 1')
    elif val.bitwidth is None:
        raise PyrtlError('error, attempting to use wirevector with no defined bitwidth')
    elif bitwidth and bitwidth > val.bitwidth:
        return val.zero_extended(bitwidth)
    elif bitwidth and truncating and bitwidth < val.bitwidth:
        return val[:bitwidth]  # truncate the upper bits
    else:
        return val
[ "def", "as_wires", "(", "val", ",", "bitwidth", "=", "None", ",", "truncating", "=", "True", ",", "block", "=", "None", ")", ":", "from", ".", "memory", "import", "_MemIndexed", "block", "=", "working_block", "(", "block", ")", "if", "isinstance", "(", ...
43.0625
21.583333
def finish(self, code, data=NotImplemented):
    """
    Set the response to {'code': xxx, 'data': xxx}.

    :param code: return code
    :param data: payload; when omitted, the canonical text for `code` is used
    :return: None
    """
    if data is NotImplemented:
        # Fall back to the standard message for this return code.
        data = RETCODE.txt_cn.get(code, None)
    self.ret_val = {'code': code, 'data': data}
    # Keep the raw value around so overriding methods can inspect it.
    self.response = web.json_response(self.ret_val, dumps=json_ex_dumps)
    logger.debug('finish: %s' % self.ret_val)
    self._finish_end()
[ "def", "finish", "(", "self", ",", "code", ",", "data", "=", "NotImplemented", ")", ":", "if", "data", "is", "NotImplemented", ":", "data", "=", "RETCODE", ".", "txt_cn", ".", "get", "(", "code", ",", "None", ")", "self", ".", "ret_val", "=", "{", ...
37.692308
15.076923
def write(self, filename=None, skipempty=False):
    """Write the mdp parameters to *filename*.

    :Keywords:
       *filename*
           output mdp file; defaults to the file the mdp was read from
       *skipempty* : boolean
           ``True`` removes any parameter lines from output that
           contain empty values [``False``]

    .. Note:: Overwrites the file that the mdp was read from if no
              *filename* supplied.
    """
    with open(self.filename(filename, ext='mdp'), 'w') as mdp:
        for key, value in self.items():
            marker = key[0]
            if marker == 'B':
                # blank line
                mdp.write("\n")
            elif marker == 'C':
                # comment line
                mdp.write("; {v!s}\n".format(v=value))
            else:
                # parameter = value
                if skipempty and (value == '' or value is None):
                    continue
                if isinstance(value, six.string_types) or not hasattr(value, '__iter__'):
                    mdp.write("{k!s} = {v!s}\n".format(k=key, v=value))
                else:
                    mdp.write("{} = {}\n".format(key, ' '.join(map(str, value))))
[ "def", "write", "(", "self", ",", "filename", "=", "None", ",", "skipempty", "=", "False", ")", ":", "with", "open", "(", "self", ".", "filename", "(", "filename", ",", "ext", "=", "'mdp'", ")", ",", "'w'", ")", "as", "mdp", ":", "for", "k", ",",...
42.392857
18.785714
def get_instance(self, payload):
    """
    Build an instance of WebhookInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.messaging.v1.session.webhook.WebhookInstance
    :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
    """
    session_sid = self._solution['session_sid']
    return WebhookInstance(self._version, payload, session_sid=session_sid)
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "WebhookInstance", "(", "self", ".", "_version", ",", "payload", ",", "session_sid", "=", "self", ".", "_solution", "[", "'session_sid'", "]", ",", ")" ]
40.1
23.5
def varianceOfLaplacian(img):
    """'LAPV' algorithm (Pech2000): focus measure from the variance of
    the Laplacian response of `img`."""
    laplacian = cv2.Laplacian(img, ddepth=-1)  # cv2.cv.CV_64F
    sigma = cv2.meanStdDev(laplacian)[1]
    variance = sigma[0] ** 2
    return variance[0]
[ "def", "varianceOfLaplacian", "(", "img", ")", ":", "lap", "=", "cv2", ".", "Laplacian", "(", "img", ",", "ddepth", "=", "-", "1", ")", "#cv2.cv.CV_64F)\r", "stdev", "=", "cv2", ".", "meanStdDev", "(", "lap", ")", "[", "1", "]", "s", "=", "stdev", ...
32.166667
12.833333
def plot(self, fmt=None):
    """
    Make a simple plot of the legend.

    Simply calls Decor.plot() on all of its members.

    TODO: Build a more attractive plot.
    """
    for decor in self.__list:
        decor.plot(fmt=fmt)
    return None
[ "def", "plot", "(", "self", ",", "fmt", "=", "None", ")", ":", "for", "d", "in", "self", ".", "__list", ":", "d", ".", "plot", "(", "fmt", "=", "fmt", ")", "return", "None" ]
21.833333
16.666667
def make_quadratic(poly, strength, vartype=None, bqm=None):
    """Create a binary quadratic model from a higher order polynomial.

    Args:
        poly (dict):
            Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of
            variables and `bias` the associated bias.

        strength (float):
            Strength of the reduction constraint. Insufficient strength can result in the
            binary quadratic model not having the same minimizations as the polynomial.

        vartype (:class:`.Vartype`, optional):
            Vartype of the polynomial. If `bqm` is provided, vartype is not required.

        bqm (:class:`.BinaryQuadraticModel`, optional):
            The terms of the reduced polynomial are added to this binary quadratic model.
            If not provided, a new binary quadratic model is created.

    Returns:
        :class:`.BinaryQuadraticModel`

    Examples:

        >>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}
        >>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN)

    """
    if bqm is None:
        if vartype is None:
            raise ValueError("one of vartype and bqm must be provided")
        bqm = BinaryQuadraticModel.empty(vartype)
    else:
        if not isinstance(bqm, BinaryQuadraticModel):
            # BUG FIX: message previously referred to a nonexistent
            # 'create_using' argument.
            raise TypeError('bqm must be a BinaryQuadraticModel')
        if vartype is not None and vartype is not bqm.vartype:
            # BUG FIX: message previously repeated the "one of vartype and
            # ..." text; the actual problem here is a vartype conflict.
            raise ValueError("vartype must match bqm.vartype")

    bqm.info['reduction'] = {}

    new_poly = {}
    for term, bias in iteritems(poly):
        if len(term) == 0:
            # Constant term goes straight into the offset.
            bqm.add_offset(bias)
        elif len(term) == 1:
            v, = term
            bqm.add_variable(v, bias)
        else:
            # Higher-order terms are reduced below.
            new_poly[term] = bias

    return _reduce_degree(bqm, new_poly, vartype, strength)
[ "def", "make_quadratic", "(", "poly", ",", "strength", ",", "vartype", "=", "None", ",", "bqm", "=", "None", ")", ":", "if", "bqm", "is", "None", ":", "if", "vartype", "is", "None", ":", "raise", "ValueError", "(", "\"one of vartype and bqm must be provided\...
35.823529
25.411765
def search_one(self, keyword, arg=None, children=None):
    """Return receiver's substmt with `keyword` and optionally `arg`.

    Searches `children` when given, otherwise `self.substmts`; returns
    the first match or None.
    """
    candidates = self.substmts if children is None else children
    for stmt in candidates:
        if stmt.keyword != keyword:
            continue
        if arg is None or stmt.arg == arg:
            return stmt
    return None
[ "def", "search_one", "(", "self", ",", "keyword", ",", "arg", "=", "None", ",", "children", "=", "None", ")", ":", "if", "children", "is", "None", ":", "children", "=", "self", ".", "substmts", "for", "ch", "in", "children", ":", "if", "ch", ".", "...
38.444444
12.444444
def surface_2D(num_lat=90, num_lon=180, water_depth=10., lon=None, lat=None, **kwargs):
    """Creates a 2D slab ocean Domain in latitude and longitude with uniform water depth.

    Domain has a single heat capacity according to the specified water depth.

    **Function-call argument** \n

    :param int num_lat:         number of latitude points [default: 90]
    :param int num_lon:         number of longitude points [default: 180]
    :param float water_depth:   depth of the slab ocean in meters [default: 10.]
    :param lat:                 specification for latitude axis (optional)
    :type lat:                  :class:`~climlab.domain.axis.Axis` or latitude array
    :param lon:                 specification for longitude axis (optional)
    :type lon:                  :class:`~climlab.domain.axis.Axis` or longitude array
    :raises: :exc:`ValueError`  if `lat` is given but neither Axis nor latitude array.
    :raises: :exc:`ValueError`  if `lon` is given but neither Axis nor longitude array.
    :returns:                   surface domain
    :rtype:                     :class:`SlabOcean`

    :Example:

        ::

            >>> from climlab import domain
            >>> sfc = domain.surface_2D(num_lat=36, num_lon=72)
            >>> print sfc
            climlab Domain object with domain_type=ocean and shape=(36, 72, 1)

    """
    # DOC FIX: the example previously passed `num_lat` twice; the second
    # keyword must be `num_lon`.
    if lat is None:
        latax = Axis(axis_type='lat', num_points=num_lat)
    elif isinstance(lat, Axis):
        latax = lat
    else:
        try:
            latax = Axis(axis_type='lat', points=lat)
        except Exception:
            # Narrowed from a bare `except:` which would also swallow
            # KeyboardInterrupt / SystemExit.
            raise ValueError('lat must be Axis object or latitude array')
    if lon is None:
        lonax = Axis(axis_type='lon', num_points=num_lon)
    elif isinstance(lon, Axis):
        lonax = lon
    else:
        try:
            lonax = Axis(axis_type='lon', points=lon)
        except Exception:
            raise ValueError('lon must be Axis object or longitude array')
    depthax = Axis(axis_type='depth', bounds=[water_depth, 0.])
    axes = {'lat': latax, 'lon': lonax, 'depth': depthax}
    slab = SlabOcean(axes=axes, **kwargs)
    return slab
[ "def", "surface_2D", "(", "num_lat", "=", "90", ",", "num_lon", "=", "180", ",", "water_depth", "=", "10.", ",", "lon", "=", "None", ",", "lat", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "lat", "is", "None", ":", "latax", "=", "Axis",...
39.660377
25.735849
def _get_size(self) -> Tuple[int, int]:
    """Return the (width, height) for this Image.

    Returns:
        Tuple[int, int]: The (width, height) of this Image
    """
    # Allocate C ints for the library to fill in.
    width = ffi.new("int *")
    height = ffi.new("int *")
    lib.TCOD_image_get_size(self.image_c, width, height)
    return width[0], height[0]
[ "def", "_get_size", "(", "self", ")", "->", "Tuple", "[", "int", ",", "int", "]", ":", "w", "=", "ffi", ".", "new", "(", "\"int *\"", ")", "h", "=", "ffi", ".", "new", "(", "\"int *\"", ")", "lib", ".", "TCOD_image_get_size", "(", "self", ".", "i...
31.3
13.7
def dwmAll(data, db, configName='', config={}, udfNamespace=__name__, verbose=False):
    """Apply cleaning rules to every record and return the cleaned list.

    :param list data: list of dictionaries (records) to clean
    :param MongoClient db: MongoDB connection
    :param string configName: name of configuration to use; queried from
        the 'config' collection of MongoDB
    :param OrderedDict config: pre-queried config dict (alternative to
        configName; exactly one of the two must be given)
    :param namespace udfNamespace: namespace of the current working
        script; must be passed if using user-defined functions
    :param bool verbose: use tqdm to display progress
    """
    # Exactly one of configName / config must be supplied.
    if config == {} and configName == '':
        raise Exception("Please either specify configName or pass a config")
    if config != {} and configName != '':
        raise Exception("Please either specify configName or pass a config")

    if config == {}:
        configColl = db['config']
        config = configColl.find_one({"configName": configName})
        if not config:
            raise Exception("configName '" + configName +
                            "' not found in collection 'config'")

    history = config["history"]
    writeContactHistory = history["writeContactHistory"]
    returnHistoryId = history["returnHistoryId"]
    returnHistoryField = history["returnHistoryField"]
    histIdField = history["histIdField"]

    # Derivation rules and UDFs must run in a deterministic order.
    for field in config["fields"]:
        config["fields"][field]["derive"] = OrderedDict(
            sorted(config["fields"][field]["derive"].items()))
    for position in config["userDefinedFunctions"]:
        config["userDefinedFunctions"][position] = OrderedDict(
            sorted(config["userDefinedFunctions"][position].items()))

    # Same loop either way; tqdm only adds a progress bar.
    rows = tqdm(data) if verbose else data
    for row in rows:
        row, historyId = dwmOne(data=row, db=db, config=config,
                                writeContactHistory=writeContactHistory,
                                returnHistoryId=returnHistoryId,
                                histIdField=histIdField,
                                udfNamespace=udfNamespace)
        if returnHistoryId and writeContactHistory:
            row[returnHistoryField] = historyId

    return data
[ "def", "dwmAll", "(", "data", ",", "db", ",", "configName", "=", "''", ",", "config", "=", "{", "}", ",", "udfNamespace", "=", "__name__", ",", "verbose", "=", "False", ")", ":", "if", "config", "==", "{", "}", "and", "configName", "==", "''", ":",...
47.235294
35.431373
def reload(self):
    """
    Refreshes the file with the data from the server.
    """
    try:
        payload = self._api.get(self.href, append_base=False).json()
        refreshed = File(api=self._api, **payload)
    except Exception:
        # href fetch failed; retry via the canonical resource URL.
        try:
            payload = self._api.get(
                self._URL['get'].format(id=self.id)).json()
            refreshed = File(api=self._api, **payload)
        except Exception:
            raise SbgError('Resource can not be refreshed!')

    self._data = refreshed._data
    self._dirty = refreshed._dirty
    self._old = copy.deepcopy(self._data.data)

    # If file.metadata = value was executed, this instance carries
    # _method='PUT', which forces an overwrite of metadata on the
    # server. After reloading the resource that flag must be cleared.
    try:
        delattr(self, '_method')
    except AttributeError:
        pass
[ "def", "reload", "(", "self", ")", ":", "try", ":", "data", "=", "self", ".", "_api", ".", "get", "(", "self", ".", "href", ",", "append_base", "=", "False", ")", ".", "json", "(", ")", "resource", "=", "File", "(", "api", "=", "self", ".", "_a...
36.714286
17.5
def set_index(self, index):
    """Set the current index to the row of the given index

    :param index: the index to set the level to
    :type index: QtCore.QModelIndex
    :returns: None
    :rtype: None
    :raises: None
    """
    # Select the row first, then announce the new root to listeners and
    # make sure the row is visible in the viewport.
    self.setCurrentIndex(index)
    self.new_root.emit(index)
    self.scrollTo(index)
[ "def", "set_index", "(", "self", ",", "index", ")", ":", "self", ".", "setCurrentIndex", "(", "index", ")", "self", ".", "new_root", ".", "emit", "(", "index", ")", "self", ".", "scrollTo", "(", "index", ")" ]
29.083333
12.166667
def calc_equivalent_modulus(self):
    """Calculates the equivalent laminate properties.

    The following attributes are calculated:
        e1, e2, g12, nu12, nu21

    Reads ``self.ABD`` (laminate stiffness matrix) and ``self.h``
    (laminate thickness).
    """
    # np.matrix (and its `.I` inverse property) is deprecated; use a
    # plain ndarray with numpy.linalg.inv instead — same values.
    AI = np.linalg.inv(np.asarray(self.ABD, dtype=np.float64))
    a11, a12, a22, a33 = AI[0, 0], AI[0, 1], AI[1, 1], AI[2, 2]
    self.e1 = 1. / (self.h * a11)
    self.e2 = 1. / (self.h * a22)
    self.g12 = 1. / (self.h * a33)
    self.nu12 = - a12 / a11
    self.nu21 = - a12 / a22
[ "def", "calc_equivalent_modulus", "(", "self", ")", ":", "AI", "=", "np", ".", "matrix", "(", "self", ".", "ABD", ",", "dtype", "=", "np", ".", "float64", ")", ".", "I", "a11", ",", "a12", ",", "a22", ",", "a33", "=", "AI", "[", "0", ",", "0", ...
33
12.285714
def run(self, dag):
    """
    Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1.

    Args:
        dag (DAGCircuit): DAG to find layout for.

    Raises:
        TranspilerError: if dag wider than self.coupling_map
    """
    dag_qubits = sum(qreg.size for qreg in dag.qregs.values())
    if dag_qubits > self.coupling_map.size():
        raise TranspilerError('Number of qubits greater than device.')
    self.property_set['layout'] = Layout.generate_trivial_layout(*dag.qregs.values())
[ "def", "run", "(", "self", ",", "dag", ")", ":", "num_dag_qubits", "=", "sum", "(", "[", "qreg", ".", "size", "for", "qreg", "in", "dag", ".", "qregs", ".", "values", "(", ")", "]", ")", "if", "num_dag_qubits", ">", "self", ".", "coupling_map", "."...
39.571429
25.571429
def get_spectre_plot(self, sigma=0.05, step=0.01):
    """
    Get a matplotlib plot of the UV-visible xas. Transitions are plotted
    as vertical lines and as a sum of normal functions with width sigma.
    The broadening is applied in energy and the xas is plotted as a
    function of the wavelength.

    Args:
        sigma: Full width at half maximum in eV for normal functions.
        step: bin interval in eV

    Returns:
        A dict: {"energies": values, "lambda": values, "xas": values}
        where values are lists of abscissa (energies, lambda) and
        the sum of gaussian functions (xas).
        A matplotlib plot.
    """
    from pymatgen.util.plotting import pretty_plot

    def _normpdf(x, mu, s):
        # BUG FIX: matplotlib.mlab.normpdf was deprecated in 2.2 and
        # removed in matplotlib 3.x; compute the normal pdf directly.
        return np.exp(-0.5 * ((x - mu) / s) ** 2) / (s * np.sqrt(2.0 * np.pi))

    plt = pretty_plot(12, 8)

    transitions = self.read_excitation_energies()

    minval = min([val[0] for val in transitions]) - 5.0 * sigma
    maxval = max([val[0] for val in transitions]) + 5.0 * sigma
    npts = int((maxval - minval) / step) + 1

    eneval = np.linspace(minval, maxval, npts)  # in eV
    lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9 for val in eneval]  # in nm

    # sum of gaussian functions
    spectre = np.zeros(npts)
    for trans in transitions:
        spectre += trans[2] * _normpdf(eneval, trans[0], sigma)
    spectre /= spectre.max()
    plt.plot(lambdaval, spectre, "r-", label="spectre")

    data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}

    # plot transitions as vlines
    plt.vlines([val[1] for val in transitions], 0.,
               [val[2] for val in transitions],
               color="blue", label="transitions", linewidth=2)

    plt.xlabel("$\\lambda$ (nm)")
    plt.ylabel("Arbitrary unit")
    plt.legend()

    return data, plt
[ "def", "get_spectre_plot", "(", "self", ",", "sigma", "=", "0.05", ",", "step", "=", "0.01", ")", ":", "from", "pymatgen", ".", "util", ".", "plotting", "import", "pretty_plot", "from", "matplotlib", ".", "mlab", "import", "normpdf", "plt", "=", "pretty_pl...
36.54717
20.207547
def controlprompt_cmd(self, cmd):
    """Perform a "controlpromptentry" command."""
    # Command string tag followed by a zero 'cmcc' marker byte.
    payload = tags.string_tag('cmbe', cmd) + tags.uint8_tag('cmcc', 0)
    return self.daap.post(_CTRL_PROMPT_CMD, data=payload)
[ "def", "controlprompt_cmd", "(", "self", ",", "cmd", ")", ":", "data", "=", "tags", ".", "string_tag", "(", "'cmbe'", ",", "cmd", ")", "+", "tags", ".", "uint8_tag", "(", "'cmcc'", ",", "0", ")", "return", "self", ".", "daap", ".", "post", "(", "_C...
53.75
14
def validate_conversion_arguments(to_wrap):
    """
    Validates arguments for conversion functions.

    - Only a single argument is present
    - Kwarg must be 'primitive', 'hexstr' or 'text'
    - If it is 'hexstr' or 'text', it must be a text type
    """
    @functools.wraps(to_wrap)
    def wrapper(*args, **kwargs):
        _assert_one_val(*args, **kwargs)
        if kwargs:
            _validate_supported_kwarg(kwargs)
        # Keyword-only call paths must use text for hexstr/text kwargs.
        if not args and "primitive" not in kwargs:
            _assert_hexstr_or_text_kwarg_is_text_type(**kwargs)
        return to_wrap(*args, **kwargs)
    return wrapper
[ "def", "validate_conversion_arguments", "(", "to_wrap", ")", ":", "@", "functools", ".", "wraps", "(", "to_wrap", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_assert_one_val", "(", "*", "args", ",", "*", "*", "kwargs", ...
31.157895
14
def get_primary(self):
    """Return primary's address or None."""
    # Implemented here in Topology instead of MongoClient, so it can lock.
    with self._lock:
        if self._description.topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary:
            return None
        return writable_server_selector(self._new_selection())[0].address
[ "def", "get_primary", "(", "self", ")", ":", "# Implemented here in Topology instead of MongoClient, so it can lock.", "with", "self", ".", "_lock", ":", "topology_type", "=", "self", ".", "_description", ".", "topology_type", "if", "topology_type", "!=", "TOPOLOGY_TYPE",...
44.666667
23.222222
def fit(self, X, y=None, **fit_params):
    """Fit the StackingEstimator meta-transformer.

    Parameters
    ----------
    X: array-like of shape (n_samples, n_features)
        The training input samples.
    y: array-like, shape (n_samples,)
        The target values (integers that correspond to classes in
        classification, real numbers in regression).
    fit_params:
        Other estimator-specific parameters.

    Returns
    -------
    self: object
        This estimator (fitted), enabling method chaining.
    """
    # Delegate fitting entirely to the wrapped estimator.
    self.estimator.fit(X, y, **fit_params)
    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "fit_params", ")", ":", "self", ".", "estimator", ".", "fit", "(", "X", ",", "y", ",", "*", "*", "fit_params", ")", "return", "self" ]
32.263158
18.105263
def _log_level_changed(self, name, old, new):
    """Adjust the log level when log_level is set.

    Change-handler signature (name, old, new); `new` may be either a
    numeric level or a level name string such as 'DEBUG'.
    NOTE(review): `basestring` implies this is Python 2 code.
    """
    if isinstance(new, basestring):
        # Resolve a level name to its numeric value on the logging module.
        new = getattr(logging, new)
    self.log_level = new
    self.log.setLevel(new)
[ "def", "_log_level_changed", "(", "self", ",", "name", ",", "old", ",", "new", ")", ":", "if", "isinstance", "(", "new", ",", "basestring", ")", ":", "new", "=", "getattr", "(", "logging", ",", "new", ")", "self", ".", "log_level", "=", "new", "self"...
40.333333
4.166667
def connect(self, f, t):
    """Add an edge from vertex `f` to vertex `t`.

    Both adjacency directions are recorded; connecting an
    already-connected pair is a no-op (sets are idempotent).
    Raises KeyError when `t` is not a known vertex.
    """
    if t not in self._vertices:
        raise KeyError(t)
    self._forwards[f].add(t)
    self._backwards[t].add(f)
[ "def", "connect", "(", "self", ",", "f", ",", "t", ")", ":", "if", "t", "not", "in", "self", ".", "_vertices", ":", "raise", "KeyError", "(", "t", ")", "self", ".", "_forwards", "[", "f", "]", ".", "add", "(", "t", ")", "self", ".", "_backwards...
29.666667
12.111111
def plot_chain(chain, joints, ax, target=None, show=False):
    """Plots the chain"""
    nodes = []
    axes = []
    transformations = chain.forward_kinematics(joints, full_kinematics=True)

    # Extract position and rotation axis of every link from its
    # transformation matrix.
    for index, link in enumerate(chain.links):
        node, rotation = geometry_utils.from_transformation_matrix(transformations[index])
        nodes.append(node)
        rotation_axis = link._get_rotation_axis()
        if index == 0:
            axes.append(rotation_axis)
        else:
            # Express the axis in the frame of the previous link.
            axes.append(geometry_utils.homogeneous_to_cartesian_vectors(
                np.dot(transformations[index - 1], rotation_axis)))

    xs = [node[0] for node in nodes]
    ys = [node[1] for node in nodes]
    zs = [node[2] for node in nodes]

    # Draw the chain as a poly-line, then mark each joint.
    ax.plot(xs, ys, zs)
    ax.scatter(xs, ys, zs)

    # Draw each rotation axis from its joint position.
    for index, axe in enumerate(axes):
        ax.plot([nodes[index][0], axe[0]],
                [nodes[index][1], axe[1]],
                [nodes[index][2], axe[2]])
[ "def", "plot_chain", "(", "chain", ",", "joints", ",", "ax", ",", "target", "=", "None", ",", "show", "=", "False", ")", ":", "# LIst of nodes and orientations", "nodes", "=", "[", "]", "axes", "=", "[", "]", "transformation_matrixes", "=", "chain", ".", ...
42.5
27.615385
def handle_fault(self, event):
    """Handle failure/error processing for `event` by raising."""
    raise RuntimeError(
        "Faulty event '{}' with payload '{}' was received by state '{}'".format(
            event.name, event.payload, self.name))
[ "def", "handle_fault", "(", "self", ",", "event", ")", ":", "msg", "=", "\"Faulty event '{}' with payload '{}' was received by state '{}'\"", ".", "format", "(", "event", ".", "name", ",", "event", ".", "payload", ",", "self", ".", "name", ")", "raise", "Runtime...
42.5
18.5
def set_loader(fxn):
    """Set __loader__ on the returned module.

    This function is deprecated.

    """
    @functools.wraps(fxn)
    def wrapper(self, *args, **kwargs):
        # Warn first, then delegate to the wrapped loader method.
        warnings.warn('The import system now takes care of this automatically.',
                      DeprecationWarning, stacklevel=2)
        module = fxn(self, *args, **kwargs)
        # Only fill in __loader__ when the module does not have one yet.
        if getattr(module, '__loader__', None) is None:
            module.__loader__ = self
        return module
    return wrapper
[ "def", "set_loader", "(", "fxn", ")", ":", "@", "functools", ".", "wraps", "(", "fxn", ")", "def", "set_loader_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "'The import system now takes care of t...
38.307692
12.307692
def get(obj_name, init=False): "Find an object already created" wx_parent = None # check if new_parent is given as string (useful for designer!) if isinstance(obj_name, basestring): # find the object reference in the already created gui2py objects # TODO: only useful for designer, get a better way obj_parent = COMPONENTS.get(obj_name) if not obj_parent: # try to find window (it can be a plain wx frame/control) wx_parent = wx.FindWindowByName(obj_name) if wx_parent: # store gui object (if any) obj_parent = getattr(wx_parent, "obj") else: # fallback using just object name (backward compatibility) for obj in COMPONENTS.values(): if obj.name==obj_name: obj_parent = obj else: obj_parent = obj_name # use the provided parent (as is) return obj_parent or wx_parent
[ "def", "get", "(", "obj_name", ",", "init", "=", "False", ")", ":", "wx_parent", "=", "None", "# check if new_parent is given as string (useful for designer!)\r", "if", "isinstance", "(", "obj_name", ",", "basestring", ")", ":", "# find the object reference in the already...
45.090909
15.636364
def infer_transportation_modes(self, dt_threshold=10): """In-place transportation inferring of segments Returns: This track """ self.segments = [ segment.infer_transportation_mode(dt_threshold=dt_threshold) for segment in self.segments ] return self
[ "def", "infer_transportation_modes", "(", "self", ",", "dt_threshold", "=", "10", ")", ":", "self", ".", "segments", "=", "[", "segment", ".", "infer_transportation_mode", "(", "dt_threshold", "=", "dt_threshold", ")", "for", "segment", "in", "self", ".", "seg...
29.818182
17.363636
def to_period(self, freq=None): """ Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]', freq='M') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]', freq='D') """ from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn("Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning) if freq is None: freq = self.freqstr or self.inferred_freq if freq is None: raise ValueError("You must pass a freq argument as " "current index has none.") freq = get_period_alias(freq) return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
[ "def", "to_period", "(", "self", ",", "freq", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "arrays", "import", "PeriodArray", "if", "self", ".", "tz", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"Converting to PeriodArray/Index ...
33.3
23.333333
def list(self, search=False, **kwargs): """ Returns a list of :class:`Message` objects and a pager dict. :Example: messages, pager = client.messages.list() :param bool search: If True then search messages using `ids`, `sessionId`, and/or `query`. Default=False :param int page: Fetch specified results page. Default=1 :param int limit: How many results on page. Default=10 :param str ids: Find message by ID(s). Using with `search`=True. :param str sessionId: Find messages by session ID. Using with `search`=True. :param str query: Find messages by specified search query. Using with `search`=True. """ kwargs["search"] = search return self.get_instances(kwargs)
[ "def", "list", "(", "self", ",", "search", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"search\"", "]", "=", "search", "return", "self", ".", "get_instances", "(", "kwargs", ")" ]
46.294118
27.470588
def add_activity(self, activity): """ Add an activity to the component. :param activity: The activity. """ self.gl.structure.validate_account_names( activity.get_referenced_accounts()) self.activities.append(activity) activity.set_parent_path(self.path)
[ "def", "add_activity", "(", "self", ",", "activity", ")", ":", "self", ".", "gl", ".", "structure", ".", "validate_account_names", "(", "activity", ".", "get_referenced_accounts", "(", ")", ")", "self", ".", "activities", ".", "append", "(", "activity", ")",...
28.454545
9.909091
def _send_loop(self): """ Waits for data in the output queue to send. """ while True: try: line = self.oqueue.get().splitlines()[0][:500] self._obuffer += line + '\r\n' while self._obuffer: sent = self._socket.send(self._obuffer) self._obuffer = self._obuffer[sent:] except Exception: break
[ "def", "_send_loop", "(", "self", ")", ":", "while", "True", ":", "try", ":", "line", "=", "self", ".", "oqueue", ".", "get", "(", ")", ".", "splitlines", "(", ")", "[", "0", "]", "[", ":", "500", "]", "self", ".", "_obuffer", "+=", "line", "+"...
33.692308
13.230769
def run(self): """Runs the command. Args: self (CleanCommand): the ``CleanCommand`` instance Returns: ``None`` """ for build_dir in self.build_dirs: if os.path.isdir(build_dir): sys.stdout.write('Removing %s%s' % (build_dir, os.linesep)) shutil.rmtree(build_dir) for (root, dirs, files) in os.walk(self.cwd): for name in files: fullpath = os.path.join(root, name) if any(fullpath.endswith(ext) for ext in self.build_artifacts): sys.stdout.write('Removing %s%s' % (fullpath, os.linesep)) os.remove(fullpath)
[ "def", "run", "(", "self", ")", ":", "for", "build_dir", "in", "self", ".", "build_dirs", ":", "if", "os", ".", "path", ".", "isdir", "(", "build_dir", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'Removing %s%s'", "%", "(", "build_dir", ",", ...
34.3
19.35
def parse_number_of_html_pages(html_question): """Parse number of answer pages to paginate over them. :param html_question: raw HTML question element :returns: an integer with the number of pages """ bs_question = bs4.BeautifulSoup(html_question, "html.parser") try: bs_question.select('div.paginator')[0] except IndexError: return 1 else: return int(bs_question.select('div.paginator')[0].attrs['data-num-pages'])
[ "def", "parse_number_of_html_pages", "(", "html_question", ")", ":", "bs_question", "=", "bs4", ".", "BeautifulSoup", "(", "html_question", ",", "\"html.parser\"", ")", "try", ":", "bs_question", ".", "select", "(", "'div.paginator'", ")", "[", "0", "]", "except...
35.928571
20.571429
def ip(ip_address, return_format=None): """Returns a summary of the information our database holds for a particular IP address (similar to /ipinfo.html). In the returned data: Count: (also reports or records) total number of packets blocked from this IP. Attacks: (also targets) number of unique destination IP addresses for these packets. :param ip_address: a valid IP address """ response = _get('ip/{address}'.format(address=ip_address), return_format) if 'bad IP address' in str(response): raise Error('Bad IP address, {address}'.format(address=ip_address)) else: return response
[ "def", "ip", "(", "ip_address", ",", "return_format", "=", "None", ")", ":", "response", "=", "_get", "(", "'ip/{address}'", ".", "format", "(", "address", "=", "ip_address", ")", ",", "return_format", ")", "if", "'bad IP address'", "in", "str", "(", "resp...
35.166667
21.444444
def send(sms_to, sms_body, **kwargs): """ Site: http://smsaero.ru/ API: http://smsaero.ru/api/ """ headers = { "User-Agent": "DBMail/%s" % get_version(), } kwargs.update({ 'user': settings.SMSAERO_LOGIN, 'password': settings.SMSAERO_MD5_PASSWORD, 'from': kwargs.pop('sms_from', settings.SMSAERO_FROM), 'to': sms_to.replace('+', ''), 'text': from_unicode(sms_body), 'answer': 'json', }) http = HTTPConnection(kwargs.pop("api_url", "gate.smsaero.ru")) http.request("GET", "/send/?" + urlencode(kwargs), headers=headers) response = http.getresponse() if response.status != 200: raise AeroSmsError(response.reason) read = response.read().decode(response.headers.get_content_charset()) data = json.loads(read) status = None if 'result' in data: status = data['result'] sms_id = None if 'id' in data: sms_id = data['id'] if sms_id and status == 'accepted': return True return False
[ "def", "send", "(", "sms_to", ",", "sms_body", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "\"User-Agent\"", ":", "\"DBMail/%s\"", "%", "get_version", "(", ")", ",", "}", "kwargs", ".", "update", "(", "{", "'user'", ":", "settings", ".", ...
25.948718
19.333333
def load(self): '''获取当前的离线任务列表''' def on_list_task(info, error=None): self.loading_spin.stop() self.loading_spin.hide() if not info: self.app.toast(_('Network error, info is empty')) if error or not info: logger.error('CloudPage.load: %s, %s' % (info, error)) return tasks = info['task_info'] for task in tasks: self.liststore.append([ task['task_id'], task['task_name'], task['save_path'], task['source_url'], 0, 0, int(task['status']), 0, '0', gutil.escape(task['save_path']) ]) self.scan_tasks() nonlocal start start = start + len(tasks) if info['total'] > start: gutil.async_call(pcs.cloud_list_task, self.app.cookie, self.app.tokens, start, callback=on_list_task) self.loading_spin.start() self.loading_spin.show_all() start = 0 gutil.async_call(pcs.cloud_list_task, self.app.cookie, self.app.tokens, start, callback=on_list_task)
[ "def", "load", "(", "self", ")", ":", "def", "on_list_task", "(", "info", ",", "error", "=", "None", ")", ":", "self", ".", "loading_spin", ".", "stop", "(", ")", "self", ".", "loading_spin", ".", "hide", "(", ")", "if", "not", "info", ":", "self",...
35.756757
14.567568
def parse(bin_payload, block_height): """ Interpret a block's nulldata back into a name. The first three bytes (2 magic + 1 opcode) will not be present in bin_payload. The name will be directly represented by the bytes given. This works for registrations and renewals. Record format (pre F-day 2017): 0 2 3 39 |----|--|----------------------------------| magic op name.ns_id (up to 37 bytes) Record format (post F-day 2017): 0 2 3 39 59 |----|--|----------------------------------|-------------------| magic op name.ns_id (37 bytes, 0-padded) zone file hash Record format (STACKs phase 1): (for register, tokens burned is ignored) (for renew, tokens burned is the number of tokens to burn) 0 2 3 39 59 67 |----|--|----------------------------------|-------------------|------------------------------| magic op name.ns_id (37 bytes, 0-padded) zone file hash tokens burned (big-endian) """ # pre F-day 2017: bin_payload is the name. # post F-day 2017: bin_payload is the name and possibly the update hash # STACKs phase 1: bin_payload possibly has a token burn attached to the end epoch_features = get_epoch_features(block_height) fqn = None value_hash = None tokens_burned = 0 if EPOCH_FEATURE_OP_REGISTER_UPDATE in epoch_features or EPOCH_FEATURE_OP_RENEW_TRANSFER_UPDATE in epoch_features: # payload is possibly name + zonefile hash, or name + zonefile hash + tokens # if so, it's guaranteed to be max_name_len + value_hash_len bytes long. name_value_len = LENGTHS['blockchain_id_name'] + LENGTHS['value_hash'] if len(bin_payload) >= name_value_len: # has name and value hash, and possibly a token burn # get name and value hash value_hash = bin_payload[LENGTHS['blockchain_id_name']: LENGTHS['blockchain_id_name'] + LENGTHS['value_hash']].encode('hex') fqn = bin_payload[:LENGTHS['blockchain_id_name']] fqn = fqn.rstrip('\x00') if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features: # might have tokens burned. If so, it's all or nothing. 
if len(bin_payload) == name_value_len + LENGTHS['tokens_burnt']: # we have a token count (this is a name renewal) bin_tokens = bin_payload[name_value_len: name_value_len + LENGTHS['tokens_burnt']] tokens_burned = int(bin_tokens.encode('hex'), 16) # NOTE: big-endian else: # must not have any bits dangling off the end if len(bin_payload) != name_value_len: log.warning('Invalid payload {}: expected {} bytes or {} bytes'.format(bin_payload.encode('hex'), name_value_len, name_value_len + LENGTHS['tokens_burnt'])) return None # no token count (might be a register) tokens_burned = None else: # tokens are not active in this epoch. # payload must be *exactly* name + value hash. if len(bin_payload) != name_value_len: log.warning("Invalid payload {}: expected {} bytes".format(bin_payload.encode('hex'), name_value_len)) return None else: # payload is just a name fqn = bin_payload else: # payload is only the name fqn = bin_payload if not is_name_valid( fqn ): log.warning("Invalid name: {} ({})".format(fqn, fqn.encode('hex'))) return None return { 'opcode': 'NAME_REGISTRATION', # NOTE: could be NAME_RENEWAL 'name': fqn, 'value_hash': value_hash, 'token_fee': tokens_burned, }
[ "def", "parse", "(", "bin_payload", ",", "block_height", ")", ":", "# pre F-day 2017: bin_payload is the name.", "# post F-day 2017: bin_payload is the name and possibly the update hash", "# STACKs phase 1: bin_payload possibly has a token burn attached to the end", "epoch_features", "=", ...
42.301075
28.451613
def _set_zone(self, v, load=False): """ Setter method for zone, mapped from YANG variable /zoning/defined_configuration/zone (list) If this variable is read-only (config: false) in the source YANG file, then _set_zone is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_zone() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("zone_name",zone.zone, yang_name="zone", rest_name="zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}), is_container='list', yang_name="zone", rest_name="zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """zone must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("zone_name",zone.zone, yang_name="zone", rest_name="zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}), is_container='list', yang_name="zone", rest_name="zone", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""", }) self.__zone = t if hasattr(self, '_set'): self._set()
[ "def", "_set_zone", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
115.545455
55.863636
def sign(self, filename): """ Signs the filename with the certificate associated with this builder. :param filename | <str> :return <bool> | success """ sign = self.signcmd() certificate = self.certificate() if not sign: log.error('No signcmd defined.') return False elif not certificate and '{cert}' in sign: log.error('No sign certificated defined.') return False log.info('Signing {0}...'.format(filename)) sign = os.path.expandvars(sign) filename = os.path.expandvars(filename) cert = os.path.expandvars(certificate) # let the previous process finish fully, or we might get some file errors time.sleep(2) return cmdexec(sign.format(filename=filename, cert=cert)) == 0
[ "def", "sign", "(", "self", ",", "filename", ")", ":", "sign", "=", "self", ".", "signcmd", "(", ")", "certificate", "=", "self", ".", "certificate", "(", ")", "if", "not", "sign", ":", "log", ".", "error", "(", "'No signcmd defined.'", ")", "return", ...
34.08
16.4
def get_instance(self, payload): """ Build an instance of AssistantFallbackActionsInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance """ return AssistantFallbackActionsInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "AssistantFallbackActionsInstance", "(", "self", ".", "_version", ",", "payload", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'assistant_sid'", "]", ",", ")" ]
40.571429
26.571429
def get_plain_image_as_widget(self): """Used for generating thumbnails. Does not include overlaid graphics. """ arr = self.getwin_array(order=self.rgb_order) # convert numpy array to native image widget image_w = self._get_wimage(arr) return image_w
[ "def", "get_plain_image_as_widget", "(", "self", ")", ":", "arr", "=", "self", ".", "getwin_array", "(", "order", "=", "self", ".", "rgb_order", ")", "# convert numpy array to native image widget", "image_w", "=", "self", ".", "_get_wimage", "(", "arr", ")", "re...
33.222222
12.333333
def _get_config_type(cla55: type) -> Optional[str]: """ Find the name (if any) that a subclass was registered under. We do this simply by iterating through the registry until we find it. """ # Special handling for pytorch RNN types: if cla55 == torch.nn.RNN: return "rnn" elif cla55 == torch.nn.LSTM: return "lstm" elif cla55 == torch.nn.GRU: return "gru" for subclass_dict in Registrable._registry.values(): for name, subclass in subclass_dict.items(): if subclass == cla55: return name # Special handling for initializer functions if hasattr(subclass, '_initializer_wrapper'): sif = subclass()._init_function if sif == cla55: return sif.__name__.rstrip("_") return None
[ "def", "_get_config_type", "(", "cla55", ":", "type", ")", "->", "Optional", "[", "str", "]", ":", "# Special handling for pytorch RNN types:", "if", "cla55", "==", "torch", ".", "nn", ".", "RNN", ":", "return", "\"rnn\"", "elif", "cla55", "==", "torch", "."...
31.730769
16.423077
def merge(self, other): """ Merge another stats. """ Stats.merge(self, other) self.changes += other.changes
[ "def", "merge", "(", "self", ",", "other", ")", ":", "Stats", ".", "merge", "(", "self", ",", "other", ")", "self", ".", "changes", "+=", "other", ".", "changes" ]
32
7
def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_
[ "def", "setval", "(", "key", ",", "val", ",", "dict_", "=", "None", ",", "delim", "=", "DEFAULT_TARGET_DELIM", ")", ":", "if", "not", "dict_", ":", "dict_", "=", "{", "}", "prev_hier", "=", "dict_", "dict_hier", "=", "key", ".", "split", "(", "delim"...
26.703704
20.703704
def _check_syntax(code, lang, temp_dir, enforce=True): """ Checks that the code whose text is in CODE parses as LANG. Raises DXSyntaxError if there is a problem and "enforce" is True. """ # This function needs the language to be explicitly set, so we can # generate an appropriate temp filename. if lang == 'python2.7': temp_basename = 'inlined_code_from_dxapp_json.py' elif lang == 'bash': temp_basename = 'inlined_code_from_dxapp_json.sh' else: raise ValueError('lang must be one of "python2.7" or "bash"') # Dump the contents out to a temporary file, then call _check_file_syntax. with open(os.path.join(temp_dir, temp_basename), 'w') as ofile: ofile.write(code) _check_file_syntax(os.path.join(temp_dir, temp_basename), temp_dir, override_lang=lang, enforce=enforce)
[ "def", "_check_syntax", "(", "code", ",", "lang", ",", "temp_dir", ",", "enforce", "=", "True", ")", ":", "# This function needs the language to be explicitly set, so we can", "# generate an appropriate temp filename.", "if", "lang", "==", "'python2.7'", ":", "temp_basename...
46.333333
22.777778
def hist_from_values_list(values_list, fillers=(None,), normalize=False, cumulative=False, to_str=False, sep=',', min_bin=None, max_bin=None): """Compute an emprical histogram, PMF or CDF in a list of lists or a csv string Only works for discrete (integer) values (doesn't bin real values). `fillers`: list or tuple of values to ignore in computing the histogram >>> hist_from_values_list([1,1,2,1,1,1,2,3,2,4,4,5,7,7,9]) # doctest: +NORMALIZE_WHITESPACE [(1, 5), (2, 3), (3, 1), (4, 2), (5, 1), (6, 0), (7, 2), (8, 0), (9, 1)] >>> hist_from_values_list([(1,9),(1,8),(2,),(1,),(1,4),(2,5),(3,3),(5,0),(2,2)]) # doctest: +NORMALIZE_WHITESPACE [[(1, 4), (2, 3), (3, 1), (4, 0), (5, 1)], [(0, 1), (1, 0), ... (6, 0), (7, 0), (8, 1), (9, 1)]] >>> hist_from_values_list(transposed_matrix([(8,),(1,3,5),(2,),(3,4,5,8)])) # doctest: +NORMALIZE_WHITESPACE [[(8, 1)], [(1, 1), (2, 0), (3, 1), (4, 0), (5, 1)], [(2, 1)], [(3, 1), (4, 1), (5, 1), (6, 0), (7, 0), (8, 1)]] """ value_types = tuple([int, float] + [type(filler) for filler in fillers]) if all(isinstance(value, value_types) for value in values_list): # ignore all fillers and convert all floats to ints when doing counting counters = [Counter(int(value) for value in values_list if isinstance(value, (int, float)))] elif all(len(row) == 1 for row in values_list) and all(isinstance(row[0], value_types) for row in values_list): return hist_from_values_list([values[0] for values in values_list], fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) else: # assume it's a row-wise table (list of rows) return [ hist_from_values_list(col, fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) for col in transposed_matrix(values_list) ] if not values_list: return [] intkeys_list = [[c for c in counts if (isinstance(c, int) or (isinstance(c, float) and int(c) == c))] for counts in counters] try: min_bin = 
int(min_bin) except (IndexError, ValueError, AttributeError, TypeError): min_bin = min(min(intkeys) for intkeys in intkeys_list) try: max_bin = int(max_bin) except (IndexError, ValueError, AttributeError, TypeError): max_bin = max(max(intkeys) for intkeys in intkeys_list) # FIXME: this looks slow and hazardous (like it's ignore min/max bin): # TODO: reuse min(intkeys) min_bin = max(min_bin, min((min(intkeys) if intkeys else 0) for intkeys in intkeys_list)) max_bin = min(max_bin, max((max(intkeys) if intkeys else 0) for intkeys in intkeys_list)) histograms = [] for intkeys, counts in zip(intkeys_list, counters): histograms += [OrderedDict()] if not intkeys: continue if normalize: N = sum(counts[c] for c in intkeys) for c in intkeys: counts[c] = float(counts[c]) / N if cumulative: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) + histograms[-1].get(i - 1, 0) else: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) if not histograms: histograms = [OrderedDict()] # fill in the zero counts between the integer bins of the histogram aligned_histograms = [] for i in range(min_bin, max_bin + 1): aligned_histograms += [tuple([i] + [hist.get(i, 0) for hist in histograms])] if to_str: # FIXME: add header row return str_from_table(aligned_histograms, sep=sep, max_rows=365 * 2 + 1) return aligned_histograms
[ "def", "hist_from_values_list", "(", "values_list", ",", "fillers", "=", "(", "None", ",", ")", ",", "normalize", "=", "False", ",", "cumulative", "=", "False", ",", "to_str", "=", "False", ",", "sep", "=", "','", ",", "min_bin", "=", "None", ",", "max...
49.461538
31.025641
def parse(self, argv, usedname, location): """Consume and process arguments and store the result. ARGS: argv <list str>: The argument list to parse. usedname <str>: The string used by the user to invoke the option. location <str>: A user friendly sring describing where the parser got this data from. """ try: value = self.format.parse(argv) except formats.BadNumberOfArguments, e: raise BadNumberOfArguments(usedname, e.required, e.supplied) except formats.BadArgument, e: raise BadArgument(usedname, e.argument, e.message) if self.recurring: self.value.append(value) else: self.value = value self.location = location
[ "def", "parse", "(", "self", ",", "argv", ",", "usedname", ",", "location", ")", ":", "try", ":", "value", "=", "self", ".", "format", ".", "parse", "(", "argv", ")", "except", "formats", ".", "BadNumberOfArguments", ",", "e", ":", "raise", "BadNumberO...
34.826087
15.043478
def start(cls, now, number, firstweekday=calendar.SATURDAY, **options): """ Return the starting datetime: ``number`` of weeks before ``now``. ``firstweekday`` determines when the week starts. It defaults to Saturday. """ week = cls.mask(now, firstweekday=firstweekday, **options) days = (number - 1) * cls.DAYS_IN_WEEK return week - timedelta(days=days)
[ "def", "start", "(", "cls", ",", "now", ",", "number", ",", "firstweekday", "=", "calendar", ".", "SATURDAY", ",", "*", "*", "options", ")", ":", "week", "=", "cls", ".", "mask", "(", "now", ",", "firstweekday", "=", "firstweekday", ",", "*", "*", ...
40.9
18.7
def forward_on_instances(self, instances: List[Instance]) -> List[Dict[str, numpy.ndarray]]: """ Takes a list of :class:`~allennlp.data.instance.Instance`s, converts that text into arrays using this model's :class:`Vocabulary`, passes those arrays through :func:`self.forward()` and :func:`self.decode()` (which by default does nothing) and returns the result. Before returning the result, we convert any ``torch.Tensors`` into numpy arrays and separate the batched output into a list of individual dicts per instance. Note that typically this will be faster on a GPU (and conditionally, on a CPU) than repeated calls to :func:`forward_on_instance`. Parameters ---------- instances : List[Instance], required The instances to run the model on. Returns ------- A list of the models output for each instance. """ batch_size = len(instances) with torch.no_grad(): cuda_device = self._get_prediction_device() dataset = Batch(instances) dataset.index_instances(self.vocab) model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device) outputs = self.decode(self(**model_input)) instance_separated_output: List[Dict[str, numpy.ndarray]] = [{} for _ in dataset.instances] for name, output in list(outputs.items()): if isinstance(output, torch.Tensor): # NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable. # This occurs with batch size 1, because we still want to include the loss in that case. if output.dim() == 0: output = output.unsqueeze(0) if output.size(0) != batch_size: self._maybe_warn_for_unseparable_batches(name) continue output = output.detach().cpu().numpy() elif len(output) != batch_size: self._maybe_warn_for_unseparable_batches(name) continue for instance_output, batch_element in zip(instance_separated_output, output): instance_output[name] = batch_element return instance_separated_output
[ "def", "forward_on_instances", "(", "self", ",", "instances", ":", "List", "[", "Instance", "]", ")", "->", "List", "[", "Dict", "[", "str", ",", "numpy", ".", "ndarray", "]", "]", ":", "batch_size", "=", "len", "(", "instances", ")", "with", "torch", ...
50.191489
24.446809
def show_group(self, group_id): """ Get information about a group :type group_id: int :param group_id: Group ID Number :rtype: dict :return: a dictionary containing group information """ res = self.post('loadGroups', {'groupId': group_id}) if isinstance(res, list): return _fix_group(res[0]) else: return _fix_group(res)
[ "def", "show_group", "(", "self", ",", "group_id", ")", ":", "res", "=", "self", ".", "post", "(", "'loadGroups'", ",", "{", "'groupId'", ":", "group_id", "}", ")", "if", "isinstance", "(", "res", ",", "list", ")", ":", "return", "_fix_group", "(", "...
27.466667
13.733333
def to_camel(s): """ :param string s: under_scored string to be CamelCased :return: CamelCase version of input :rtype: str """ # r'(?!^)_([a-zA-Z]) original regex wasn't process first groups return re.sub(r'_([a-zA-Z])', lambda m: m.group(1).upper(), '_' + s)
[ "def", "to_camel", "(", "s", ")", ":", "# r'(?!^)_([a-zA-Z]) original regex wasn't process first groups", "return", "re", ".", "sub", "(", "r'_([a-zA-Z])'", ",", "lambda", "m", ":", "m", ".", "group", "(", "1", ")", ".", "upper", "(", ")", ",", "'_'", "+", ...
35
15.75
def _render_relationships(self, resource): """Render the resource's relationships.""" relationships = {} related_models = resource.__mapper__.relationships.keys() primary_key_val = getattr(resource, self.primary_key) if self.dasherize: mapped_relationships = { x: dasherize(underscore(x)) for x in related_models} else: mapped_relationships = {x: x for x in related_models} for model in related_models: relationships[mapped_relationships[model]] = { 'links': { 'self': '/{}/{}/relationships/{}'.format( resource.__tablename__, primary_key_val, mapped_relationships[model]), 'related': '/{}/{}/{}'.format( resource.__tablename__, primary_key_val, mapped_relationships[model]) } } return relationships
[ "def", "_render_relationships", "(", "self", ",", "resource", ")", ":", "relationships", "=", "{", "}", "related_models", "=", "resource", ".", "__mapper__", ".", "relationships", ".", "keys", "(", ")", "primary_key_val", "=", "getattr", "(", "resource", ",", ...
39.230769
15.692308
def get_human_readable_type(item): """Return human-readable type string of an item""" if isinstance(item, (ndarray, MaskedArray)): return item.dtype.name elif isinstance(item, Image): return "Image" else: text = get_type_string(item) if text is None: text = to_text_string('unknown') else: return text[text.find('.')+1:]
[ "def", "get_human_readable_type", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "(", "ndarray", ",", "MaskedArray", ")", ")", ":", "return", "item", ".", "dtype", ".", "name", "elif", "isinstance", "(", "item", ",", "Image", ")", ":", "r...
32.416667
11.083333
def add_config(self, key, type_, default=NOT_SET, env_var=None):
    """Register a configuration setting.

    Parameters
    ----------
    key : str
        The name of the configuration setting. This must be a valid Python
        attribute name i.e. alphanumeric with underscores.
    type : function
        A function such as ``float``, ``int`` or ``str`` which takes the
        configuration value and returns an object of the correct type.
        Values from environment variables arrive as strings while YAML
        values may already be parsed, so the function must accept both.
    default : object, optional
        The default configuration to return if not set. By default none is
        set and an error is raised instead.
    env_var : str, optional
        The environment variable name that holds this configuration value.
        If not given, this configuration can only be set in the YAML
        configuration file.
    """
    # Assemble the full entry first, then store it in one assignment.
    entry = {'type': type_}
    if env_var is not None:
        entry['env_var'] = env_var
    if default is not NOT_SET:
        entry['default'] = default
    self.config[key] = entry
[ "def", "add_config", "(", "self", ",", "key", ",", "type_", ",", "default", "=", "NOT_SET", ",", "env_var", "=", "None", ")", ":", "self", ".", "config", "[", "key", "]", "=", "{", "'type'", ":", "type_", "}", "if", "env_var", "is", "not", "None", ...
45.241379
19.931034
def tryCppComment(self, block):
    """C++ comment checking. when we want to insert slashes:
    #, #/, #! #/<, #!< and ##...
    return: filler string or null, if not in a star comment
    NOTE: otherwise comments get skipped generally and we use the last code-line
    """
    # Nothing to continue if there is no previous line or the feature is off.
    if not block.previous().isValid() or \
       not CFG_AUTO_INSERT_SLACHES:
        return None

    prevLineText = block.previous().text()

    indentation = None
    comment = prevLineText.lstrip().startswith('#')

    # allowed are: #, #/, #! #/<, #!< and ##...
    if comment:
        prevLineText = block.previous().text()
        lstrippedText = block.previous().text().lstrip()
        # NOTE(review): char3/char4 are only bound when the stripped line has
        # at least 4 chars, but char3 is still read in the elif below — a
        # shorter comment line (e.g. '#x') would raise NameError. Verify.
        if len(lstrippedText) >= 4:
            char3 = lstrippedText[2]
            char4 = lstrippedText[3]

        indentation = self._lineIndent(prevLineText)

        # NOTE(review): the prefix test above uses '#' while the regexes
        # below match '//' — looks like a partially adapted C++ indenter;
        # confirm which comment marker is actually intended.
        if CFG_AUTO_INSERT_SLACHES:
            if prevLineText[2:4] == '//':
                # match ##... and replace by only two: #
                match = re.match(r'^\s*(\/\/)', prevLineText)
            elif (char3 == '/' or char3 == '!'):
                # match #/, #!, #/< and #!
                match = re.match(r'^\s*(\/\/[\/!][<]?\s*)', prevLineText)
            else:
                # only #, nothing else:
                match = re.match(r'^\s*(\/\/\s*)', prevLineText)

            # Continue the comment on the new line with the matched prefix.
            if match is not None:
                self._qpart.insertText((block.blockNumber(), 0), match.group(1))

    if indentation is not None:
        dbg("tryCppComment: success in line %d" % block.previous().blockNumber())

    # Filler (indentation) string, or None when the previous line is not a comment.
    return indentation
[ "def", "tryCppComment", "(", "self", ",", "block", ")", ":", "if", "not", "block", ".", "previous", "(", ")", ".", "isValid", "(", ")", "or", "not", "CFG_AUTO_INSERT_SLACHES", ":", "return", "None", "prevLineText", "=", "block", ".", "previous", "(", ")"...
38.651163
18.72093
def from_data(source):
    """Infers a table/view schema from its JSON representation, a list of records, or a Pandas
       dataframe.

    Args:
      source: the Pandas Dataframe, a dictionary representing a record, a list of heterogeneous
          data (record) or homogeneous data (list of records) from which to infer the schema, or
          a definition of the schema as a list of dictionaries with 'name' and 'type' entries
          and possibly 'mode' and 'description' entries. Only used if no data argument was provided.
          'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
          https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes

          Note that there is potential ambiguity when passing a list of lists or a list of
          dicts between whether that should be treated as a list of records or a single record
          that is a list. The heuristic used is to check the length of the entries in the
          list; if they are equal then a list of records is assumed. To avoid this ambiguity
          you can instead use the Schema.from_record method which assumes a single record,
          in either list of values or dictionary of key-values form.

    Returns:
      A Schema for the data.
    """
    if isinstance(source, pandas.DataFrame):
        bq_schema = Schema._from_dataframe(source)
    elif isinstance(source, list):
        if len(source) == 0:
            # Empty list: pass through as an (empty) schema definition.
            bq_schema = source
        elif all(isinstance(d, dict) for d in source):
            if all('name' in d and 'type' in d for d in source):
                # It looks like a bq_schema; use it as-is.
                bq_schema = source
            elif all(len(d) == len(source[0]) for d in source):
                # Equal-length dicts: treat as a homogeneous list of records
                # and infer from the first one.
                bq_schema = Schema._from_dict_record(source[0])
            else:
                raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
                                 'to use Schema.from_record?') % str(source))
        elif isinstance(source[0], list) and \
            all([isinstance(l, list) and len(l) == len(source[0]) for l in source]):
            # A list of lists all of the same length; treat first entry as a list record.
            bq_schema = Schema._from_record(source[0])
        else:
            # A heterogeneous list; treat as a record.
            # NOTE(review): the comment above says "treat as a record" but the
            # code raises and redirects to Schema.from_record — presumably
            # intentional, to force the caller to disambiguate.
            raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
                             'to use Schema.from_record?') % str(source))
    elif isinstance(source, dict):
        raise Exception(('Cannot create a schema from dict %s; perhaps you meant to use ' +
                         'Schema.from_record?') % str(source))
    else:
        raise Exception('Cannot create a schema from %s' % str(source))
    return Schema(bq_schema)
[ "def", "from_data", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "pandas", ".", "DataFrame", ")", ":", "bq_schema", "=", "Schema", ".", "_from_dataframe", "(", "source", ")", "elif", "isinstance", "(", "source", ",", "list", ")", ":",...
54.3
29.2
def verify(self):
    """Run ``ansible-playbook`` against the verify playbook.

    :return: None
    """
    playbook = self._get_ansible_playbook(self.playbooks.verify)
    playbook.execute()
[ "def", "verify", "(", "self", ")", ":", "pb", "=", "self", ".", "_get_ansible_playbook", "(", "self", ".", "playbooks", ".", "verify", ")", "pb", ".", "execute", "(", ")" ]
25.777778
20.888889
def profile(self, tile=None):
    """
    Create a metadata dictionary for rasterio.

    Parameters
    ----------
    tile : ``BufferedTile``

    Returns
    -------
    metadata : dictionary
        output profile dictionary used for rasterio.
    """
    # Bug fix: copy the shared default profile — mutating
    # GTIFF_DEFAULT_PROFILE in place leaked settings between calls.
    dst_metadata = dict(GTIFF_DEFAULT_PROFILE)
    dst_metadata.pop("transform", None)
    dst_metadata.update(
        count=self.output_params["bands"],
        dtype=self.output_params["dtype"],
        driver="GTiff"
    )
    if tile is not None:
        dst_metadata.update(
            crs=tile.crs, width=tile.width, height=tile.height,
            affine=tile.affine)
    else:
        # Without a tile there is no georeferencing to report.
        for k in ["crs", "width", "height", "affine"]:
            dst_metadata.pop(k, None)
    if "nodata" in self.output_params:
        dst_metadata.update(nodata=self.output_params["nodata"])
    try:
        if "compression" in self.output_params:
            # Old key name kept working for backward compatibility.
            warnings.warn(
                DeprecationWarning("use 'compress' instead of 'compression'")
            )
            dst_metadata.update(compress=self.output_params["compression"])
        else:
            dst_metadata.update(compress=self.output_params["compress"])
        dst_metadata.update(predictor=self.output_params["predictor"])
    except KeyError:
        # compress/predictor are optional output parameters.
        pass
    return dst_metadata
[ "def", "profile", "(", "self", ",", "tile", "=", "None", ")", ":", "dst_metadata", "=", "GTIFF_DEFAULT_PROFILE", "dst_metadata", ".", "pop", "(", "\"transform\"", ",", "None", ")", "dst_metadata", ".", "update", "(", "count", "=", "self", ".", "output_params...
34.463415
17.878049
def get_parameter_defaults(self, include_flags=True):
    """Map parameter names to their configured defaults.

    Parameters whose default is ``None`` are omitted; flag-typed
    parameters are also omitted unless *include_flags* is true.

    :rtype: dict[str, object]
    """
    defaults = {}
    for name, parameter in self.parameters.items():
        if parameter.default is None:
            continue
        if not include_flags and parameter.type == 'flag':
            continue
        defaults[name] = parameter.default
    return defaults
[ "def", "get_parameter_defaults", "(", "self", ",", "include_flags", "=", "True", ")", ":", "return", "{", "name", ":", "parameter", ".", "default", "for", "(", "name", ",", "parameter", ")", "in", "self", ".", "parameters", ".", "items", "(", ")", "if", ...
36.454545
15.545455
def _dispatch_event(self, event, data=None):
    """Dispatches the event and executes any associated callbacks.

    Note: To prevent the app from crashing due to callback errors. We
        catch all exceptions and send all data to the logger.

    Args:
        event (str): The type of event. e.g. 'bot_added'
        data (dict): The data Slack sent. e.g.
        {
            "type": "bot_added",
            "bot": {
                "id": "B024BE7LH",
                "app_id": "A4H1JB4AZ",
                "name": "hugbot"
            }
        }
    """
    for callback in self._callbacks[event]:
        self._logger.debug(
            "Running %s callbacks for event: '%s'",
            len(self._callbacks[event]),
            event,
        )
        try:
            if self._stopped and event not in ["close", "error"]:
                # Don't run callbacks if client was stopped unless they're close/error callbacks.
                break
            if self.run_async:
                # Schedule the callback on the event loop instead of blocking.
                self._execute_callback_async(callback, data)
            else:
                self._execute_callback(callback, data)
        except Exception as err:
            name = callback.__name__
            module = callback.__module__
            msg = f"When calling '#{name}()' in the '{module}' module the following error was raised: {err}"
            self._logger.error(msg)
            # NOTE(review): despite the docstring's "catch all" note, the
            # exception is re-raised after logging — confirm intent.
            raise
[ "def", "_dispatch_event", "(", "self", ",", "event", ",", "data", "=", "None", ")", ":", "for", "callback", "in", "self", ".", "_callbacks", "[", "event", "]", ":", "self", ".", "_logger", ".", "debug", "(", "\"Running %s callbacks for event: '%s'\"", ",", ...
38.282051
18.358974
def delete_unit(unit_id, **kwargs):
    """Delete a unit from the DB.

    Raises an exception if the unit does not exist.
    """
    try:
        unit_to_remove = db.DBSession.query(Unit).filter(Unit.id == unit_id).one()
        db.DBSession.delete(unit_to_remove)
        db.DBSession.flush()
        return True
    except NoResultFound:
        raise ResourceNotFoundError("Unit (ID=%s) does not exist"%(unit_id))
[ "def", "delete_unit", "(", "unit_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "db_unit", "=", "db", ".", "DBSession", ".", "query", "(", "Unit", ")", ".", "filter", "(", "Unit", ".", "id", "==", "unit_id", ")", ".", "one", "(", ")", "db", ...
28.785714
18.5
def ax(self):
    """Axes instance of the plot, created lazily on first access."""
    if self._ax is not None:
        return self._ax
    import matplotlib.pyplot as plt
    plt.figure()
    self._ax = plt.axes(projection=self._get_sample_projection())
    return self._ax
[ "def", "ax", "(", "self", ")", ":", "if", "self", ".", "_ax", "is", "None", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "figure", "(", ")", "self", ".", "_ax", "=", "plt", ".", "axes", "(", "projection", "=", "self", ".",...
34.714286
15.428571
def _confirm_or_prompt_or_command(pymux): " True when we are waiting for a command, prompt or confirmation. " client_state = pymux.get_client_state() if client_state.confirm_text or client_state.prompt_command or client_state.command_mode: return True
[ "def", "_confirm_or_prompt_or_command", "(", "pymux", ")", ":", "client_state", "=", "pymux", ".", "get_client_state", "(", ")", "if", "client_state", ".", "confirm_text", "or", "client_state", ".", "prompt_command", "or", "client_state", ".", "command_mode", ":", ...
53.4
21.8
def release_client(self, client):
    """Releases a client object to the pool.

    Args:
        client: Client object.
    """
    if isinstance(client, Client):
        expired = self._is_expired_client(client)
        if not expired:
            # Still fresh: hand it back to the pool for reuse.
            LOG.debug('Client is not expired. Adding back to pool')
            self.__pool.append(client)
        elif client.is_connected():
            # Expired clients are discarded; close the live connection.
            LOG.debug('Client is expired and connected. Disconnecting')
            client.disconnect()
    if self.__sem is not None:
        self.__sem.release()
[ "def", "release_client", "(", "self", ",", "client", ")", ":", "if", "isinstance", "(", "client", ",", "Client", ")", ":", "if", "not", "self", ".", "_is_expired_client", "(", "client", ")", ":", "LOG", ".", "debug", "(", "'Client is not expired. Adding back...
37.066667
12.066667
def parse_lcov_file_info(args, filepath, line_iter, line_coverage_re, file_end_string):
    """Parse per-line coverage counts from an lcov info section.

    Reads ``DA``-style records from *line_iter* until an "end_of_record"
    marker, then maps them onto the source file's lines.

    :param args: options object; ``args.max_cov_count`` (if truthy) caps
        counts at ``max_cov_count + 1``
    :param filepath: path of the source file being covered (used only to
        count its lines)
    :param line_iter: iterator over the lcov section's lines
    :param line_coverage_re: regex with two groups: line number, hit count
    :param file_end_string: unused here; the literal "end_of_record" is
        matched instead (kept for interface compatibility)
    :return: list with one entry per source line — hit count or None
    """
    lines_covered = []
    for line in line_iter:
        if line != "end_of_record":
            line_coverage_match = line_coverage_re.match(line)
            if line_coverage_match:
                line_no = line_coverage_match.group(1)
                cov_count = int(line_coverage_match.group(2))
                if args.max_cov_count:
                    if cov_count > args.max_cov_count:
                        # Cap runaway counts at max + 1.
                        cov_count = args.max_cov_count + 1
                lines_covered.append((line_no, cov_count))
        else:
            break
    # Bug fix: the file handle was opened and never closed; count lines
    # inside a context manager (and without building a throwaway list).
    with open(filepath, 'r') as source_file:
        num_code_lines = sum(1 for _ in source_file)
    coverage = [None] * num_code_lines
    for line_no, cov_count in lines_covered:
        coverage[int(line_no) - 1] = cov_count
    return coverage
[ "def", "parse_lcov_file_info", "(", "args", ",", "filepath", ",", "line_iter", ",", "line_coverage_re", ",", "file_end_string", ")", ":", "coverage", "=", "[", "]", "lines_covered", "=", "[", "]", "for", "line", "in", "line_iter", ":", "if", "line", "!=", ...
38.5
18.041667
def xmon_op_from_proto_dict(proto_dict: Dict) -> ops.Operation:
    """Convert the proto dictionary to the corresponding operation.

    See protos in api/google/v1 for specification of the protos.

    Args:
        proto_dict: Dictionary representing the proto. Keys are always
            strings, but values may be types correspond to a raw proto type
            or another dictionary (for messages).

    Returns:
        The operation.

    Raises:
        ValueError if the dictionary does not contain required values
        corresponding to the proto.
    """

    def raise_missing_fields(gate_name: str):
        # Shared error helper: every gate type reports the same way.
        raise ValueError(
            '{} missing required fields: {}'.format(gate_name, proto_dict))
    # Shorthands for the two sub-message decoders used below.
    param = _parameterized_value_from_proto_dict
    qubit = devices.GridQubit.from_proto_dict
    if 'exp_w' in proto_dict:
        exp_w = proto_dict['exp_w']
        if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w
                or 'target' not in exp_w):
            raise_missing_fields('ExpW')
        return ops.PhasedXPowGate(
            exponent=param(exp_w['half_turns']),
            phase_exponent=param(exp_w['axis_half_turns']),
        ).on(qubit(exp_w['target']))
    elif 'exp_z' in proto_dict:
        exp_z = proto_dict['exp_z']
        if 'half_turns' not in exp_z or 'target' not in exp_z:
            raise_missing_fields('ExpZ')
        return ops.Z(qubit(exp_z['target']))**param(exp_z['half_turns'])
    elif 'exp_11' in proto_dict:
        exp_11 = proto_dict['exp_11']
        if ('half_turns' not in exp_11 or 'target1' not in exp_11
                or 'target2' not in exp_11):
            raise_missing_fields('Exp11')
        return ops.CZ(qubit(exp_11['target1']),
                      qubit(exp_11['target2']))**param(exp_11['half_turns'])
    elif 'measurement' in proto_dict:
        meas = proto_dict['measurement']
        invert_mask = cast(Tuple[Any, ...], ())
        if 'invert_mask' in meas:
            # Mask entries arrive JSON-encoded; decode each one.
            invert_mask = tuple(json.loads(x) for x in meas['invert_mask'])
        if 'key' not in meas or 'targets' not in meas:
            raise_missing_fields('Measurement')
        return ops.MeasurementGate(
            num_qubits=len(meas['targets']),
            key=meas['key'],
            invert_mask=invert_mask
        ).on(*[qubit(q) for q in meas['targets']])
    else:
        # No recognized gate key present in the proto.
        raise ValueError('invalid operation: {}'.format(proto_dict))
[ "def", "xmon_op_from_proto_dict", "(", "proto_dict", ":", "Dict", ")", "->", "ops", ".", "Operation", ":", "def", "raise_missing_fields", "(", "gate_name", ":", "str", ")", ":", "raise", "ValueError", "(", "'{} missing required fields: {}'", ".", "format", "(", ...
40.844828
16.12069
def _get_text_for_grounding(stmt, agent_text):
    """Get text context for Deft disambiguation

    If the INDRA database is available, attempts to get the fulltext from
    which the statement was extracted. If the fulltext is not available, the
    abstract is returned. If the indra database is not available, uses the
    pubmed client to get the abstract. If no abstract can be found, falls
    back on returning the evidence text for the statement.

    Parameters
    ----------
    stmt : py:class:`indra.statements.Statement`
        Statement with agent we seek to disambiguate.

    agent_text : str
        Agent text that needs to be disambiguated

    Returns
    -------
    text : str
        Text for Feft disambiguation
    """
    text = None
    # First we will try to get content from the DB
    try:
        # Imported lazily so the function works when indra_db is absent
        # (the except below falls through to the PubMed path).
        from indra_db.util.content_scripts \
            import get_text_content_from_text_refs
        from indra.literature.deft_tools import universal_extract_text
        refs = stmt.evidence[0].text_refs
        # Prioritize the pmid attribute if given
        if stmt.evidence[0].pmid:
            refs['PMID'] = stmt.evidence[0].pmid
        logger.info('Obtaining text for disambiguation with refs: %s' %
                    refs)
        content = get_text_content_from_text_refs(refs)
        text = universal_extract_text(content, contains=agent_text)
        if text:
            return text
    except Exception as e:
        logger.info('Could not get text for disambiguation from DB.')
    # If that doesn't work, we try PubMed next
    if text is None:
        from indra.literature import pubmed_client
        pmid = stmt.evidence[0].pmid
        if pmid:
            logger.info('Obtaining abstract for disambiguation for PMID%s' %
                        pmid)
            text = pubmed_client.get_abstract(pmid)
            if text:
                return text
    # Finally, falling back on the evidence sentence
    if text is None:
        logger.info('Falling back on sentence-based disambiguation')
        text = stmt.evidence[0].text
        return text
    # Reached only when an earlier lookup produced a falsy-but-not-None
    # value (e.g. empty abstract): no usable context was found.
    return None
[ "def", "_get_text_for_grounding", "(", "stmt", ",", "agent_text", ")", ":", "text", "=", "None", "# First we will try to get content from the DB", "try", ":", "from", "indra_db", ".", "util", ".", "content_scripts", "import", "get_text_content_from_text_refs", "from", "...
36.839286
19.303571
def _loadDummyModelParameters(self, params): """ Loads all the parameters for this dummy model. For any paramters specified as lists, read the appropriate value for this model using the model index """ for key, value in params.iteritems(): if type(value) == list: index = self.modelIndex % len(params[key]) self._params[key] = params[key][index] else: self._params[key] = params[key]
[ "def", "_loadDummyModelParameters", "(", "self", ",", "params", ")", ":", "for", "key", ",", "value", "in", "params", ".", "iteritems", "(", ")", ":", "if", "type", "(", "value", ")", "==", "list", ":", "index", "=", "self", ".", "modelIndex", "%", "...
38.727273
13
def get_uri(self, alias):
    """
    Get the URI for a given alias. A registered URI passed in as the
    alias is returned unchanged; an unknown name raises a
    ``GraphException`` (the previous docstring incorrectly claimed
    ``None`` was returned).
    """
    # Direct alias lookup (membership test on the dict itself, not .keys()).
    if alias in self.aliases:
        return self.aliases[alias]
    # Already a registered URI: return it unchanged.
    if alias in self.aliases.values():
        return alias
    raise GraphException('No such schema: %r' % alias)
[ "def", "get_uri", "(", "self", ",", "alias", ")", ":", "if", "alias", "in", "self", ".", "aliases", ".", "keys", "(", ")", ":", "return", "self", ".", "aliases", "[", "alias", "]", "if", "alias", "in", "self", ".", "aliases", ".", "values", "(", ...
43.625
6.625
def add_text(self, text):
    """
    Append the run content elements corresponding to *text* to the
    ``<w:r>`` element of this instance, one character at a time, then
    flush any pending content.
    """
    for character in text:
        self.add_char(character)
    self.flush()
[ "def", "add_text", "(", "self", ",", "text", ")", ":", "for", "char", "in", "text", ":", "self", ".", "add_char", "(", "char", ")", "self", ".", "flush", "(", ")" ]
29.5
11.5
def capabilities(self):
    """Returns the list of system capabilities.

    :return: A ``list`` of capabilities.
    """
    response = self.get(PATH_CAPABILITIES)
    entry = _load_atom(response, MATCH_ENTRY_CONTENT)
    return entry.capabilities
[ "def", "capabilities", "(", "self", ")", ":", "response", "=", "self", ".", "get", "(", "PATH_CAPABILITIES", ")", "return", "_load_atom", "(", "response", ",", "MATCH_ENTRY_CONTENT", ")", ".", "capabilities" ]
34.857143
13.714286
def get_vert_connectivity(mesh_v, mesh_f):
    """Returns a sparse matrix (of size #verts x #verts) where each nonzero
    element indicates a neighborhood relation. For example, if there is a
    nonzero element in position (15,12), that means vertex 15 is connected
    by an edge to vertex 12."""
    n_verts = len(mesh_v)
    vpv = sp.csc_matrix((n_verts, n_verts))

    # Accumulate one adjacency contribution per face edge (column i -> i+1,
    # wrapping around the triangle), symmetrized via the transpose.
    for i in range(3):
        src = mesh_f[:, i]
        dst = mesh_f[:, (i + 1) % 3]
        data = np.ones(len(src))
        ij = np.vstack((row(src.flatten()), row(dst.flatten())))
        mtx = sp.csc_matrix((data, ij), shape=vpv.shape)
        vpv = vpv + mtx + mtx.T

    return vpv
[ "def", "get_vert_connectivity", "(", "mesh_v", ",", "mesh_f", ")", ":", "vpv", "=", "sp", ".", "csc_matrix", "(", "(", "len", "(", "mesh_v", ")", ",", "len", "(", "mesh_v", ")", ")", ")", "# for each column in the faces...", "for", "i", "in", "range", "(...
36.222222
18.222222
def preston_bin(data, max_num):
    """Bin data on base 2 using Preston's method.

    Parameters
    ----------
    data : array-like
        Data to be binned
    max_num : float
        The maximum upper value of the data

    Returns
    -------
    tuple
        (binned_data, bin_edges)

    Notes
    -----
    Preston's binning uses exclusive lower boundaries and inclusive upper
    boundaries; densities are not split between bins.

    References
    ----------
    .. [#]
        Preston, F. (1962). The canonical distribution of commonness and
        rarity. Ecology, 43, 185-215

    """
    log_ub = np.ceil(np.log2(max_num))

    # Build bin edges with an exclusive lower bound, per Preston.
    if log_ub == 0:
        boundaries = np.array([0, 1])
    elif log_ub == 1:
        boundaries = np.arange(1, 4)
    else:
        boundaries = 2 ** np.arange(0, log_ub + 1)
        boundaries = np.insert(boundaries, 2, 3)
        boundaries[3:] = boundaries[3:] + 1

    return np.histogram(data, bins=boundaries)
[ "def", "preston_bin", "(", "data", ",", "max_num", ")", ":", "log_ub", "=", "np", ".", "ceil", "(", "np", ".", "log2", "(", "max_num", ")", ")", "# Make an exclusive lower bound in keeping with Preston", "if", "log_ub", "==", "0", ":", "boundaries", "=", "np...
24.8
21.854545
def _ip_int_from_string(cls, ip_str):
    """Turn the given IP string into an integer for comparison.

    Args:
        ip_str: A string, the IP ip_str.

    Returns:
        The IP ip_str as an integer.

    Raises:
        AddressValueError: if ip_str isn't a valid IPv4 Address.

    """
    if not ip_str:
        raise AddressValueError('Address cannot be empty')

    octets = ip_str.split('.')
    if len(octets) != 4:
        raise AddressValueError("Expected 4 octets in %r" % ip_str)

    try:
        # Parse each dotted-quad octet, then pack big-endian into one int.
        parsed_octets = map(cls._parse_octet, octets)
        return _compat_int_from_byte_vals(parsed_octets, 'big')
    except ValueError as exc:
        raise AddressValueError("%s in %r" % (exc, ip_str))
[ "def", "_ip_int_from_string", "(", "cls", ",", "ip_str", ")", ":", "if", "not", "ip_str", ":", "raise", "AddressValueError", "(", "'Address cannot be empty'", ")", "octets", "=", "ip_str", ".", "split", "(", "'.'", ")", "if", "len", "(", "octets", ")", "!=...
29.4
20.68
def get_subclass(cls, name):
    """Look up a Benchmark subclass by name.

    :param name: name returned by ``Benchmark.name`` property
    :return: the matching ``Benchmark`` subclass
    :raises NameError: if no direct subclass carries that name
    """
    match = next(
        (candidate for candidate in cls.__subclasses__()
         if candidate.name == name),
        None,
    )
    if match is None:
        raise NameError("Not a valid Benchmark class: " + name)
    return match
[ "def", "get_subclass", "(", "cls", ",", "name", ")", ":", "for", "subclass", "in", "cls", ".", "__subclasses__", "(", ")", ":", "if", "subclass", ".", "name", "==", "name", ":", "return", "subclass", "raise", "NameError", "(", "\"Not a valid Benchmark class:...
41
9.444444
def rebuild(self, **kwargs):
    '''Repopulate the node-tracking data structures from scratch.

    Shouldn't really ever be needed.
    '''
    # Reset every tracking structure, then re-register from the root.
    self.nodes, self.node_types = [], []
    self.id_dict, self.type_dict = {}, {}
    self.add_node(self.root)
[ "def", "rebuild", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "nodes", "=", "[", "]", "self", ".", "node_types", "=", "[", "]", "self", ".", "id_dict", "=", "{", "}", "self", ".", "type_dict", "=", "{", "}", "self", ".", "add_n...
27.1
18.3
def add(self, other):
    """Return the QuantumChannel self + other.

    Args:
        other (QuantumChannel): a quantum channel.

    Returns:
        SuperOp: the linear addition self + other as a SuperOp object.

    Raises:
        QiskitError: if other cannot be converted to a channel or
        has incompatible dimensions.
    """
    # Coerce the operand into the SuperOp representation first.
    if not isinstance(other, SuperOp):
        other = SuperOp(other)
    if self.dim != other.dim:
        raise QiskitError("other QuantumChannel dimensions are not equal")
    summed = self._data + other.data
    return SuperOp(summed, self.input_dims(), self.output_dims())
[ "def", "add", "(", "self", ",", "other", ")", ":", "# Convert other to SuperOp", "if", "not", "isinstance", "(", "other", ",", "SuperOp", ")", ":", "other", "=", "SuperOp", "(", "other", ")", "if", "self", ".", "dim", "!=", "other", ".", "dim", ":", ...
34.6
18.95
def remove_domain_user_role(request, user, role, domain=None):
    """Removes a given single role for a user from a domain."""
    roles_api = keystoneclient(request, admin=True).roles
    return roles_api.revoke(role, user=user, domain=domain)
[ "def", "remove_domain_user_role", "(", "request", ",", "user", ",", "role", ",", "domain", "=", "None", ")", ":", "manager", "=", "keystoneclient", "(", "request", ",", "admin", "=", "True", ")", ".", "roles", "return", "manager", ".", "revoke", "(", "ro...
59.25
13.5
def default_job_name(self):
    """Slurm job name if not already specified in the `sbatch` section.

    New campaigns are prefixed with the campaign file's stem
    ("<campaign>/<tag>"); existing campaigns use the tag alone.
    """
    parts = []
    if not self.root.existing_campaign:
        stem = osp.splitext(osp.basename(self.root.campaign_file))[0]
        parts.append(stem + '/')
    parts.append(self.tag)
    return ''.join(parts)
[ "def", "default_job_name", "(", "self", ")", ":", "name", "=", "''", "if", "not", "self", ".", "root", ".", "existing_campaign", ":", "campaign_file", "=", "osp", ".", "basename", "(", "self", ".", "root", ".", "campaign_file", ")", "campaign", "=", "osp...
36.6
12
def sort_data(data, cols):
    """Sort `data` rows by *cols* and reorder columns to cols + ['value']."""
    ordered_cols = cols + ['value']
    return data.sort_values(cols)[ordered_cols].reset_index(drop=True)
[ "def", "sort_data", "(", "data", ",", "cols", ")", ":", "return", "data", ".", "sort_values", "(", "cols", ")", "[", "cols", "+", "[", "'value'", "]", "]", ".", "reset_index", "(", "drop", "=", "True", ")" ]
48
16
def stream_fastq_full(fastq, threads):
    """Generator yielding metrics extracted from a fastq file.

    Extract from a fastq file:
    -readname
    -average and median quality
    -read_lenght
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    records = SeqIO.parse(inputfastq, "fastq")
    # Fan the per-read extraction out over a process pool.
    with cfutures.ProcessPoolExecutor(max_workers=threads) as pool:
        yield from pool.map(extract_all_from_fastq, records)
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
[ "def", "stream_fastq_full", "(", "fastq", ",", "threads", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect full metrics from plain fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "with", "cfutures", ".", "Proce...
42.714286
21.785714
def strip_tweet(text, remove_url=True):
    """Strip tweet message.

    This method removes mentions strings and urls(optional).

    :param text: tweet message
    :type text: :class:`str`

    :param remove_url: Remove urls. default :const:`True`.
    :type remove_url: :class:`boolean`

    :returns: Striped tweet message
    :rtype: :class:`str`
    """
    # URLs are either dropped entirely or expanded to their full form.
    if remove_url:
        text = url_pattern.sub('', text)
    else:
        text = expand_url(text)
    text = mention_pattern.sub('', text)
    text = html_parser.unescape(text)
    return text.strip()
[ "def", "strip_tweet", "(", "text", ",", "remove_url", "=", "True", ")", ":", "if", "remove_url", ":", "text", "=", "url_pattern", ".", "sub", "(", "''", ",", "text", ")", "else", ":", "text", "=", "expand_url", "(", "text", ")", "text", "=", "mention...
24.304348
17
def __draw_block(ax, pair, block, density_scale):
    """!
    @brief Display BANG-block on the specified ax.

    @param[in] ax (Axis): Axis where block should be displayed.
    @param[in] pair (tuple): Pair of coordinate index that should be displayed.
    @param[in] block (bang_block): BANG-block that should be displayed.
    @param[in] density_scale (double): Max density to display density of the block by appropriate tone.

    """
    max_corner, min_corner = bang_visualizer.__get_rectangle_description(block, pair)

    has_cluster = block.get_cluster() is not None

    # Scale the fill transparency by the block's relative density.
    if density_scale != 0.0:
        density_scale = bang_visualizer.__maximum_density_alpha * block.get_density() / density_scale

    face_color = matplotlib.colors.to_rgba('blue', alpha=density_scale)
    edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)

    rectangle = patches.Rectangle(min_corner,
                                  max_corner[0] - min_corner[0],
                                  max_corner[1] - min_corner[1],
                                  fill=has_cluster,
                                  facecolor=face_color,
                                  edgecolor=edge_color,
                                  linewidth=0.5)
    ax.add_patch(rectangle)
[ "def", "__draw_block", "(", "ax", ",", "pair", ",", "block", ",", "density_scale", ")", ":", "max_corner", ",", "min_corner", "=", "bang_visualizer", ".", "__get_rectangle_description", "(", "block", ",", "pair", ")", "belong_cluster", "=", "block", ".", "get_...
47.923077
29.653846
def _merge_setting(cli_options, conf, name, name_cli=None, default=None): # type: (dict, dict, str, str, Any) -> Any """Merge a setting, preferring the CLI option if set :param dict cli_options: cli options :param dict conf: configuration sub-block :param str name: key name :param str name_cli: override key name for cli_options :param Any default: default value to set if missing :rtype: Any :return: merged setting value """ val = cli_options.get(name_cli or name) if val is None: val = conf.get(name, default) return val
[ "def", "_merge_setting", "(", "cli_options", ",", "conf", ",", "name", ",", "name_cli", "=", "None", ",", "default", "=", "None", ")", ":", "# type: (dict, dict, str, str, Any) -> Any", "val", "=", "cli_options", ".", "get", "(", "name_cli", "or", "name", ")",...
38
11.533333
def chrome_decrypt(encrypted_value: bytes, key: bytes, init_vector: bytes) \
        -> str:
    """Decrypt Chrome/Chromium's encrypted cookies.

    Args:
        encrypted_value: Encrypted cookie from Chrome/Chromium's cookie file
        key: Key to decrypt encrypted_value
        init_vector: Initialization vector for decrypting encrypted_value

    Returns:
        Decrypted value of encrypted_value

    """
    # Encrypted cookies should be prefixed with 'v10' or 'v11' according to
    # the Chromium code. Strip it off.
    payload = encrypted_value[3:]

    cipher = AES.new(key, AES.MODE_CBC, IV=init_vector)
    return clean(cipher.decrypt(payload))
[ "def", "chrome_decrypt", "(", "encrypted_value", ":", "bytes", ",", "key", ":", "bytes", ",", "init_vector", ":", "bytes", ")", "->", "str", ":", "# Encrypted cookies should be prefixed with 'v10' or 'v11' according to the", "# Chromium code. Strip it off.", "encrypted_value"...
34.35
21.75
def create(cls, name, certificate):
    """
    Create a new external VPN CA for signing internal gateway
    certificates.

    :param str name: Name of VPN CA
    :param str certificate: file name, path or certificate string.
    :raises CreateElementFailed: Failed creating cert with reason
    :rtype: VPNCertificateCA
    """
    payload = {'name': name, 'certificate': certificate}
    return ElementCreator(cls, payload)
[ "def", "create", "(", "cls", ",", "name", ",", "certificate", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'certificate'", ":", "certificate", "}", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
34.357143
13.928571
def _write4bits(self, value):
    """Write 4 bits of data into the data bus, then pulse the enable pin."""
    for offset in range(4):
        # Data pins occupy self.pins[7..10]; drive each with one bit.
        GPIO.output(self.pins[offset + 7], (value >> offset) & 0x01)
    self._pulse_enable()
[ "def", "_write4bits", "(", "self", ",", "value", ")", ":", "for", "i", "in", "range", "(", "4", ")", ":", "bit", "=", "(", "value", ">>", "i", ")", "&", "0x01", "GPIO", ".", "output", "(", "self", ".", "pins", "[", "i", "+", "7", "]", ",", ...
36.5
7.666667
def main():
    """
    NAME
        sort_specimens.py

    DESCRIPTION
        Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)

    SYNTAX
        sort_specimens.py [-h] [command line options]

    INPUT
        takes pmag_specimens.txt formatted input file

    OPTIONS
        -h: prints help message and quits
        -f FILE: specify input file, default is 'pmag_specimens.txt'

    OUTPUT
        makes pmag_specimen formatted files with input filename plus _X_Y
        where X is the component name and Y is s,g,t for coordinate system
    """
    dir_path = '.'
    inspec = "pmag_specimens.txt"
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind + 1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        inspec = sys.argv[ind + 1]
    basename = inspec.split('.')[:-1]
    inspec = dir_path + "/" + inspec
    ofile_base = dir_path + "/" + basename[0]
    #
    # read in data
    #
    prior_spec_data, file_type = pmag.magic_read(inspec)
    if file_type != 'pmag_specimens':
        print(file_type, " this is not a valid pmag_specimens file")
        sys.exit()
    # get list of specimens in file, components, coordinate systems available
    specs, comps, coords = [], [], []
    for spec in prior_spec_data:
        if spec['er_specimen_name'] not in specs:
            specs.append(spec['er_specimen_name'])
        if 'specimen_comp_name' not in list(spec.keys()):
            spec['specimen_comp_name'] = 'A'
        # BUG FIX: the original stored the default under the wrong key
        # ('tilt_correction'), so the reads of 'specimen_tilt_correction'
        # below raised KeyError for records lacking that column.
        if 'specimen_tilt_correction' not in list(spec.keys()):
            spec['specimen_tilt_correction'] = '-1'  # assume specimen coordinates
        if spec['specimen_comp_name'] not in comps:
            comps.append(spec['specimen_comp_name'])
        if spec['specimen_tilt_correction'] not in coords:
            coords.append(spec['specimen_tilt_correction'])
    # work on separating out components, coordinate systems by specimen
    for coord in coords:
        print(coord)
        for comp in comps:
            print(comp)
            speclist = []
            for spec in prior_spec_data:
                if spec['specimen_tilt_correction'] == coord and \
                        spec['specimen_comp_name'] == comp:
                    speclist.append(spec)
            ofile = ofile_base + '_' + coord + '_' + comp + '.txt'
            pmag.magic_write(ofile, speclist, 'pmag_specimens')
            print('coordinate system: ', coord,
                  ' component name: ', comp, ' saved in ', ofile)
[ "def", "main", "(", ")", ":", "dir_path", "=", "'.'", "inspec", "=", "\"pmag_specimens.txt\"", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", ...
37.758065
25.080645
def convert_table(self, markup):
    """ Substitutes <table> content in the markup with Wikipedia
        table markup.
    """
    for html_table in re.findall(self.re["html-table"], markup):
        converted = re.sub(r"<table(.*?)>", "{|\\1", html_table)
        converted = re.sub(r"<tr(.*?)>", "|-\\1", converted)
        converted = re.sub(r"<td(.*?)>", "|\\1|", converted)
        # Closing tags carry no attributes, so plain replacement suffices.
        for closing, wiki_mark in (("</td>", "\n"),
                                   ("</tr>", "\n"),
                                   ("</table>", "\n|}")):
            converted = converted.replace(closing, wiki_mark)
        markup = markup.replace(html_table, converted)
    return markup
[ "def", "convert_table", "(", "self", ",", "markup", ")", ":", "for", "table", "in", "re", ".", "findall", "(", "self", ".", "re", "[", "\"html-table\"", "]", ",", "markup", ")", ":", "wiki", "=", "table", "wiki", "=", "re", ".", "sub", "(", "r\"<ta...
36.875
14.875
def parse_individual(self, individual):
    """Converts a deap individual into a full list of parameters.

    Parameters
    ----------
    individual: deap individual from optimization
        Details vary according to type of optimization, but
        parameters within deap individual are always between -1 and 1.
        This function converts them into the values used to actually
        build the model

    Returns
    -------
    fullpars: list
        Full parameter list for model building.
    """
    means = self._params['value_means']
    ranges = self._params['value_ranges']
    # Map each gene from [-1, 1] onto its real value range.
    scaled = [means[i] + (individual[i] * ranges[i])
              for i in range(len(means))]
    # Substitute every placeholder in the arrangement with its scaled value.
    fullpars = list(self._params['arrangement'])
    for idx, placeholder in enumerate(self._params['variable_parameters']):
        fullpars = [scaled[idx] if entry == placeholder else entry
                    for entry in fullpars]
    return fullpars
[ "def", "parse_individual", "(", "self", ",", "individual", ")", ":", "scaled_ind", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_params", "[", "'value_means'", "]", ")", ")", ":", "scaled_ind", ".", "append", "(", "self", ...
42.32
17.6
def cluster_on_extra_high_voltage(network, busmap, with_time=True):
    """ Main function of the EHV-Clustering approach. Creates a new
    clustered pypsa.Network given a busmap mapping all bus_ids to other
    bus_ids of the same network.

    Parameters
    ----------
    network : pypsa.Network
        Container for all network components.

    busmap : dict
        Maps old bus_ids to new bus_ids.

    with_time : bool
        If true time-varying data will also be aggregated.

    Returns
    -------
    network : pypsa.Network
        Container for all network components of the clustered network.

    NOTE(review): this function mutates the *input* network (it overwrites
    ``network.generators.control`` and adds a ``weight`` column) — confirm
    callers do not rely on the originals.
    """
    network_c = Network()

    # Aggregate buses according to the busmap; x/y coordinates of each
    # cluster are taken from the leading bus (see _leading).
    buses = aggregatebuses(
        network, busmap, {
            'x': _leading(
                busmap, network.buses),
            'y': _leading(
                busmap, network.buses)})

    # keep attached lines
    lines = network.lines.copy()
    mask = lines.bus0.isin(buses.index)
    lines = lines.loc[mask, :]

    # keep attached links
    links = network.links.copy()
    mask = links.bus0.isin(buses.index)
    links = links.loc[mask, :]

    # keep attached transformer
    transformers = network.transformers.copy()
    mask = transformers.bus0.isin(buses.index)
    transformers = transformers.loc[mask, :]

    # Copy the retained static components into the clustered network.
    io.import_components_from_dataframe(network_c, buses, "Bus")
    io.import_components_from_dataframe(network_c, lines, "Line")
    io.import_components_from_dataframe(network_c, links, "Link")
    io.import_components_from_dataframe(network_c, transformers, "Transformer")

    if with_time:
        # NOTE(review): the direct assignment looks redundant with
        # set_snapshots on the next line — presumably kept for
        # compatibility; confirm before removing.
        network_c.snapshots = network.snapshots
        network_c.set_snapshots(network.snapshots)
        network_c.snapshot_weightings = network.snapshot_weightings.copy()

    # dealing with generators
    network.generators.control = "PV"
    network.generators['weight'] = 1  # equal weighting in the aggregation
    new_df, new_pnl = aggregategenerators(network, busmap, with_time)
    io.import_components_from_dataframe(network_c, new_df, 'Generator')
    for attr, df in iteritems(new_pnl):
        io.import_series_from_dataframe(network_c, df, 'Generator', attr)

    # dealing with all other components
    aggregate_one_ports = components.one_port_components.copy()
    aggregate_one_ports.discard('Generator')  # generators already handled above

    for one_port in aggregate_one_ports:
        new_df, new_pnl = aggregateoneport(
            network, busmap, component=one_port, with_time=with_time)
        io.import_components_from_dataframe(network_c, new_df, one_port)
        for attr, df in iteritems(new_pnl):
            io.import_series_from_dataframe(network_c, df, one_port, attr)

    # Rebuild sub-network/topology metadata for the clustered network.
    network_c.determine_network_topology()

    return network_c
[ "def", "cluster_on_extra_high_voltage", "(", "network", ",", "busmap", ",", "with_time", "=", "True", ")", ":", "network_c", "=", "Network", "(", ")", "buses", "=", "aggregatebuses", "(", "network", ",", "busmap", ",", "{", "'x'", ":", "_leading", "(", "bu...
33.493506
20.402597
def set_baselines(self):
    """
    Modify coords to shift tree position for x,y baseline arguments.
    This is useful for arrangeing trees onto a Canvas with other plots,
    but still sharing a common cartesian axes coordinates.
    """
    offset = self.style.xbaseline
    if not offset:
        return
    # For vertical orientations the baseline shifts the x column (0);
    # otherwise it shifts column 1.
    axis = 0 if self.style.orient in ("up", "down") else 1
    self.coords.coords[:, axis] += offset
    self.coords.verts[:, axis] += offset
[ "def", "set_baselines", "(", "self", ")", ":", "if", "self", ".", "style", ".", "xbaseline", ":", "if", "self", ".", "style", ".", "orient", "in", "(", "\"up\"", ",", "\"down\"", ")", ":", "self", ".", "coords", ".", "coords", "[", ":", ",", "0", ...
48.230769
19.923077