code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def fRD(a, M):
    """Calculate the ring-down frequency of the final Kerr black hole.

    Uses Eq. 5.5 of the main paper.

    :param a: dimensionless spin of the final black hole
    :param M: mass of the final black hole in solar masses
    :return: ring-down frequency in Hz
    """
    # Prefactor converts the geometric-unit expression into an SI frequency.
    prefactor = lal.C_SI ** 3.0 / (2.0 * lal.PI * lal.G_SI * M * lal.MSUN_SI)
    return prefactor * (1.5251 - 1.1568 * (1.0 - a) ** 0.1292)
Calculate the ring-down frequency for the final Kerr BH. Using Eq. 5.5 of Main paper
def non_neighbors(graph, node, t=None):
    """Return an iterator over the nodes that are not neighbors of ``node`` at time ``t``.

    Parameters
    ----------
    graph : DyNetx graph
        Graph to find neighbors in.
    node : node
        The node whose non-neighbors will be returned.
    t : snapshot id (default=None)
        If None the non-neighbors are identified on the flattened graph.

    Returns
    -------
    iterator
        Nodes in the graph that are not neighbors of ``node``.
    """
    if graph.is_directed():
        adjacent = chain(graph.predecessors(node, t=t),
                         graph.successors(node, t=t))
    else:
        adjacent = graph.neighbors(node, t=t)
    # The node itself is excluded along with its neighborhood.
    excluded = set(adjacent)
    excluded.add(node)
    return (candidate for candidate in graph if candidate not in excluded)
Returns the non-neighbors of the node in the graph at time t. Parameters ---------- graph : DyNetx graph Graph to find neighbors. node : node The node whose neighbors will be returned. t : snapshot id (default=None) If None the non-neighbors are identified on the flattened graph. Returns ------- non_neighbors : iterator Iterator of nodes in the graph that are not neighbors of the node.
def _build(value, property_path=None):
    """Generic schema-definition build dispatcher.

    :param value: The value to build a schema definition for
    :param property_path: The property path of the current type,
        defaults to an empty list
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    property_path = property_path or []
    if is_config_type(value):
        return _build_config(value, property_path=property_path)
    if is_config_var(value):
        return _build_var(value, property_path=property_path)
    if is_builtin_type(value):
        return _build_type(value, value, property_path=property_path)
    if is_regex_type(value):
        # Regex values are represented as constrained strings.
        return _build_type(str, value, property_path=property_path)
    if is_typing_type(value):
        return _build_type(value, value, property_path=property_path)
    # Fallback: build from the runtime type of the value.
    return _build_type(type(value), value, property_path=property_path)
The generic schema definition build method. :param value: The value to build a schema definition for :param List[str] property_path: The property path of the current type, defaults to None, optional :return: The built schema definition :rtype: Dict[str, Any]
def advertiseBrokerWorkerDown(exctype, value, traceback):
    """Excepthook advertising the broker when an impromptu shutdown occurs."""
    # Only tear the queue down if a shutdown was not already requested.
    if not scoop.SHUTDOWN_REQUESTED:
        execQueue.shutdown()
    # Delegate to the default hook so the traceback is still printed.
    sys.__excepthook__(exctype, value, traceback)
Hook advertising the broker if an impromptu shutdown is occurring.
def getchild(self, name, parent):
    """Return a child of ``parent`` by name.

    Names starting with '@' select an attribute instead of a child node.
    """
    if name.startswith('@'):
        # Strip the '@' marker before looking up the attribute.
        return parent.get_attribute(name[1:])
    return parent.get_child(name)
get a child by name
def loadTargetPatterns(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """Load targets as patterns from ``filename`` by delegating to the file loader."""
    # NOTE(review): the delegate is spelled "Patternss" (double s) — presumably
    # that is the actual method name elsewhere in the class; confirm before renaming.
    self.loadTargetPatternssFromFile(filename, cols, everyNrows, delim, checkEven)
Loads targets as patterns from file.
def natural_neighbor_to_points(points, values, xi):
    r"""Generate a natural neighbor interpolation to the given points.

    Assigns values to the interpolation points using the Liang and Hale
    [Liang2010]_ approach.

    Parameters
    ----------
    points: array_like, shape (n, 2)
        Coordinates of the data points.
    values: array_like, shape (n,)
        Values of the data points.
    xi: array_like, shape (M, 2)
        Points to interpolate the data onto.

    Returns
    -------
    img: (M,) ndarray
        Interpolated values for each point in ``xi``; NaN where a point has
        no natural neighbors.

    See Also
    --------
    natural_neighbor_to_grid
    """
    tri = Delaunay(points)
    members, triangle_info = geometry.find_natural_neighbors(tri, xi)

    img = np.full(xi.shape[0], np.nan, dtype=values.dtype)

    # (2, n) view: row 0 holds the x coordinates, row 1 the y coordinates.
    points_t = np.array(points).transpose()
    for idx, (grid, neighbors) in enumerate(members.items()):
        if len(neighbors) > 0:
            img[idx] = natural_neighbor_point(points_t[0], points_t[1],
                                              values, xi[grid], tri,
                                              neighbors, triangle_info)
    return img
r"""Generate a natural neighbor interpolation to the given points. This assigns values to the given interpolation points using the Liang and Hale [Liang2010]_. approach. Parameters ---------- points: array_like, shape (n, 2) Coordinates of the data points. values: array_like, shape (n,) Values of the data points. xi: array_like, shape (M, 2) Points to interpolate the data onto. Returns ------- img: (M,) ndarray Array representing the interpolated values for each input point in `xi` See Also -------- natural_neighbor_to_grid
def bures_angle(rho0: Density, rho1: Density) -> float:
    """Return the Bures angle between two mixed quantum states.

    Note: the Bures angle cannot be calculated within the tensor backend.
    """
    root_fidelity = np.sqrt(fidelity(rho0, rho1))
    return np.arccos(root_fidelity)
Return the Bures angle between mixed quantum states. Note: the Bures angle cannot be calculated within the tensor backend.
def write_svg(self):
    """Return the system's PUML rendered as an SVG image.

    Requires the plantuml library.
    """
    import plantuml

    source = self.write_puml()
    # Delegate rendering to the configured PlantUML server.
    server = plantuml.PlantUML(url=self.url)
    return server.processes(source)
Returns PUML from the system as a SVG image. Requires plantuml library.
def _calc_mask(self): mask = [] for row in self._constraints: mask.append(tuple(x is None for x in row)) return tuple(mask)
Computes a boolean mask from the user defined constraints.
def align_file_position(f, size):
    """Advance the position in file ``f`` to the next block boundary of ``size``."""
    # NOTE(review): the offset lands on positions congruent to size-1, not on
    # exact multiples of size — presumably intentional for this format; confirm.
    offset = (size - 1) - (f.tell() % size)
    f.seek(offset, 1)  # relative seek from the current position
Align the position in the file to the next block of specified size
def _verify_query(self, query_params):
    """Verify the redirect query parameters returned by the Uber Auth server.

    Parameters
        query_params (dict)
            Query parameters attached to the redirect URL after the user
            approved the app.

    Returns
        authorization_code (str)
            Code to exchange for an access token.

    Raises
        UberIllegalState
            If the redirect URL was missing parameters or the given
            parameters were not valid.
    """
    error_message = None
    # CSRF protection: when a state token was issued, the response must echo it.
    if self.state_token is not False:
        received_state_token = query_params.get('state')
        if received_state_token is None:
            error_message = 'Bad Request. Missing state parameter.'
            raise UberIllegalState(error_message)
        if self.state_token != received_state_token:
            error_message = 'CSRF Error. Expected {}, got {}'
            error_message = error_message.format(
                self.state_token,
                received_state_token,
            )
            raise UberIllegalState(error_message)
    error = query_params.get('error')
    authorization_code = query_params.get(auth.CODE_RESPONSE_TYPE)
    # Exactly one of 'code' / 'error' must be present in a valid redirect.
    if error and authorization_code:
        error_message = (
            'Code and Error query params code and error '
            'can not both be set.'
        )
        raise UberIllegalState(error_message)
    if error is None and authorization_code is None:
        error_message = 'Neither query parameter code or error is set.'
        raise UberIllegalState(error_message)
    if error:
        raise UberIllegalState(error)
    return authorization_code
Verify response from the Uber Auth server. Parameters query_params (dict) Dictionary of query parameters attached to your redirect URL after user approved your app and was redirected. Returns authorization_code (str) Code received when user grants your app access. Use this code to request an access token. Raises UberIllegalState (ApiError) Thrown if the redirect URL was missing parameters or if the given parameters were not valid.
def apply_dict_of_variables_vfunc(func, *args, signature, join='inner', fill_value=None):
    """Apply a variable-level function over dicts of DataArray, DataArray,
    Variable and ndarray objects.
    """
    variable_args = [_as_variables_or_variable(arg) for arg in args]
    names = join_dict_keys(variable_args, how=join)
    grouped = collect_dict_values(variable_args, names, fill_value)

    result_vars = OrderedDict()
    for name, per_name_args in zip(names, grouped):
        result_vars[name] = func(*per_name_args)

    if signature.num_outputs > 1:
        # Split the tuple-valued results into one dict per output.
        return _unpack_dict_tuples(result_vars, signature.num_outputs)
    return result_vars
Apply a variable level function over dicts of DataArray, DataArray, Variable and ndarray objects.
def truth(val, context):
    """Convert the truth value in ``val`` to a boolean.

    Numeric values go straight through ``bool``; strings are matched
    case-insensitively against the TRUE/FALSE vocabularies.
    """
    try:
        0 + val
    except TypeError:
        # Not a number: interpret as a textual boolean.
        lowered = val.lower()
        if lowered in TRUE:
            return True
        if lowered in FALSE:
            return False
        raise FilterError("Bad boolean value %r in %r (expected one of '%s', or '%s')" % (
            val, context, "' '".join(TRUE), "' '".join(FALSE)
        ))
    else:
        return bool(val)
Convert truth value in "val" to a boolean.
def _set_current_options(self, options):
    """Set current options for a model.

    Parameters
    ----------
    options : dict
        Desired option settings; each key is an option name mapped to the
        desired value of that option.
    """
    # Merge the requested options on top of the current ones.
    merged = self._get_current_options()
    merged.update(options)
    return self.__proxy__.set_current_options(merged)
Set current options for a model. Parameters ---------- options : dict A dictionary of the desired option settings. The key should be the name of the option and each value is the desired value of the option.
def axis_updated(self, event: InputEvent, prefix=None):
    """Process an absolute axis event from evdev.

    Called internally by the controller implementations.

    :internal:
    :param event: The evdev event to process
    :param prefix: If present, a named prefix applied to the event code when
        searching for the axis
    """
    lookup_code = event.code if prefix is None else prefix + str(event.code)
    axis = self.axes_by_code.get(lookup_code)
    if axis is None:
        logger.debug('Unknown axis code {} ({}), value {}'.format(event.code, prefix, event.value))
    else:
        axis.receive_device_value(event.value)
Called to process an absolute axis event from evdev, this is called internally by the controller implementations :internal: :param event: The evdev event to process :param prefix: If present, a named prefix that should be applied to the event code when searching for the axis
def _build_one(self, req, output_dir, python_tag=None):
    """Build one wheel inside the requirement's build environment.

    :return: The filename of the built wheel, or None if the build failed.
    """
    with req.build_env:
        return self._build_one_inside_env(
            req, output_dir, python_tag=python_tag)
Build one wheel. :return: The filename of the built wheel, or None if the build failed.
def Detach(self):
    """Detach from the inferior. If not attached, this is a no-op (returns None)."""
    if not self.IsAttached():
        return None
    pid = gdb.selected_inferior().pid
    # Make sure the inferior is in a sane running state before detaching.
    self.Interrupt([pid, None, None])
    self.Continue([pid, None, None])
    result = gdb.execute('detach', to_string=True)
    return result if result else None
Detaches from the inferior. If not attached, this is a no-op.
def public_keys_as_files(self):
    """Store the public keys as temporary SSH identity files.

    The files are created once and cached; subsequent calls return the
    same list of open temporary files.
    """
    if not self.public_keys_tempfiles:
        for key in self.public_keys():
            handle = tempfile.NamedTemporaryFile(
                prefix='trezor-ssh-pubkey-', mode='w')
            handle.write(key)
            # Flush but keep the handle open so the file stays on disk.
            handle.flush()
            self.public_keys_tempfiles.append(handle)
    return self.public_keys_tempfiles
Store public keys as temporary SSH identity files.
def envs(backend=None, sources=False):
    """Return the sorted list of available fileserver environments.

    backend
        Optionally narrow the fileserver backends to a subset of the enabled
        ones; if omitted, environments for all configured backends are
        returned.
    """
    fs = salt.fileserver.Fileserver(__opts__)
    return sorted(fs.envs(back=backend, sources=sources))
Return the available fileserver environments. If no backend is provided, then the environments for all configured backends will be returned. backend Narrow fileserver backends to a subset of the enabled ones. .. versionchanged:: 2015.5.0 If all passed backends start with a minus sign (``-``), then these backends will be excluded from the enabled backends. However, if there is a mix of backends with and without a minus sign (ex: ``backend=-roots,git``) then the ones starting with a minus sign will be disregarded. Additionally, fileserver backends can now be passed as a comma-separated list. In earlier versions, they needed to be passed as a python list (ex: ``backend="['roots', 'git']"``) CLI Example: .. code-block:: bash salt-run fileserver.envs salt-run fileserver.envs backend=roots,git salt-run fileserver.envs git
def _structure_list(self, obj, cl):
    """Convert an iterable into a (potentially generic) list."""
    if is_bare(cl) or cl.__args__[0] is Any:
        # No element type to apply: a shallow copy is enough.
        return list(obj)
    elem_type = cl.__args__[0]
    structure = self._structure_func.dispatch(elem_type)
    return [structure(e, elem_type) for e in obj]
Convert an iterable to a potentially generic list.
def _has_ipv6(host):
    """Return True if the system can bind an IPv6 address on ``host``."""
    # App Engine sandboxes do not allow IPv6 sockets at all.
    if _appengine_environ.is_appengine_sandbox():
        return False
    if not socket.has_ipv6:
        return False
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind((host, 0))
        return True
    except Exception:
        # Any failure (no support, address in use, ...) means "no IPv6".
        return False
    finally:
        if sock:
            sock.close()
Returns True if the system can bind an IPv6 address.
def get(self, url):
    """Return the Enclosure response for the given enclosure URL."""
    self._query()  # make sure the underlying response has been fetched
    return Enclosure(self._resp.get(url), url)
Get the response for the given enclosure URL
def _call(self, x): y = np.bincount(self._indices_flat, weights=x, minlength=self.range.size) out = y.reshape(self.range.shape) if self.variant == 'dirac': weights = getattr(self.range, 'cell_volume', 1.0) elif self.variant == 'char_fun': weights = 1.0 else: raise RuntimeError('The variant "{!r}" is not yet supported' ''.format(self.variant)) if weights != 1.0: out /= weights return out
Sum all values if indices are given multiple times.
def set_data_from_iterable(self, frames, values, labels=None):
    """Initialize the dataset structure from iterables.

    :param frames: The temporal indices of the dataset
    :param values: The values of the dataset
    :param labels: Optional labels, one per frame; when omitted every point
        is tagged with the synthetic 'New Point' label (id 0)
    :raises TypeError: if any argument is not iterable
    :raises ValueError: if frames and values differ in length
    """
    # Python-3 fix: the original used Python-2-only syntax
    # (`raise TypeError, "..."`, `xrange`) and `collections.Iterable`,
    # which moved to collections.abc.
    if not isinstance(frames, collections.abc.Iterable):
        raise TypeError("frames must be an iterable")
    if not isinstance(values, collections.abc.Iterable):
        raise TypeError("values must be an iterable")
    if len(frames) != len(values):
        # Explicit raise instead of `assert`: asserts vanish under `python -O`.
        raise ValueError("frames and values must have the same length")
    self.frames = frames
    self.values = values
    if labels is None:
        self.label2int['New Point'] = 0
        self.int2label[0] = 'New Point'
        self.labels = [0 for _ in range(len(frames))]
    else:
        if not isinstance(labels, collections.abc.Iterable):
            raise TypeError("labels must be an iterable")
        # NOTE(review): this branch appends to an existing self.labels,
        # mirroring the original behavior — confirm initialization upstream.
        for label in labels:
            if label not in self.label2int:
                self.label2int[label] = len(self.label2int)
                self.int2label[len(self.int2label)] = label
            self.labels.append(self.label2int[label])
Initialize a dataset structure from iterable parameters :param frames: The temporal indices of the dataset :param values: The values of the dataset :type frames: iterable :type values: iterable
def search(self):
    """Execute the solr search query and return the processed results."""
    query_params = self.solr_params()
    logging.info("PARAMS=" + str(query_params))
    response = self.solr.search(**query_params)
    logging.info("Docs found: {}".format(response.hits))
    return self._process_search_results(response)
Execute solr search query
def __bytes_to_share_data(self, payload):
    """Attempt to auto-decode the payload data according to its mime type.

    Returns a (data, mime) pair; mime is None when decoding succeeded.
    """
    raw = payload[P_DATA]
    mime = payload[P_MIME]
    if mime is None or not self.__auto_encode_decode:
        return raw, mime
    mime = expand_idx_mimetype(mime).lower()
    try:
        if mime == 'application/ubjson':
            return ubjloadb(raw), None
        if mime == 'text/plain; charset=utf8':
            return raw.decode('utf-8'), None
        return raw, mime
    except:  # noqa: E722 - deliberately broad: decoding must never raise
        logger.warning('auto-decode failed, returning bytes',
                       exc_info=DEBUG_ENABLED)
        return raw, mime
Attempt to auto-decode data
def _return_feature(self, feature_type, feature_name, new_feature_name=...): if self.new_names: return feature_type, feature_name, (self.rename_function(feature_name) if new_feature_name is ... else new_feature_name) return feature_type, feature_name
Helping function of `get_features`
def usufyToTextExport(d, fPath=None):
    """Workaround to export data to a .txt file or to show the information.

    Args:
    -----
        d: Data to export.
        fPath: File path for the output file. If None was provided, it will
            assume that it has to print it.

    Returns:
    --------
        unicode: It sometimes returns a unicode representation of the Sheet
        received (when writing the file fails).
    """
    # Short-circuit with an ASCII placeholder box when there is nothing to show.
    if d == []:
        return "+------------------+\n| No data found... |\n+------------------+"
    import pyexcel as pe
    import pyexcel.ext.text as text
    if fPath == None:
        isTerminal = True
    else:
        isTerminal = False
    try:
        oldData = get_data(fPath)
    except:
        oldData = {"OSRFramework":[]}
    # NOTE(review): oldData is computed but never used below — presumably
    # leftover from a merge-with-existing-file feature; confirm.
    tabularData = _generateTabularData(d, {"OSRFramework":[[]]}, True, canUnicode=False)
    sheet = pe.Sheet(tabularData["OSRFramework"])
    sheet.name = "Profiles recovered (" + getCurrentStrDatetime() +")."
    sheet.name_columns_by_row(0)
    text.TABLEFMT = "grid"
    try:
        with open(fPath, "w") as oF:
            oF.write(str(sheet))
    except Exception as e:
        # Fall back to returning the rendered sheet (Python-2 unicode).
        return unicode(sheet)
Workaround to export to a .txt file or to show the information. Args: ----- d: Data to export. fPath: File path for the output file. If None was provided, it will assume that it has to print it. Returns: -------- unicode: It sometimes returns a unicode representation of the Sheet received.
def return_dat(self, chan, begsam, endsam):
    """Read data from an EDF file, channel by channel, applying calibration.

    Parameters
    ----------
    chan : list of int
        index (indices) of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, where the first dimension is the channels and the
        second dimension are the samples.
    """
    assert begsam < endsam
    # Pre-fill with NaN so samples not covered by any record stay undefined.
    dat = empty((len(chan), endsam - begsam))
    dat.fill(NaN)
    with self.filename.open('rb') as f:
        for i_dat, blk, i_blk in _select_blocks(self.blocks, begsam, endsam):
            dat_in_rec = self._read_record(f, blk, chan)
            # Copy the overlapping slice of this record into the output window.
            dat[:, i_dat[0]:i_dat[1]] = dat_in_rec[:, i_blk[0]:i_blk[1]]
    # Digital-to-physical calibration: (d - dig_min) * gain + phys_min.
    dat = ((dat.astype('float64') - self.dig_min[chan, newaxis]) * self.gain[chan, newaxis] + self.phys_min[chan, newaxis])
    return dat
Read data from an EDF file. Reads channel by channel, and adjusts the values by calibration. Parameters ---------- chan : list of int index (indices) of the channels to read begsam : int index of the first sample endsam : int index of the last sample Returns ------- numpy.ndarray A 2d matrix, where the first dimension is the channels and the second dimension are the samples.
def set(self, option, value):
    """Set configuration ``option`` to ``value``, creating the config dict on first use."""
    if self.config is None:
        self.config = {}
    self.config[option] = value
Sets an option to a value.
def parse_rst(text: str) -> docutils.nodes.document:
    """Parse ``text`` assuming it is RST markup and return the document tree."""
    settings = docutils.frontend.OptionParser(
        components=(docutils.parsers.rst.Parser,),
    ).get_default_values()
    document = docutils.utils.new_document('<rst-doc>', settings=settings)
    docutils.parsers.rst.Parser().parse(text, document)
    return document
Parse text assuming it's an RST markup.
def initialize_repo(self):
    """Clone the repository and set up the git user identity.

    Yields the output of each git subprocess (generator-based execution).
    """
    logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
    clone_args = ['git', 'clone']
    if self.depth and self.depth > 0:
        clone_args += ['--depth', str(self.depth)]
    clone_args += ['--branch', self.branch_name, self.git_url, self.repo_dir]
    yield from execute_cmd(clone_args)
    # A local identity is required for automatic merge commits later on.
    yield from execute_cmd(
        ['git', 'config', 'user.email', 'nbgitpuller@example.com'],
        cwd=self.repo_dir)
    yield from execute_cmd(
        ['git', 'config', 'user.name', 'nbgitpuller'],
        cwd=self.repo_dir)
    logging.info('Repo {} initialized'.format(self.repo_dir))
Clones repository & sets up usernames.
def _load_sets(self, directory):
    """Load set names from the set file in ``directory``; return them as a list of strings."""
    try:
        set_path = os.path.join(directory, SET_FILE)
        raw_tags = open(set_path).readline().split(',')
        tags = [tag.strip() for tag in raw_tags]
    except:  # noqa: E722 - any failure means the album file is unusable
        logger.error("No sets found in %s, FB needs an album name (%s)"
                     % (directory, SET_FILE))
        sys.exit(1)
    return tags
Loads sets from set file and return as list of strings
def source_cmd(args, stdin=None):
    """Simple cmd.exe-specific wrapper around source-foreign.

    Returns a dict to be used as a new environment.
    """
    args = list(args)
    binary = locate_binary(args[0])
    if binary:
        args[0] = binary
    if not os.path.isfile(args[0]):
        raise RuntimeError("Command not found: %s" % args[0])
    # Build the quoted 'call ...' command that cmd.exe will run first.
    prevcmd = 'call ' + ' '.join(argvquote(arg, force=True) for arg in args)
    prevcmd = escape_windows_cmd_string(prevcmd)
    args.append('--prevcmd={}'.format(prevcmd))
    args.insert(0, 'cmd')
    args += [
        '--interactive=0',
        '--sourcer=call',
        '--envcmd=set',
        '--seterrpostcmd=if errorlevel 1 exit 1',
        '--use-tmpfile=1',
    ]
    return source_foreign(args, stdin=stdin)
Simple cmd.exe-specific wrapper around source-foreign. returns a dict to be used as a new environment
def semiyearly(date=None):
    """Return the start of the half-year containing ``date`` (Jan 1 or Jul 1).

    :param date: The reference date; defaults to today.
    :return: ``datetime.date`` for January 1 (months 1-6) or July 1 (months 7-12).
    """
    # Bug fix: the default was previously `date=datetime.date.today()`, which
    # is evaluated ONCE at import time — a long-running process would keep
    # using a stale "today". Resolve it at call time instead.
    if date is None:
        date = datetime.date.today()
    return datetime.date(date.year, 1 if date.month < 7 else 7, 1)
Twice a year.
def clean_blobstore_cache(self):
    """Delete all existing buildpack caches in the blobstore.

    :raises CFException: when the API does not answer with 202 Accepted.
    """
    endpoint = self.api_url + self.blobstores_builpack_cache_url
    resp, rcode = self.request('DELETE', endpoint)
    if rcode != 202:
        raise CFException(resp, rcode)
    return resp
Deletes all of the existing buildpack caches in the blobstore
def _render_reward(self, r: np.float32) -> None:
    """Print the reward ``r`` to stdout (4 decimal places) followed by a blank line."""
    print("reward = {:.4f}".format(float(r)))
    print()
Prints reward `r`.
def _tune(self, args):
    """Handle the server's connection-tuning proposal: negotiate channel-max,
    frame-max and heartbeat, then reply with tune-ok.

    A value of zero from either side means "no preference / no limit".
    """
    client_heartbeat = self.client_heartbeat or 0
    # A zero from the server means "keep our current value".
    self.channel_max = args.read_short() or self.channel_max
    self.frame_max = args.read_long() or self.frame_max
    self.method_writer.frame_max = self.frame_max
    self.server_heartbeat = args.read_short() or 0
    # If either side sent 0, take the max so a single zero does not silently
    # disable heartbeats; otherwise use the stricter (smaller) interval.
    if self.server_heartbeat == 0 or client_heartbeat == 0:
        self.heartbeat = max(self.server_heartbeat, client_heartbeat)
    else:
        self.heartbeat = min(self.server_heartbeat, client_heartbeat)
    # An unset client heartbeat disables heartbeats entirely.
    if not self.client_heartbeat:
        self.heartbeat = 0
    self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat)
Propose connection tuning parameters This method proposes a set of connection configuration values to the client. The client can accept and/or adjust these. PARAMETERS: channel_max: short proposed maximum channels The maximum total number of channels that the server allows per connection. Zero means that the server does not impose a fixed limit, but the number of allowed channels may be limited by available server resources. frame_max: long proposed maximum frame size The largest frame size that the server proposes for the connection. The client can negotiate a lower value. Zero means that the server does not impose any specific limit but may reject very large frames if it cannot allocate resources for them. RULE: Until the frame-max has been negotiated, both peers MUST accept frames of up to 4096 octets large. The minimum non-zero value for the frame- max field is 4096. heartbeat: short desired heartbeat delay The delay, in seconds, of the connection heartbeat that the server wants. Zero means the server does not want a heartbeat.
def make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params):
    """Return a list of `MutantFeatureValue`s and a list of mutant Examples.

    Args:
      example_protos: The examples to mutate.
      original_feature: A `OriginalFeatureList` that encapsulates the feature
        to mutate.
      index_to_mutate: The index of the int64_list or float_list to mutate;
        None replaces the whole value list.
      viz_params: A `VizParams` object that contains the UI state of the
        request.

    Returns:
      A list of `MutantFeatureValue`s and a list of mutant examples.
    """
    mutant_features = make_mutant_features(original_feature, index_to_mutate, viz_params)
    mutant_examples = []
    for example_proto in example_protos:
        for mutant_feature in mutant_features:
            # Work on a deep copy so the original example is never modified.
            copied_example = copy.deepcopy(example_proto)
            feature_name = mutant_feature.original_feature.feature_name
            try:
                feature_list = proto_value_for_feature(copied_example, feature_name)
                if index_to_mutate is None:
                    new_values = mutant_feature.mutant_value
                else:
                    new_values = list(feature_list)
                    new_values[index_to_mutate] = mutant_feature.mutant_value
                # Replace the proto's value list in place.
                del feature_list[:]
                feature_list.extend(new_values)
                mutant_examples.append(copied_example)
            except (ValueError, IndexError):
                # Feature missing or index out of range: keep the unmodified copy.
                mutant_examples.append(copied_example)
    return mutant_features, mutant_examples
Return a list of `MutantFeatureValue`s and a list of mutant Examples. Args: example_protos: The examples to mutate. original_feature: A `OriginalFeatureList` that encapsulates the feature to mutate. index_to_mutate: The index of the int64_list or float_list to mutate. viz_params: A `VizParams` object that contains the UI state of the request. Returns: A list of `MutantFeatureValue`s and a list of mutant examples.
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, force_real=True): r roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.]) if force_real: roots = numpy.real(roots) if known_is_secondary: return roots[roots.argmax()] else: return roots[roots.argmin()]
r"""Returns the other component mass given one of the component masses and the symmetric mass ratio. This requires finding the roots of the quadratic equation: .. math:: \eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0. This has two solutions which correspond to :math:`m_1` being the heavier mass or it being the lighter mass. By default, `known_mass` is assumed to be the heavier (primary) mass, and the smaller solution is returned. Use the `other_is_secondary` to invert. Parameters ---------- known_mass : float The known component mass. eta : float The symmetric mass ratio. known_is_secondary : {False, bool} Whether the known component mass is the primary or the secondary. If True, `known_mass` is assumed to be the secondary (lighter) mass and the larger solution is returned. Otherwise, the smaller solution is returned. Default is False. force_real : {True, bool} Force the returned mass to be real. Returns ------- float The other component mass.
def decrypt_file(file, key):
    """Decrypt the file ``file`` (which must end in ``.enc``).

    The plaintext is written to the same location without the ``.enc``
    extension, with its permissions set to 0o600.

    See also :func:`doctr.local.encrypt_file`.
    """
    if not file.endswith('.enc'):
        raise ValueError("%s does not end with .enc" % file)
    fernet = Fernet(key)
    with open(file, 'rb') as encrypted:
        plaintext = fernet.decrypt(encrypted.read())
    target = file[:-4]  # strip the '.enc' suffix
    with open(target, 'wb') as decrypted:
        decrypted.write(plaintext)
    # Restrict the decrypted secret to the owner only.
    os.chmod(target, 0o600)
Decrypts the file ``file``. The encrypted file is assumed to end with the ``.enc`` extension. The decrypted file is saved to the same location without the ``.enc`` extension. The permissions on the decrypted file are automatically set to 0o600. See also :func:`doctr.local.encrypt_file`.
def cms_check(migrate_cmd=False):
    """Run the django CMS ``cms check`` command (only when django CMS is installed)."""
    from django.core.management import call_command
    try:
        import cms  # noqa: F401 - only used to detect that django CMS exists
        _create_db(migrate_cmd)
        call_command('cms', 'check')
    except ImportError:
        print('cms_check available only if django CMS is installed')
Runs the django CMS ``cms check`` command
def month_days(year, month):
    """Return how many days are in the given month of the given year."""
    if month > 13:
        raise ValueError("Incorrect month index")
    # Months that are always defective (29 days).
    if month in (IYYAR, TAMMUZ, ELUL, TEVETH, VEADAR):
        return 29
    # Adar is full only in a leap year.
    if month == ADAR and not leap(year):
        return 29
    # Heshvan and Kislev vary with the total length of the year.
    if month == HESHVAN and year_days(year) % 10 != 5:
        return 29
    if month == KISLEV and year_days(year) % 10 == 3:
        return 29
    return 30
How many days are in a given month of a given year
def _move_file_with_sizecheck(tx_file, final_file):
    """Move a transaction file to its final location, verifying sizes match.

    An empty '<final>.bcbiotmp' flag file marks an in-progress transfer;
    its presence afterwards means the transaction did not finish successfully.
    """
    flag_file = final_file + ".bcbiotmp"
    open(flag_file, 'wb').close()
    expected = utils.get_size(tx_file)
    shutil.move(tx_file, final_file)
    transferred = utils.get_size(final_file)
    assert expected == transferred, (
        'distributed.transaction.file_transaction: File copy error: '
        'file or directory on temporary storage ({}) size {} bytes '
        'does not equal size of file or directory after transfer to '
        'shared storage ({}) size {} bytes'.format(
            tx_file, expected, final_file, transferred)
    )
    # Transfer completed and verified: remove the in-progress flag.
    utils.remove_safe(flag_file)
Move transaction file to final location, with size checks avoiding failed transfers. Creates an empty file with '.bcbiotmp' extention in the destination location, which serves as a flag. If a file like that is present, it means that transaction didn't finish successfully.
def get_hierarchy_design_session_for_hierarchy(self, hierarchy_id, proxy):
    """Get the ``OsidSession`` for the hierarchy design service for the given hierarchy.

    arg:    hierarchy_id (osid.id.Id): the ``Id`` of the hierarchy
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.hierarchy.HierarchyDesignSession) - a
            ``HierarchyDesignSession``
    raise:  Unimplemented - ``supports_hierarchy_design()`` is ``false``
    """
    if not self.supports_hierarchy_design():
        raise errors.Unimplemented()
    return sessions.HierarchyDesignSession(hierarchy_id, proxy,
                                           self._runtime)
Gets the ``OsidSession`` associated with the topology design service using for the given hierarchy. arg: hierarchy_id (osid.id.Id): the ``Id`` of the hierarchy arg: proxy (osid.proxy.Proxy): a proxy return: (osid.hierarchy.HierarchyDesignSession) - a ``HierarchyDesignSession`` raise: NotFound - ``hierarchy_id`` is not found raise: NullArgument - ``hierarchy_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_hierarchy_design()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_hierarchy_design()`` and ``supports_visible_federation()`` are ``true``.*
def destroy(self, request, project, pk=None):
    """Delete a bug-job-map entry.

    ``pk`` is a composite key in the form job_id-bug_id (parsed in that order).
    """
    job_id, bug_id = (int(part) for part in pk.split("-"))
    job = Job.objects.get(repository__name=project, id=job_id)
    BugJobMap.objects.filter(job=job, bug_id=bug_id).delete()
    return Response({"message": "Bug job map deleted"})
Delete bug-job-map entry. pk is a composite key in the form job_id-bug_id
def mirror_pull(self, **kwargs):
    """Start the pull mirroring process for the project.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server failed to perform the request
    """
    endpoint = '/projects/%s/mirror/pull' % self.get_id()
    self.manager.gitlab.http_post(endpoint, **kwargs)
Start the pull mirroring process for the project. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server failed to perform the request
def dict2dzn(objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None):
    """Serialize the given objects into a list of dzn-format statement strings.

    Supported types include ``str``, ``int``, ``float``, ``set``, ``list``
    and ``dict``; lists and dicts become (multi-dimensional) dzn arrays.
    Enum types encountered among the values are declared once when
    ``declare_enums`` is True. Optionally writes the statements to a file.

    Parameters
    ----------
    objs : dict
        Mapping of variable names to the objects to serialize.
    declare : bool
        Whether to include the declaration of the variable in the statements
        or just the assignment. Default is ``False``.
    assign : bool
        Whether to include assignment of the value in the statements or just
        the declaration.
    declare_enums : bool
        Whether to declare the enums found as types of the objects to
        serialize. Default is ``True``.
    wrap : bool
        Whether to wrap the serialized values.
    fout : str
        Path to the output file; if None no output file is written.

    Returns
    -------
    list
        List of strings containing the dzn-encoded objects.
    """
    log = logging.getLogger(__name__)
    statements = []
    declared_enums = set()
    for name, value in objs.items():
        if _is_enum(value) and declare_enums:
            enum_type = type(value)
            if enum_type.__name__ not in declared_enums:
                statements.append(stmt2enum(enum_type, declare=declare,
                                            assign=assign, wrap=wrap))
                declared_enums.add(enum_type.__name__)
        statements.append(stmt2dzn(name, value, declare=declare,
                                   assign=assign, wrap=wrap))
    if fout:
        log.debug('Writing file: {}'.format(fout))
        with open(fout, 'w') as f:
            for stmt in statements:
                f.write('{}\n\n'.format(stmt))
    return statements
Serializes the objects in input and produces a list of strings encoding them into dzn format. Optionally, the produced dzn is written on a file. Supported types of objects include: ``str``, ``int``, ``float``, ``set``, ``list`` or ``dict``. List and dict are serialized into dzn (multi-dimensional) arrays. The key-set of a dict is used as index-set of dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``. Parameters ---------- objs : dict A dictionary containing the objects to serialize, the keys are the names of the variables. declare : bool Whether to include the declaration of the variable in the statements or just the assignment. Default is ``False``. assign : bool Whether to include assignment of the value in the statements or just the declaration. declare_enums : bool Whether to declare the enums found as types of the objects to serialize. Default is ``True``. wrap : bool Whether to wrap the serialized values. fout : str Path to the output file, if None no output file is written. Returns ------- list List of strings containing the dzn-encoded objects.
def _software_params_to_argparse(parameters):
    """Convert a SoftwareParameterCollection into an ArgumentParser object.

    Boolean parameters become store_true/store_false flags (inverted when
    the default is True); all others are typed options with an optional
    default value.

    Parameters
    ----------
    parameters: SoftwareParameterCollection
        The software parameters

    Returns
    -------
    argparse: ArgumentParser
        An initialized argument parser
    """
    parser = ArgumentParser()
    boolean_defaults = {}
    for parameter in parameters:
        arg_desc = {"dest": parameter.name, "required": parameter.required, "help": ""}
        if parameter.type == "Boolean":
            default = _to_bool(parameter.defaultParamValue)
            # The flag toggles the default: store_true when the default is False.
            arg_desc["action"] = "store_false" if default else "store_true"
            boolean_defaults[parameter.name] = default
        else:
            python_type = _convert_type(parameter.type)
            arg_desc["type"] = python_type
            arg_desc["default"] = (None if parameter.defaultParamValue is None
                                   else python_type(parameter.defaultParamValue))
        parser.add_argument(*_cytomine_parameter_name_synonyms(parameter.name),
                            **arg_desc)
    parser.set_defaults(**boolean_defaults)
    return parser
Converts a SoftwareParameterCollection into an ArgumentParser object. Parameters ---------- parameters: SoftwareParameterCollection The software parameters Returns ------- argparse: ArgumentParser An initialized argument parser
def _get_oauth_params(self, req_kwargs): oauth_params = {} oauth_params['oauth_consumer_key'] = self.consumer_key oauth_params['oauth_nonce'] = sha1( str(random()).encode('ascii')).hexdigest() oauth_params['oauth_signature_method'] = self.signature.NAME oauth_params['oauth_timestamp'] = int(time()) if self.access_token is not None: oauth_params['oauth_token'] = self.access_token oauth_params['oauth_version'] = self.VERSION self._parse_optional_params(oauth_params, req_kwargs) return oauth_params
Prepares OAuth params for signing.
def plotMatches2(listofNValues, errors, listOfScales, scaleErrors,
                 fileName="images/scalar_matches.pdf"):
    """Plot the match and scaled-match figures side by side, in an aspect
    ratio appropriate for the paper.
    """
    width, height = figaspect(0.4)
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(width, height))
    plotMatches(listofNValues, errors, fileName=None, fig=fig, ax=left_ax)
    plotScaledMatches(listOfScales, scaleErrors, fileName=None, fig=fig, ax=right_ax)
    plt.savefig(fileName)
    plt.close()
Plot two figures side by side in an aspect ratio appropriate for the paper.
def iscan(self, *, match=None, count=None):
    """Incrementally iterate the keys space using ``async for``.

    Usage example:

    >>> async for key in redis.iscan(match='something*'):
    ...     print('Matched:', key)
    """
    def next_page(cursor):
        return self.scan(cursor, match=match, count=count)
    return _ScanIter(next_page)
Incrementally iterate the keys space using async for. Usage example: >>> async for key in redis.iscan(match='something*'): ... print('Matched:', key)
def use(self, tube: str) -> None:
    """Change the currently used tube.

    :param tube: The tube to use.
    """
    command = b'use %b' % tube.encode('ascii')
    self._send_cmd(command, b'USING')
Changes the currently used tube. :param tube: The tube to use.
def add_package(self, check_name, package):
    """Add a Package to the catalog for the given check."""
    self._package_set.add(package)
    package_data = self._packages[package.name]
    self._checks_deps[check_name].append(package)
    # Track which checks pin each version / environment marker.
    if package.version:
        package_data['versions'][package.version].append(check_name)
    if package.marker:
        package_data['markers'][package.marker].append(check_name)
Add a Package to the catalog for the given check
def features(self):
    """Lazily fetch and cache the 'features' entry from the metadata."""
    if self._features is None:
        metadata = self.metadata()
        # Cache an empty list when the metadata carries no features.
        self._features = metadata["features"] if "features" in metadata else []
    return self._features
lazy fetch and cache features
def enum_models(self, assumptions=[]):
    """Iterate over models of the internal formula.

    Each yielded model is blocked (its negation is added as a clause) so the
    next solve call must produce a different model.

    :param assumptions: literals assumed true for every solve call
    """
    # NOTE(review): the mutable default `assumptions=[]` is kept for interface
    # compatibility; it is never mutated here.
    if self.glucose:
        done = False
        while not done:
            if self.use_timer:
                # Fix: time.clock() was removed in Python 3.8; use the
                # high-resolution performance counter instead.
                start_time = time.perf_counter()
            self.status = pysolvers.glucose41_solve(self.glucose, assumptions)
            if self.use_timer:
                self.call_time = time.perf_counter() - start_time
                self.accu_time += self.call_time
            model = self.get_model()
            if model:
                # Block this model so the solver must find a different one.
                self.add_clause([-l for l in model])
                yield model
            else:
                done = True
Iterate over models of the internal formula.
def template_substitute(text, **kwargs):
    """Replace ``{name}`` placeholders in ``text`` using the keyword mapping.

    Placeholders without a corresponding keyword are left untouched.

    :param text: Text to search and replace placeholders in.
    :param kwargs: Mapping of placeholder names to replacement values.
    :return: Potentially modified text with replaced placeholders.
    """
    for name, value in kwargs.items():
        # str.replace is a no-op when the placeholder is absent.
        text = text.replace("{%s}" % name, value)
    return text
Replace placeholders in text by using the keyword mapping. Placeholders that are not represented in the mapping are left untouched. :param text: Text to search and replace placeholders in. :param kwargs: Mapping/dict of placeholder keys and values. :return: Potentially modified text with replaced placeholders.
def create_new_attachment_by_content_id(self, content_id, attachments, callback=None):
    """Add one or more attachments to a Confluence Content entity, with
    optional comments.

    :param content_id: A string containing the id of the attachments content
        container.
    :param attachments: A dict with a "file" key (I/O-like value) and an
        optional "comment" key, or a list of such dicts.
    :param callback: OPTIONAL: The callback to execute on the resulting data
        before the method returns. Default: None (raw data returned).
    :return: The JSON data returned from the content/{id}/child/attachment
        endpoint, or the results of the callback.
    """
    # NOTE(review): input validation uses `assert`, which disappears under
    # `python -O`; raising ValueError would be safer — confirm callers first.
    if isinstance(attachments, list):
        assert all(isinstance(at, dict) and "file" in list(at.keys()) for at in attachments)
    elif isinstance(attachments, dict):
        assert "file" in list(attachments.keys())
    else:
        assert False
    return self._service_post_request("rest/api/content/{id}/child/attachment".format(id=content_id),
                                      headers={"X-Atlassian-Token": "nocheck"},
                                      files=attachments,
                                      callback=callback)
Add one or more attachments to a Confluence Content entity, with optional comments. Comments are optional, but if included there must be as many comments as there are files, and the comments must be in the same order as the files. :param content_id (string): A string containing the id of the attachments content container. :param attachments (list of dicts or dict): This is a list of dictionaries or a dictionary. Each dictionary must have the key "file" with a value that is I/O like (file, StringIO, etc.), and may also have a key "comment" with a string for file comments. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
def _broks(self, broker_name):
    """Get the broks from the daemon (used by brokers to fetch the brok list).

    :param broker_name: name of the requesting broker — currently unused here
    :return: Brok list serialized
    :rtype: dict
    """
    # Hold the lock so the brok list is not mutated while we snapshot it.
    with self.app.broks_lock:
        res = self.app.get_broks()
    return serialize(res, True)
Get the broks from the daemon This is used by the brokers to get the broks list of a daemon :return: Brok list serialized :rtype: dict
def human_readable_number(number, suffix=""):
    """Format *number* as a human-readable string with binary-prefix units.

    Adapted from http://stackoverflow.com/a/1094933

    :param number: the number (int or float).
    :param suffix: the unit string appended after the prefix.
    :return: formatted string, e.g. ``'1.5K'``.
    """
    units = ("", "K", "M", "G", "T", "P", "E", "Z")
    for unit in units:
        if abs(number) < 1024.0:
            return "%3.1f%s%s" % (number, unit, suffix)
        number /= 1024.0
    # Anything that survives all divisions is in the yotta range.
    return "%.1f%s%s" % (number, "Y", suffix)
Format the given number into a human-readable string. Code adapted from http://stackoverflow.com/a/1094933 :param variant number: the number (int or float) :param string suffix: the unit of the number :rtype: string
def console_progress_callback(current, maximum, message=None):
    """Log task progress to the console, throttled for large tasks.

    For tasks with more than 1000 steps, only every 1000th step (and the
    final step) is logged.

    :param current: current progress.
    :param maximum: maximum range (point at which the task is complete).
    :param message: optional dict with a 'description' entry to log.
    """
    throttled = maximum > 1000 and current % 1000 != 0 and current != maximum
    if throttled:
        return
    if message is not None:
        LOGGER.info(message['description'])
    LOGGER.info('Task progress: %i of %i' % (current, maximum))
Simple console based callback implementation for tests. :param current: Current progress. :type current: int :param maximum: Maximum range (point at which task is complete. :type maximum: int :param message: Optional message dictionary to containing content we can display to the user. See safe.definitions.analysis_steps for an example of the expected format :type message: dict
def save_auxiliary_files(self, layer, destination):
    """Copy a layer's auxiliary files (.xml, .json) next to a 'save as' copy.

    If auxiliary files exist beside the layer's source, they are copied
    beside *destination* with the destination's basename.

    :param layer: the layer which has been saved as (QgsMapLayer).
    :param destination: the new filename of the layer.
    """
    enable_busy_cursor()
    auxiliary_files = ['xml', 'json']
    for auxiliary_file in auxiliary_files:
        # Derive <basename>.<ext> for both the source and the destination.
        source_basename = os.path.splitext(layer.source())[0]
        source_file = "%s.%s" % (source_basename, auxiliary_file)
        destination_basename = os.path.splitext(destination)[0]
        destination_file = "%s.%s" % (destination_basename, auxiliary_file)
        try:
            if os.path.isfile(source_file):
                shutil.copy(source_file, destination_file)
        except (OSError, IOError):
            # Most likely the destination location is not writable.
            display_critical_message_bar(
                title=self.tr('Error while saving'),
                message=self.tr(
                    'The destination location must be writable.'),
                iface_object=self.iface
            )
        except Exception:
            # Catch-all so a failing copy never crashes the save flow.
            display_critical_message_bar(
                title=self.tr('Error while saving'),
                message=self.tr('Something went wrong.'),
                iface_object=self.iface
            )
    disable_busy_cursor()
Save auxiliary files when using the 'save as' function. If some auxiliary files (.xml, .json) exist, this function will copy them when the 'save as' function is used on the layer. :param layer: The layer which has been saved as. :type layer: QgsMapLayer :param destination: The new filename of the layer. :type destination: str
def description(self):
    """The description for this task, lazily fetched and base64-decoded.

    See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks

    :rtype: string
    :returns: The description for this task.
    """
    if self._description is None:
        # Refresh properties from the API if the payload is not cached yet.
        if 'payloadBase64' not in self._properties:
            self._properties = self.taskqueue.get_task(id=self.id)._properties
        encoded = self._properties.get('payloadBase64', b'')
        self._description = base64.b64decode(encoded).decode("ascii")
    return self._description
The description for this task. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: string :returns: The description for this task.
def credentials(self):
    """Dict representation of all credentials for the database.

    ``filename`` is resolved against the current working directory when
    set; otherwise it is None.
    """
    if self.filename:
        resolved_filename = os.path.join(os.getcwd(), self.filename)
    else:
        resolved_filename = None
    return {
        "username": self.username,
        "password": self.password,
        "hostname": self.hostname,
        "port": self.port,
        "filename": resolved_filename,
        "dbname": self.dbname,
        "dbtype": self.dbtype,
        "schemas": self.schemas,
        "limit": self.limit,
        "keys_per_column": self.keys_per_column,
    }
Dict representation of all credentials for the database.
def reference_of(selector: shapeLabel, cntxt: Union[Context, ShExJ.Schema]
                 ) -> Optional[ShExJ.shapeExpr]:
    """Return the shape expression in the schema referenced by *selector*.

    :param selector: identifier of the element to select within the schema.
    :param cntxt: Context node or ShEx Schema.
    :return: the matching shape expression, or None if nothing matches.
    """
    schema = cntxt.schema if isinstance(cntxt, Context) else cntxt
    if selector is START:
        return schema.start
    for shape in schema.shapes:
        if not isinstance(shape, ShExJ.ShapeExternal) and shape.id == selector:
            return shape
    # Fall back to the start shape when its id matches the selector.
    start = schema.start
    if start is not None and start.id == selector:
        return start
    return None
Return the shape expression in the schema referenced by selector, if any :param selector: identifier of the element to select within the schema :param cntxt: Context node or ShEx Schema :return: the matching shape expression, or None if nothing matches
def angledependentairtransmission_errorprop(twotheta, dtwotheta, mu_air, dmu_air, sampletodetectordistance, dsampletodetectordistance):
    """Angle-dependent air absorption correction in the scattered beam path, with error propagation.

    The correction factor is ``exp(mu_air * L / cos(2theta))`` where ``L`` is
    the sample-to-detector distance.  The error term is standard Gaussian
    propagation over ``mu_air``, ``L`` and ``2theta``.

    :param twotheta: matrix of two-theta values.
    :param dtwotheta: absolute error matrix of two-theta.
    :param mu_air: the linear absorption coefficient of air
        (``1/mu_air`` and the distance must share the same length unit).
    :param dmu_air: error of the linear absorption coefficient of air.
    :param sampletodetectordistance: sample-to-detector distance.
    :param dsampletodetectordistance: error of the sample-to-detector distance.
    :return: tuple ``(correction, dcorrection)``; the scattering intensity
        matrix should be multiplied by ``correction``.
    """
    return (np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)),
            np.sqrt(dmu_air ** 2 * sampletodetectordistance ** 2 *
                    np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) /
                    np.cos(twotheta) ** 2 +
                    dsampletodetectordistance ** 2 * mu_air ** 2 *
                    np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) /
                    np.cos(twotheta) ** 2 +
                    dtwotheta ** 2 * mu_air ** 2 * sampletodetectordistance ** 2 *
                    np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) *
                    np.sin(twotheta) ** 2 / np.cos(twotheta) ** 4)
            )
Correction for the angle dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.
def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve tags for all streams matching the given WHERE clause.

    :param where: the where clause (e.g. 'path like "keti"').
    :param archiver: archiver to use; empty string means the client's default.
    :param timeout: seconds to wait for a response from the archiver.
    :return: the 'metadata' mapping from the query result (empty dict if absent).
    """
    query_string = "select * where {0}".format(where)
    result = self.query(query_string, archiver, timeout)
    return result.get('metadata', {})
Retrieves tags for all streams matching the given WHERE clause Arguments: [where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"') [archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed into the constructor for the client [timeout]: time in seconds to wait for a response from the archiver
def show_event_handlers(self, stream=sys.stdout, verbose=0):
    """Write the list of installed event handlers to *stream*.

    :param stream: writable text stream (default sys.stdout).
    :param verbose: when truthy, print each handler's full class
        description instead of its string form.
    """
    lines = ["List of event handlers installed:"]
    for handler in self.event_handlers:
        text = handler.__class__.cls2str() if verbose else str(handler)
        lines.extend(text.split("\n"))
    stream.write("\n".join(lines))
    stream.write("\n")
Print to `stream` the event handlers installed for this flow.
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account):
    """Buy a specific asset at a certain rate against a base asset."""
    quote_amount = Amount(buy_amount, buy_asset)
    order_price = Price(
        price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares
    )
    tx = order_price.market.buy(
        order_price, quote_amount, account=account, expiration=order_expiration
    )
    print_tx(tx)
Buy a specific asset at a certain rate against a base asset
def set_attributes(self, **attributes):
    """Set a group of attributes, calling ix_set only after all are assigned.

    Global auto_set is suspended while assigning; set is performed once at
    the end based on the previous global auto_set value.

    :param attributes: dictionary of <attribute, value> to set.
    """
    previous_auto_set = IxeObject.get_auto_set()
    IxeObject.set_auto_set(False)
    for attr_name, attr_value in attributes.items():
        setattr(self, attr_name, attr_value)
    if previous_auto_set:
        self.ix_set()
    IxeObject.set_auto_set(previous_auto_set)
Set group of attributes without calling set between attributes regardless of global auto_set. Set will be called only after all attributes are set based on global auto_set. :param attributes: dictionary of <attribute, value> to set.
def share_with_names(self):
    """Yield the names of the containers shared with the running container.

    Entries in ``share_with`` may be plain names or container objects.
    """
    for entry in self.share_with:
        is_plain_name = isinstance(entry, six.string_types)
        yield entry if is_plain_name else entry.container_name
The names of the containers that we share with the running container
def migrate_autoload_details(autoload_details, shell_name, shell_type):
    """Migrate autoload details by namespacing models and attribute names.

    Resource models are prefixed with *shell_name*; root-level attributes
    (empty relative address) are prefixed with *shell_type*, while nested
    attributes are prefixed with their resource's migrated model.

    :param autoload_details: structure with ``resources`` and ``attributes``.
    :param shell_name: namespace for resource models.
    :param shell_type: namespace for root-level attribute names.
    :return: the same autoload_details object, mutated in place.
    """
    model_by_address = {}
    for resource in autoload_details.resources:
        resource.model = "{shell_name}.{model}".format(shell_name=shell_name, model=resource.model)
        model_by_address[resource.relative_address] = resource.model
    for attribute in autoload_details.attributes:
        if attribute.relative_address:
            attribute.attribute_name = "{model}.{attr_name}".format(
                model=model_by_address[attribute.relative_address],
                attr_name=attribute.attribute_name)
        else:
            attribute.attribute_name = "{shell_type}.{attr_name}".format(
                shell_type=shell_type, attr_name=attribute.attribute_name)
    return autoload_details
Migrate autoload details. Add namespace for attributes :param autoload_details: :param shell_name: :param shell_type: :return:
def interleave_longest(*iterables):
    """Return a new iterable yielding from each iterable in turn,
    skipping any that are exhausted.

    >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7, 3, 8]

    Produces the same output as ``roundrobin`` but may perform better when
    the number of iterables is large.
    """
    # Local sentinel marks exhausted slots; filtered out below.
    sentinel = object()
    merged = chain.from_iterable(zip_longest(*iterables, fillvalue=sentinel))
    return (item for item in merged if item is not sentinel)
Return a new iterable yielding from each iterable in turn, skipping any that are exhausted. >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7, 3, 8] This function produces the same output as :func:`roundrobin`, but may perform better for some inputs (in particular when the number of iterables is large).
def get(self):
    """Get layout options.

    :return: dict of <option, value> pairs from ``self.options`` limited
        to the keys listed in ``self._allowed_layout``.
    """
    # Iterate the dict view directly instead of materializing a list.
    allowed = self._allowed_layout
    return {key: value for key, value in self.options.items() if key in allowed}
Get layout options.
def _init_threads(self): if self._io_thread is None: self._io_thread = Thread(target=self._select) self._io_thread.start() if self._writer_thread is None: self._writer_thread = Thread(target=self._writer) self._writer_thread.start()
Initializes the IO and Writer threads
def _absolute(self, path):
    """Convert *path* to an absolute FilePath.

    Relative paths are resolved against this object's WorkingDir.
    """
    candidate = FilePath(path)
    if isabs(candidate):
        return candidate
    return self.WorkingDir + candidate
Convert a filename to an absolute path
def highlight_multi_regex(str_, pat_to_color, reflags=0):
    """Color every regex match in *str_* according to a pattern->color mapping.

    Patterns must be mutually exclusive (matches may not overlap).
    FIXME: consider using pygments instead.

    :param str_: text to highlight.
    :param pat_to_color: mapping of regex pattern to color name.
    :param reflags: flags forwarded to ``re.finditer``.
    :return: the text with matched spans wrapped by ``color_text``.
    """
    spans = []
    for pattern, color in pat_to_color.items():
        for match in re.finditer(pattern, str_, flags=reflags):
            spans.append((match.end(), match.start(), color))
    # Replace from the rightmost span backwards so earlier offsets stay valid.
    result = str_
    for end, start, color in sorted(spans, reverse=True):
        result = result[:start] + color_text(result[start:end], color) + result[end:]
    return result
FIXME: Use pygments instead. Patterns must be mutually exclusive (matches may not overlap).
def get_instance(self, payload):
    """Build an ExecutionStepInstance from an API payload.

    :param dict payload: payload response from the API.
    :returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepInstance
    :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepInstance
    """
    solution = self._solution
    return ExecutionStepInstance(
        self._version,
        payload,
        flow_sid=solution['flow_sid'],
        execution_sid=solution['execution_sid'],
    )
Build an instance of ExecutionStepInstance :param dict payload: Payload response from the API :returns: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepInstance :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepInstance
def stmt_type(obj, mk=True):
    """Return a standardized, backwards-compatible type key for *obj*.

    :param obj: object whose type is requested.
    :param mk: when True and *obj* is a Statement, return the class itself;
        otherwise return the class name string.
    """
    if isinstance(obj, Statement) and mk:
        return type(obj)
    return type(obj).__name__
Return standardized, backwards compatible object type String. This is a temporary solution to make sure type comparisons and matches keys of Statements and related classes are backwards compatible.
def iterate_specific_packet_range():
    """Count the number of packets archived in the last hour and print it."""
    stop = datetime.utcnow()
    start = stop - timedelta(hours=1)
    total = 0
    for _ in archive.list_packets(start=start, stop=stop):
        total += 1
    print('Found', total, 'packets in range')
Count the number of packets in a specific range.
def AsDict(self, dt=True):
    """A dict representation of this User instance, using JSON key names.

    :param dt: if True, nested shakes render dates as datetime objects;
        if False, as ISO strings.
    :return: dict representing this User instance.
    """
    data = {}
    if self.name:
        data['name'] = self.name
        data['mlkshk_url'] = self.mlkshk_url
    # Optional scalar fields are included only when set.
    for key in ('profile_image_url', 'id', 'about', 'website'):
        value = getattr(self, key)
        if value:
            data[key] = value
    if self.shakes:
        data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes]
    data['shake_count'] = self.shake_count
    return data
A dict representation of this User instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this User instance
def delete_note(self, note_id):
    """Permanently delete a note.

    The note is first moved to trash via ``trash_note``; on failure that
    error is returned.  Otherwise a DELETE request is issued against the
    Simplenote data API.

    :param note_id: key of the note to delete.
    :return: tuple ``(note, status)`` — an empty dict and 0 on success,
        or the caught exception and -1 on failure.
    :raises SimplenoteLoginFailed: when the API responds 401.
    """
    # Trash first; propagate any failure as-is.
    note, status = self.trash_note(note_id)
    if (status == -1):
        return note, status
    params = '/i/%s' % (str(note_id))
    request = Request(url=DATA_URL+params, method='DELETE')
    # Attach the authentication token header required by the API.
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
    except IOError as e:
        return e, -1
    except HTTPError as e:
        # NOTE(review): HTTPError subclasses IOError, so this branch is
        # likely unreachable — the IOError clause above catches it first;
        # confirm whether the 401 handling ever fires.
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    return {}, 0
Method to permanently delete a note Arguments: - note_id (string): key of the note to trash Returns: A tuple `(note, status)` - note (dict): an empty dict or an error message - status (int): 0 on success and -1 otherwise
def click(self, locator, params=None, timeout=None):
    """Click a web element.

    :param locator: locator tuple or WebElement instance.
    :param params: (optional) locator parameters.
    :param timeout: (optional) time to wait for the element.
    :return: None
    """
    # Thin public wrapper around the internal click implementation.
    self._click(locator, params, timeout)
Click web element. :param locator: locator tuple or WebElement instance :param params: (optional) locator parameters :param timeout: (optional) time to wait for element :return: None
def delete(self, interface, vrid):
    """Delete a vrrp instance from an interface.

    If the vrrp does not exist on the interface the command is still a
    no-op on the device config.

    :param interface: the interface to configure.
    :param vrid: the vrid number of the vrrp to delete.
    :return: True if the configuration command succeeded.
    """
    command = "no vrrp %d" % vrid
    return self.configure_interface(interface, command)
Deletes a vrrp instance from an interface Note: This method will attempt to delete the vrrp from the node's operational config. If the vrrp does not exist on the interface then this method will not perform any changes but still return True Args: interface (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be deleted. Returns: True if the vrrp could be deleted otherwise False (see Node)
def swap(self, c2):
    """Order *self* and *c2* as market standard (ascending ``order``).

    :return: tuple ``(inverted, low, high)`` where *inverted* is True when
        the pair had to be reordered.
    """
    low, high = self, c2
    if low.order > high.order:
        return True, high, low
    return False, low, high
put the order of currencies as market standard
def _namespace_requested(self, namespace):
    """Return True when *namespace* is covered by the requested namespaces.

    Ignored databases and ignored collections are always rejected.
    """
    if namespace is None:
        return False
    ns_tuple = self._tuplefy_namespace(namespace)
    if ns_tuple[0] in IGNORE_DBS or ns_tuple[1] in IGNORE_COLLECTIONS:
        return False
    return self._tuple_requested(ns_tuple)
Checks whether the requested_namespaces contain the provided namespace
def get_templatetag_module(cls):
    """Return the templatetags module name providing this class's cache tag.

    Used to render the nocache blocks by loading the correct module;
    the lookup is memoized per class.
    """
    modules_cache = CacheTag._templatetags_modules
    if cls not in modules_cache:
        all_tags = cls.get_all_tags_and_filters_by_function()['tags']
        tag_name = CacheTag._templatetags[cls]['cache']
        modules_cache[cls] = all_tags[tag_name][0]
    return modules_cache[cls]
Return the templatetags module name for which the current class is used. It's used to render the nocache blocks by loading the correct module
def create_api_network_ipv4(self):
    """Return an ApiNetworkIPv4 services facade bound to this client's credentials."""
    return ApiNetworkIPv4(
        self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of Api Networkv4 services facade.
def swap(ctx):
    """Swap the two slot configurations."""
    controller = ctx.obj['controller']
    click.echo('Swapping slots...')
    try:
        controller.swap_slots()
    except YkpersError as err:
        _failed_to_write_msg(ctx, err)
Swaps the two slot configurations.
def _check_field_names(self, field_names): if field_names: for field_name in field_names: try: self.column[field_name] except KeyError: raise InvalidIndexError('Trying to use non indexed field "%s"' % field_name)
Raises InvalidIndexError if any of a field_name in field_names is not indexed.
def _set_or_check_remote_id(self, remote_id):
    """Set the stream's remote id on first open, or verify it is unchanged.

    :param remote_id: remote-id reported by the device.
    :raises usb_exceptions.AdbProtocolError: if an already-open stream
        reports a different remote id.
    """
    if not self.remote_id:
        # First time the remote id is learned: transition PENDING -> OPEN.
        assert self.closed_state == self.ClosedState.PENDING, 'Bad ClosedState!'
        self.remote_id = remote_id
        self.closed_state = self.ClosedState.OPEN
    elif self.remote_id != remote_id:
        # NOTE(review): format string and args are passed printf-style to the
        # exception constructor — presumably AdbProtocolError formats them
        # itself; confirm against its definition.
        raise usb_exceptions.AdbProtocolError(
            '%s remote-id change to %s', self, remote_id)
Set or check the remote id.
def local_machine_uuid():
    """Return the local machine's unique hardware identifier.

    Queries HAL via ``hal-get-property`` for the system hardware uuid.

    >>> uuid = local_machine_uuid()

    :return: a ``uuid.UUID`` parsed from the HAL output.
    :raises subprocess.CalledProcessError: if the hal-get-property call fails.
    """
    result = subprocess.check_output(
        'hal-get-property --udi '
        '/org/freedesktop/Hal/devices/computer '
        '--key system.hardware.uuid'.split()
    ).strip()
    # check_output returns bytes on Python 3, but uuid.UUID(hex=...) requires
    # text; decoding is also harmless on Python 2.
    return uuid.UUID(hex=result.decode('ascii'))
Return local machine unique identifier. >>> uuid = local_machine_uuid()
def get_mean_masked_features_distance(mean_features_0,
                                      mean_features_1,
                                      mean_masks_0,
                                      mean_masks_1,
                                      n_features_per_channel=None,
                                      ):
    """Euclidean distance between two mean feature vectors, weighted by
    their per-channel mean masks.

    Each mask value is repeated ``n_features_per_channel`` times to align
    with the flattened feature vectors.
    """
    assert n_features_per_channel > 0
    weights_0 = np.repeat(mean_masks_0, n_features_per_channel)
    weights_1 = np.repeat(mean_masks_1, n_features_per_channel)
    weighted_0 = mean_features_0.ravel() * weights_0
    weighted_1 = mean_features_1.ravel() * weights_1
    return np.linalg.norm(weighted_0 - weighted_1)
Compute the distance between the mean masked features.
def _assert_valid_categorical_missing_value(value):
    """Check that *value* is a valid categorical missing_value.

    :raises TypeError: if the value cannot be used as the missing_value
        for a categorical-dtype Term.
    """
    allowed = LabelArray.SUPPORTED_SCALAR_TYPES
    if isinstance(value, allowed):
        return
    raise TypeError(
        "Categorical terms must have missing values of type "
        "{types}.".format(
            types=' or '.join([t.__name__ for t in allowed]),
        )
    )
Check that value is a valid categorical missing_value. Raises a TypeError if the value is cannot be used as the missing_value for a categorical_dtype Term.
def create(self):
    """Create a GitHub personal access token for this app.

    The token note is ``app_name`` plus the host name, and the requested
    scopes are those configured on the instance.

    :return: the newly created token string.
    :raises TFARequired: two-factor auth is required and no ``tfa_token``
        was supplied.
    :raises AlreadyExistsError: a token with the same note already exists.
    :raises BadPassword: user/password authentication failed.
    :raises requests.HTTPError: any other non-2xx response.
    """
    headers = dict()
    if self.tfa_token:
        # Supply the one-time password when two-factor auth is enabled.
        headers["X-GitHub-OTP"] = self.tfa_token
    token_name = self.app_name + platform.node()
    payload = dict(note=token_name, scopes=self.scopes)
    response = requests.post(
        self.api_url + "authorizations",
        auth=(self.user, self.password),
        headers=headers,
        json=payload
    )
    # A 401 carrying an OTP challenge header means TFA is required,
    # not that the credentials are wrong.
    if response.status_code == 401 and "required" in response.headers.get("X-GitHub-OTP", ""):
        raise TFARequired("TFA required for the user")
    # 422: a token with this note already exists on the account.
    if response.status_code == 422:
        raise AlreadyExistsError("APP already exists. Please delete {} token".format(token_name))
    if response.status_code == 401:
        raise BadPassword("Bad User/Password")
    response.raise_for_status()
    return response.json()["token"]
Creates a token It uses the app_name as the notes and the scopes are the permissions required by the application. See those in github when configuring an app token Raises a TFARequired if two-factor authentication is required after the attempt to create the token without having called tfa before
def which(program):
    """Locate *program* and return its full path, or None if not found.

    A program containing a directory component is only checked directly;
    otherwise each directory on PATH is searched.
    """
    directory, _ = op.split(program)
    if directory:
        if is_exe(program):
            return program
    else:
        for path_dir in environ["PATH"].split(pathsep):
            candidate = op.join(path_dir, program)
            if is_exe(candidate):
                return candidate
    return None
Check whether the program exists on PATH and return its full path.
def __insert_data(postid, userid, rating):
    """Insert a new rating record and return its uid."""
    record_uid = tools.get_uuid()
    TabRating.create(
        uid=record_uid,
        post_id=postid,
        user_id=userid,
        rating=rating,
        timestamp=tools.timestamp(),
    )
    return record_uid
Insert a new record.
def exhaustive_iri_check(self,
                         ontology: pd.DataFrame,
                         iri_predicate: str,
                         diff: bool = True,
                         ) -> Tuple[list]:
    """Partition ontology rows by whether their iri already exists in InterLex.

    :param ontology: DataFrame built from an ontology; column names are the
        predicates (and classes, when present).
    :param iri_predicate: column name holding the entity iri (usually "iri").
    :param diff: when True, additionally compute an exhaustive diff of the
        matches (O(n^2) — slow when there are many matches).
    :return: ``(inside, outside)`` — or ``(inside, outside, diff)`` when
        *diff* is True — where *inside* holds rows whose iri matched an
        InterLex row and *outside* holds the rest.
    """
    inside, outside = [], []
    header = ['Index'] + list(ontology.columns)
    for raw_row in ontology.itertuples():
        row = {header[i]: val for i, val in enumerate(raw_row)}
        entity_iri = row[iri_predicate]
        # Some loaders wrap the cell value in a list; unwrap a single iri.
        if isinstance(entity_iri, list):
            # BUG FIX: the original exited when the list was NON-empty and
            # then indexed [0] on an EMPTY list; require exactly one iri.
            if len(entity_iri) != 1:
                exit('Need to have only 1 iri in the cell from the onotology.')
            entity_iri = entity_iri[0]
        ilx_row = self.iri2row.get(entity_iri)
        if ilx_row:
            inside.append({
                'external_ontology_row': row,
                'ilx_rows': [ilx_row],
            })
        else:
            outside.append(row)
    if diff:
        diff = self.__exhaustive_diff(inside)
        return inside, outside, diff
    return inside, outside
All entities with conflicting iris gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and, if classes exist, they are also included in the colnames. iri_predicate: usually in qname form and is the colname of the DataFrame for iri Default is "iri" for graph2pandas module diff: complete exhaustive diff between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... complicated but useful diff between matches only
def numRegisteredForRoleName(event, roleName):
    """Template tag: number of registrations for a dance role, by role name.

    Returns None when *event* is not an Event or the role name is unknown.
    """
    if not isinstance(event, Event):
        return None
    try:
        role = DanceRole.objects.get(name=roleName)
    except ObjectDoesNotExist:
        return None
    return event.numRegisteredForRole(role)
This tag allows one to access the number of registrations for any dance role using only the role's name.