code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def from_points(cls, point1, point2):
    """Return a Vector instance from the displacement between two points.

    :param point1: the Point the displacement is measured from
    :param point2: the Point subtracted from *point1*
    :raises TypeError: if either argument is not a Point
    """
    if isinstance(point1, Point) and isinstance(point2, Point):
        # NOTE(review): project Point API spells the method 'substract'.
        displacement = point1.substract(point2)
        return cls(displacement.x, displacement.y, displacement.z)
    # Original raised a bare TypeError with no message; give callers context.
    raise TypeError('from_points() requires two Point instances')
Return a Vector instance from two given points.
def _output_text(complete_output, categories):
    """Render the collected results as plain text, one section per category.

    :return: str, the formatted report
    """
    output = ""
    for result in complete_output:
        list_result = complete_output[result]
        if not list_result:
            continue
        # Order entries by their recorded value, highest first.
        list_result_sorted = sorted(list_result, key=lambda x: list_result[x],
                                    reverse=True)
        output += "\n\n{0}:\n".format(result)
        for element in list_result_sorted:
            output += "\n{0} {1}".format(list_result[element], element)
        output += "\n--"
    return output
Output the results obtained in text format. :return: str, text formatted output
def _deploy(self):
    """Deploy the environment and wait for all hooks to finish executing.

    Timeout (seconds) is configurable via AMULET_SETUP_TIMEOUT, default 900.
    """
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout)
        )
    except Exception:
        raise
Deploy environment and wait for all hooks to finish executing.
def db_connect(cls, path):
    """Open a connection to the chainstate DB at *path*.

    Autocommit mode (isolation_level=None) with an effectively infinite
    busy timeout; rows come back through the engine's row factory.
    """
    connection = sqlite3.connect(path, isolation_level=None, timeout=2**30)
    connection.row_factory = StateEngine.db_row_factory
    return connection
connect to our chainstate db
def add_separator(self, sub_menu='Advanced'):
    """Add a separator to the editor's context menu.

    :return: the QAction configured as a separator
    :rtype: QtWidgets.QAction
    """
    action = QtWidgets.QAction(self)
    action.setSeparator(True)
    if not sub_menu:
        self._actions.append(action)
    else:
        try:
            menu = self._sub_menus[sub_menu]
        except KeyError:
            # Unknown sub-menu: the action is still created and returned,
            # just not attached anywhere.
            pass
        else:
            menu.addAction(action)
    return action
Adds a separator to the editor's context menu. :return: The separator that has been added. :rtype: QtWidgets.QAction
def load_module(self, module_name, path=None):
    """Import *module_name* inside this isolation context and return a proxy.

    Falls back to sys.path when no explicit search path is given.
    """
    self.ensure_started()
    search_path = sys.path if path is None else path
    proxy = self.client.call(_load_module, module_name, search_path)
    proxy.__isolation_context__ = self
    return proxy
Import a module into this isolation context and return a proxy for it.
def updateEvent(self, event=None, home=None):
    """Refresh the cached per-camera event lists with the latest events."""
    if not home:
        home = self.default_home
    if not event:
        # No explicit event given: start from the oldest of the cached
        # per-camera "last events".
        by_time = dict()
        for cam_id in self.lastEvent:
            by_time[self.lastEvent[cam_id]['time']] = self.lastEvent[cam_id]
        event = by_time[sorted(by_time)[0]]
    home_data = self.homeByName(home)
    postParams = {
        "access_token": self.getAuthToken,
        "home_id": home_data['id'],
        "event_id": event['id'],
    }
    resp = postRequest(_GETEVENTSUNTIL_REQ, postParams)
    for e in resp['body']['events_list']:
        self.events[e['camera_id']][e['time']] = e
    # Re-derive each camera's most recent event from the merged history.
    for camera in self.events:
        self.lastEvent[camera] = self.events[camera][sorted(self.events[camera])[-1]]
Update the list of events with the latest ones
def do_block(parser, token):
    """Parse a {% block %}-style tag: capture the enclosed nodelist up to
    the matching {% end<name> %} and hand everything to BlockNode."""
    name, args, kwargs = get_signature(token, contextable=True)
    kwargs['nodelist'] = parser.parse(('end%s' % name,))
    parser.delete_first_token()
    return BlockNode(parser, name, *args, **kwargs)
Process several nodes inside a single block Block functions take ``context``, ``nodelist`` as first arguments If the second to last argument is ``as``, the rendered result is stored in the context and is named whatever the last argument is. Syntax:: {% [block] [var args...] [name=value kwargs...] [as varname] %} ... nodelist ... {% end[block] %} Examples:: {% render_block as rendered_output %} {{ request.path }}/blog/{{ blog.slug }} {% endrender_block %} {% highlight_block python %} import this {% endhighlight_block %}
def _is_list_iter(self):
    """Return True when the current statement iterates over a list:
    a list literal, a list-typed variable, or a list-typed global attribute.
    Used in for loops."""
    if isinstance(self.stmt.iter, ast.Name):
        iter_var_type = self.context.vars.get(self.stmt.iter.id).typ
    else:
        iter_var_type = None
    if isinstance(self.stmt.iter, ast.List) or isinstance(iter_var_type, ListType):
        return True
    if isinstance(self.stmt.iter, ast.Attribute):
        global_var = self.context.globals.get(self.stmt.iter.attr)
        if global_var and isinstance(global_var.typ, ListType):
            return True
    return False
Test if the current statement is a type of list, used in for loops.
def data(self, data):
    """Append *data* to the receive buffer and drain complete messages
    until the handler reports nothing more to process."""
    self._buffer += data
    while self._data_handler():
        pass
Use a length prefixed protocol to give the length of a pickled message.
def search(query, team=None):
    """Search for packages: first in the given (or logged-in) team registry,
    then in the public cloud registry, printing results as they come."""
    if team is None:
        team = _find_logged_in_team()

    if team is not None:
        session = _get_session(team)
        response = session.get("%s/api/search/" % get_registry_url(team),
                               params=dict(q=query))
        print("* Packages in team %s" % team)
        packages = response.json()['packages']
        for pkg in packages:
            print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
        if not packages:
            print("(No results)")

    print("* Packages in public cloud")
    public_session = _get_session(None)
    response = public_session.get("%s/api/search/" % get_registry_url(None),
                                  params=dict(q=query))
    packages = response.json()['packages']
    for pkg in packages:
        print("%(owner)s/%(name)s" % pkg)
    if not packages:
        print("(No results)")
Search for packages
def geometric_series(q, n):
    """Compute the finite geometric series sum_{k=0}^{n} q**k.

    For q == 1 the sum is n + 1; otherwise (1 - q**(n+1)) / (1 - q).
    Works on scalars and element-wise on arrays.
    """
    q = np.asarray(q)
    if n < 0:
        raise ValueError('Finite geometric series is only defined for n>=0.')
    if q.ndim == 0:
        # Scalar path.
        if q == 1:
            return (n + 1) * 1.0
        return (1.0 - q ** (n + 1)) / (1.0 - q)
    # Array path: treat q == 1 entries separately to avoid division by zero.
    s = np.zeros(np.shape(q), dtype=q.dtype)
    ones = (q == 1.0)
    s[ones] = (n + 1) * 1.0
    rest = np.logical_not(ones)
    s[rest] = (1.0 - q[rest] ** (n + 1)) / (1.0 - q[rest])
    return s
Compute finite geometric series. s = \sum_{k=0}^{n} q^{k} = \frac{1-q^{n+1}}{1-q} \quad (q \neq 1), \qquad s = n + 1 \quad (q = 1) Parameters ---------- q : array-like The common ratio of the geometric series. n : int The number of terms in the finite series. Returns ------- s : float or ndarray The value of the finite series.
def alter_subprocess_kwargs_by_platform(**kwargs):
    """Fill in platform-appropriate defaults for running subprocesses.

    On POSIX, inherited file descriptors are closed; on Windows, creation
    of a console window is suppressed. Returns the modified kwargs dict.
    """
    kwargs.setdefault('close_fds', os.name == 'posix')
    if os.name == 'nt':
        # CREATE_NO_WINDOW keeps child console processes invisible.
        CREATE_NO_WINDOW = 0x08000000
        kwargs.setdefault('creationflags', CREATE_NO_WINDOW)
    return kwargs
Given a dict, populate kwargs to create a generally useful default setup for running subprocess processes on different platforms. For example, `close_fds` is set on posix and creation of a new console window is disabled on Windows. This function will alter the given kwargs and return the modified dict.
def drop_table(self, cursor, target, options):
    "Drops the target table."
    statement = 'DROP TABLE IF EXISTS {0}'.format(self.qualified_names[target])
    cursor.execute(statement)
Drops the target table.
def same_types(self, index1, index2):
    """Return True if both symbol table entries share the same type and
    that type is not NO_TYPE.

    Any lookup failure is routed through self.error().
    """
    # Default False: the original left `same` unbound when the lookup
    # raised and self.error() returned, causing UnboundLocalError below.
    same = False
    try:
        same = self.table[index1].type == self.table[index2].type != SharedData.TYPES.NO_TYPE
    except Exception:
        self.error()
    return same
Returns True if both symbol table elements are of the same type
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Read the entity, always omitting ``account_password``.

    Works around a server bug: see Bugzilla #1243036.
    """
    if attrs is None:
        attrs = self.update_json([])
    if ignore is None:
        ignore = set()
    ignore.add('account_password')
    return super(AuthSourceLDAP, self).read(entity, attrs, ignore, params)
Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_.
def profiles():
    """Return all Koji connection profile file paths, ordered by preference."""
    paths = []
    for pattern in PROFILES:
        paths += glob(os.path.expanduser(pattern))
    return paths
List of all the connection profile files, ordered by preference. :returns: list of all Koji client config files. Example: ['/home/kdreyer/.koji/config.d/kojidev.conf', '/etc/koji.conf.d/stg.conf', '/etc/koji.conf.d/fedora.conf']
def ids2tokens(token_ids: Iterable[int], vocab_inv: Dict[int, str],
               exclude_set: Set[int]) -> Iterator[str]:
    """Lazily map token IDs to words via *vocab_inv*, skipping IDs in
    *exclude_set*.

    :param token_ids: iterable of token IDs
    :param vocab_inv: inverse vocabulary (ID -> word)
    :param exclude_set: token IDs to exclude
    :return: iterator of words

    The original zipped *token_ids* against a generator that itself
    consumed *token_ids*; for a one-shot iterator input that interleaved
    consumption, mis-pairing IDs with tokens and dropping items. A single
    pass fixes that (and no longer looks up excluded IDs at all).
    """
    return (vocab_inv[token_id] for token_id in token_ids
            if token_id not in exclude_set)
Transforms a list of token IDs into a list of words, excluding any IDs in `exclude_set`. :param token_ids: The list of token IDs. :param vocab_inv: The inverse vocabulary. :param exclude_set: The list of token IDs to exclude. :return: The list of words.
def synchronizer_class(self):
    """Return (lazily importing) the configured synchronizer class, or
    False when no synchronizer path or no layer is configured."""
    if not self.synchronizer_path or self.synchronizer_path == 'None' or not self.layer:
        return False
    # Invalidate a cached class whose name no longer matches the path.
    if (self._synchronizer_class is not None
            and self._synchronizer_class.__name__ not in self.synchronizer_path):
        self._synchronizer = None
        self._synchronizer_class = None
    if not self._synchronizer_class:
        self._synchronizer_class = import_by_path(self.synchronizer_path)
    return self._synchronizer_class
returns synchronizer class
def _fmt(self, tag, msg):
    """Format ``tag: msg`` for the exception's string representation.

    None becomes '<unset>'; whitespace-only messages yield None; messages
    longer than 2048 chars are truncated to 1024 plus '...'; multi-line
    messages are indented under the tag.
    """
    text = str(msg or '<unset>').strip()
    if not text:
        return
    if len(text) > 2048:
        text = text[:1024] + '...'
    if text.count('\n') <= 1:
        return '{}: {}\n'.format(tag, text.strip())
    return '{}:\n {}\n'.format(tag, text.replace('\n', '\n ').strip())
Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars.
def from_pypirc(pypi_repository):
    """Load the configuration section for *pypi_repository* from a .pypirc file.

    Searches PYPIRC_LOCATIONS in order; the repository must be listed under
    [distutils] index-servers and have its own section.

    :raises ConfigError: when no location yields a configured repository.
    """
    ret = {}
    for pypirc_path in PYPIRC_LOCATIONS:
        pypirc_path = os.path.expanduser(pypirc_path)
        if not os.path.isfile(pypirc_path):
            continue
        # SafeConfigParser was deprecated since Python 3.2 and removed in
        # 3.12; ConfigParser is the drop-in replacement.
        parser = configparser.ConfigParser()
        parser.read(pypirc_path)
        if 'distutils' not in parser.sections():
            continue
        if 'index-servers' not in parser.options('distutils'):
            continue
        if pypi_repository not in parser.get('distutils', 'index-servers'):
            continue
        if pypi_repository in parser.sections():
            for option in parser.options(pypi_repository):
                ret[option] = parser.get(pypi_repository, option)
    if not ret:
        raise ConfigError(
            'repository does not appear to be configured in pypirc ({})'.format(pypi_repository)
            + ', remember that it needs an entry in [distutils] and its own section'
        )
    return ret
Load configuration from .pypirc file, cached to only run once
def create_subvariant (self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements):
    """Create a Subvariant instance for this main target.

    'root_targets' are the virtual targets returned to dependents;
    'all_targets' is every virtual target created while building;
    'build_request' is the requested property set.
    """
    assert is_iterable_typed(root_targets, virtual_target.VirtualTarget)
    assert is_iterable_typed(all_targets, virtual_target.VirtualTarget)
    assert isinstance(build_request, property_set.PropertySet)
    assert is_iterable_typed(sources, virtual_target.VirtualTarget)
    assert isinstance(rproperties, property_set.PropertySet)
    assert isinstance(usage_requirements, property_set.PropertySet)

    for target in root_targets:
        target.root(True)

    subvariant = Subvariant(self, build_request, sources, rproperties,
                            usage_requirements, all_targets)
    # Record the creating subvariant on every target that has none yet.
    for target in all_targets:
        if not target.creating_subvariant():
            target.creating_subvariant(subvariant)
    return subvariant
Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties
def start(cls, ev):
    """Click handler: validate the author-name input and fire the REST query."""
    ev.stopPropagation()
    ev.preventDefault()

    author = cls.input_el.value.strip()
    if not author:
        # Highlight the empty field instead of submitting.
        cls.input_el.style.border = "2px solid red"
        return
    cls.hide_errors()
    AuthorBar.show(50)
    make_request(
        url=join(settings.API_PATH, "aleph/authors_by_name"),
        data={"name": author},
        on_complete=cls.on_complete
    )
Event handler which starts the request to REST API.
def create_handler(target: str):
    """Build a logging handler for *target*: 'stderr'/'stdout' map to stream
    handlers; anything else is treated as a watched file path."""
    streams = {'stderr': sys.stderr, 'stdout': sys.stdout}
    if target in streams:
        return logging.StreamHandler(streams[target])
    return logging.handlers.WatchedFileHandler(filename=target)
Create a handler for logging to ``target``
def main():
    """Parse CLI arguments and dispatch to the selected sub-command."""
    args = parser.parse_args()
    try:
        function = args.func
    except AttributeError:
        # No sub-command was selected.
        parser.print_usage()
        parser.exit(1)
    function(vars(args))
Parse the args and call whatever function was selected
def minimize(self, loss_fn, x, optim_state):
    """One optimization step (analogous to tf.Optimizer.minimize): compute
    gradients of *loss_fn* at *x* and apply them.

    Returns (new_x, new_optim_state) as produced by _apply_gradients.
    """
    gradients = self._compute_gradients(loss_fn, x, optim_state)
    return self._apply_gradients(gradients, x, optim_state)
Analogous to tf.Optimizer.minimize :param loss_fn: tf Tensor, representing the loss to minimize :param x: list of Tensor, analogous to tf.Optimizer's var_list :param optim_state: A possibly nested dict, containing any optimizer state. Returns: new_x: list of Tensor, updated version of `x` new_optim_state: dict, updated version of `optim_state`
def same_notebook_code(nb1, nb2):
    """Return True when `nb1` and `nb2` have the same cell structure and
    identical source in every code cell."""
    cells1, cells2 = nb1['cells'], nb2['cells']
    if len(cells1) != len(cells2):
        return False
    for c1, c2 in zip(cells1, cells2):
        if c1['cell_type'] != c2['cell_type']:
            return False
        if c1['cell_type'] == 'code' and c1['source'] != c2['source']:
            return False
    return True
Return true if the code cells of notebook objects `nb1` and `nb2` are the same.
def get_a_satellite_link(sat_type, sat_dict):
    """Build a SatelliteLink object for the given satellite type.

    :param sat_type: type of satellite (e.g. 'arbiter')
    :param sat_dict: satellite configuration data
    """
    link_cls = get_alignak_class('alignak.objects.%slink.%sLink'
                                 % (sat_type, sat_type.capitalize()))
    return link_cls(params=sat_dict, parsing=False)
Get a SatelliteLink object for a given satellite type and a dictionary :param sat_type: type of satellite :param sat_dict: satellite configuration data :return:
def get_correlations(self, chain=0, parameters=None):
    """Return (parameter_names, correlation_matrix) for the given chain,
    derived by normalizing the covariance matrix."""
    parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
    stddev = np.sqrt(np.diag(cov))
    correlations = cov / (stddev[None, :] * stddev[:, None])
    return parameters, correlations
Takes a chain and returns the correlation between chain parameters. Parameters ---------- chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all parameters for the given chain. Returns ------- tuple The first index giving a list of parameter names, the second index being the 2D correlation matrix.
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
    """Validate that reference/estimate intervals and pitch arrays are
    consistent in length and that all pitch values are positive.

    :raises ValueError: on length mismatch or non-positive pitch values.
    """
    validate_intervals(ref_intervals, est_intervals)
    if ref_intervals.shape[0] != ref_pitches.shape[0]:
        raise ValueError('Reference intervals and pitches have different '
                         'lengths.')
    if est_intervals.shape[0] != est_pitches.shape[0]:
        raise ValueError('Estimated intervals and pitches have different '
                         'lengths.')
    if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
        raise ValueError("Reference contains at least one non-positive pitch "
                         "value")
    if est_pitches.size > 0 and np.min(est_pitches) <= 0:
        raise ValueError("Estimate contains at least one non-positive pitch "
                         "value")
Checks that the input annotations to a metric look like time intervals and a pitch list, and throws helpful errors if not. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz
def sct_report_string(report):
    """Render a schematron validator error report (svrl failed-assert
    entries) as a human-readable string."""
    namespaces = {"svrl": "http://purl.oclc.org/dsdl/svrl"}
    lines = []
    failures = report.findall("svrl:failed-assert", namespaces=namespaces)
    for index, failed in enumerate(failures):
        text = failed.find("svrl:text", namespaces=namespaces).text
        lines.append("{}. {}".format(index + 1, text))
        lines.append(" test: {}".format(failed.attrib["test"]))
        lines.append(" location: {}".format(failed.attrib["location"]))
        lines.append("\n")
    return "\n".join(lines)
Return a human-readable string representation of the error report returned by lxml's schematron validator.
def reset_parameters(self):
    """Re-initialize weights uniformly in [-1/sqrt(d), 1/sqrt(d)] where
    d = num_features, zeroing the padding row if one is configured."""
    stdv = 1.0 / math.sqrt(self.num_features)
    self.weight.weight.data.uniform_(-stdv, stdv)
    if self.bias is not None:
        self.bias.data.uniform_(-stdv, stdv)
    if self.padding_idx is not None:
        # Keep the padding embedding at zero after re-init.
        self.weight.weight.data[self.padding_idx].fill_(0)
Reinitiate the weight parameters.
def _handle_request_error(self, orig_request, error, start_response):
    """Convert a RequestError into a JSON WSGI response.

    Returns the response body string produced by util.send_wsgi_response.
    """
    headers = [('Content-Type', 'application/json')]
    status_code = error.status_code()
    body = error.rest_error()
    reason = httplib.responses.get(status_code, 'Unknown Error')
    response_status = '%d %s' % (status_code, reason)
    cors_handler = self._create_cors_handler(orig_request)
    return util.send_wsgi_response(response_status, headers, body,
                                   start_response, cors_handler=cors_handler)
Handle a request error, converting it to a WSGI response. Args: orig_request: An ApiRequest, the original request from the user. error: A RequestError containing information about the error. start_response: A function with semantics defined in PEP-333. Returns: A string containing the response body.
def send(self, cmd):
    """Forward *cmd* (list of command bytes) to the bridge using this
    object's wait/repeat settings."""
    self._bridge.send(cmd, wait=self.wait, reps=self.reps)
Send a command to the bridge. :param cmd: List of command bytes.
def modules_directory():
    """Return the absolute path of the core `modules` directory that sits
    next to this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "modules")
Get the core modules directory.
def hide(self):
    """Temporarily hide the spinner so callers can write to the terminal."""
    spinner_running = self._spin_thread and self._spin_thread.is_alive()
    if spinner_running and not self._hide_spin.is_set():
        self._hide_spin.set()
        # Return to the start of the line and wipe whatever the spinner drew.
        sys.stdout.write("\r")
        self._clear_line()
        sys.stdout.flush()
Hide the spinner to allow for custom writing to the terminal.
def load(self):
    """Load all CLI modules and aliases, exactly once."""
    if self._modules_loaded is True:
        return
    self.load_modules_from_python(routes.ALL_ROUTES)
    self.aliases.update(routes.ALL_ALIASES)
    self._load_modules_from_entry_points('softlayer.cli')
    self._modules_loaded = True
Loads all modules.
def add_query_occurrence(self, report):
    """Merge *report* into the aggregation, or append a fresh entry for a
    query mask seen for the first time."""
    initial_millis = int(report['parsed']['stats']['millis'])
    mask = report['queryMask']

    existing_report = self._get_existing_report(mask, report)
    if existing_report is not None:
        self._merge_report(existing_report, report)
        return

    # First occurrence of this query mask: record a new aggregate entry.
    time = report['parsed']['ts'] if 'ts' in report['parsed'] else None
    self._reports.append(OrderedDict([
        ('namespace', report['namespace']),
        ('lastSeenDate', time),
        ('queryMask', mask),
        ('supported', report['queryAnalysis']['supported']),
        ('indexStatus', report['indexStatus']),
        ('recommendation', report['recommendation']),
        ('stats', OrderedDict([('count', 1),
                               ('totalTimeMillis', initial_millis),
                               ('avgTimeMillis', initial_millis)]))]))
Adds a report to the report aggregation
def get_any_node(self, addr):
    """Return the first VFG node whose basic block sits at *addr*.

    With context sensitivity there may be several nodes per address; which
    one is returned is unspecified. Returns None when no node matches.
    """
    for node in self.graph.nodes():
        if node.addr == addr:
            return node
Get any VFG node corresponding to the basic block at @addr. Note that depending on the context sensitivity level, there might be multiple nodes corresponding to different contexts. This function will return the first one it encounters, which might not be what you want.
def resolve(self, pubID, sysID):
    """Perform a complete catalog resolution lookup of an external identifier."""
    return libxml2mod.xmlACatalogResolve(self._o, pubID, sysID)
Do a complete resolution lookup of an External Identifier
def format_keyword(keyword):
    """Strip non-alphanumeric characters (including underscores) from
    *keyword* so it is usable as an Analysis Service keyword,
    e.g. 'HIV-1 2.0' -> 'HIV120'."""
    import re
    if not keyword:
        return ''
    return re.sub("_", "", re.sub(r"\W", "", keyword))
Removing special character from a keyword. Analysis Services must have this kind of keywords. E.g. if assay name from the Instrument is 'HIV-1 2.0', an AS must be created on Bika with the keyword 'HIV120'
def load_statements(fname, as_dict=False):
    """Load statements from the pickle file *fname*.

    If the pickle holds a dict and *as_dict* is True, return the dict;
    otherwise flatten the dict values into a single list.
    """
    logger.info('Loading %s...' % fname)
    with open(fname, 'rb') as fh:
        if sys.version_info[0] < 3:
            stmts = pickle.load(fh)
        else:
            # latin1 keeps Python 2 pickles loadable under Python 3.
            stmts = pickle.load(fh, encoding='latin1')
    if isinstance(stmts, dict):
        if as_dict:
            return stmts
        flattened = []
        for _, st_list in stmts.items():
            flattened += st_list
        stmts = flattened
    logger.info('Loaded %d statements' % len(stmts))
    return stmts
Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded.
def match_var(self, tokens, item):
    """Match a variable token: the wildcard matches anything; a known name
    adds an equality check; a new name is bound to *item*."""
    setvar, = tokens
    if setvar == wildcard:
        return
    if setvar in self.names:
        self.add_check(self.names[setvar] + " == " + item)
    else:
        self.add_def(setvar + " = " + item)
        self.names[setvar] = item
Matches a variable.
def metric_tensor(self) -> np.ndarray:
    """Return the metric tensor G = M @ M.T of the lattice."""
    matrix = self._matrix
    return dot(matrix, matrix.T)
The metric tensor of the lattice.
def builds(self, request, pk=None):
    """Paginated list of the current project's builds, newest first."""
    queryset = (self.get_object().builds
                .prefetch_related('test_runs')
                .order_by('-datetime'))
    page = self.paginate_queryset(queryset)
    serializer = BuildSerializer(page, many=True, context={'request': request})
    return self.get_paginated_response(serializer.data)
List of builds for the current project.
def cont_cat_split(df, max_card=20, dep_var=None) -> Tuple[List, List]:
    "Helper function that returns column names of cont and cat variables from given df."
    cont_names, cat_names = [], []
    for label in df:
        if label == dep_var:
            continue
        # Continuous: float columns, or int columns with high cardinality.
        high_card_int = (df[label].dtype == int
                         and df[label].unique().shape[0] > max_card)
        if high_card_int or df[label].dtype == float:
            cont_names.append(label)
        else:
            cat_names.append(label)
    return cont_names, cat_names
Helper function that returns column names of cont and cat variables from given df.
def _dump_spec(spec):
    """Dump the BEL specification dictionary to spec.yaml in block style
    (MyDumper adds extra list indentation for easier code folding)."""
    with open("spec.yaml", "w") as f:
        yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary.
def get_children_treepos(self, treepos):
    """Given a tree position, return the tree positions of its children."""
    children = []
    for i, child in enumerate(self.dgtree[treepos]):
        if isinstance(child, nltk.Tree):
            children.append(child.treeposition())
        elif is_leaf(child):
            # Leaves carry no treeposition of their own; derive it from
            # the parent position plus the child index.
            children.append(tuple(list(treepos) + [i]))
    return children
Given a treeposition, return the treepositions of its children.
def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False):
    """Return datacenter objects from a vCenter, filtered by name unless
    *get_all_datacenters* is set."""
    return [entry['object']
            for entry in get_mors_with_properties(service_instance,
                                                  vim.Datacenter,
                                                  property_list=['name'])
            if get_all_datacenters
            or (datacenter_names and entry['name'] in datacenter_names)]
Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default value is False.
def filepaths_in_dir(path):
    """Walk *path* and return the paths of all files relative to *path*.

    Args:
        path (str): the directory to walk
    Returns:
        list: relative paths of every file under ``path``
    """
    filepaths = []
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            full = os.path.join(root, filename)
            # Strip the base-path prefix so the result is relative.
            filepaths.append(full.replace(path, '').lstrip('/'))
    return filepaths
Find all files in a directory, and return the relative paths to those files. Args: path (str): the directory path to walk Returns: list: the list of relative paths to all files inside of ``path`` or its subdirectories.
def parse_secured_key(secured_key, key_nonce_separator='.', nonce_length=4, base=BASE62):
    """Split *secured_key* into (integer value, key, nonce).

    :raises ValueError: when the separator is missing or the nonce has the
        wrong length.
    """
    parts = secured_key.split(key_nonce_separator)
    if len(parts) != 2:
        raise ValueError('Invalid secured key format')
    key, nonce = parts
    if len(nonce) != nonce_length:
        raise ValueError('Invalid length of the key nonce')
    return key_to_int(key, base=base), key, nonce
Parse a given secured key and return its associated integer, the key itself, and the embedded nonce. @param secured_key a string representation of a secured key composed of a key in Base62, a separator character, and a nonce. @param key_nonce_separator: the character that is used to separate the key and the nonce to form the secured key. @param nonce_length: the number of characters to compose the nonce. @param base: a sequence of characters that is used to encode the integer value. @return: a tuple ``(value, key, nonce)``: * ``value``: the integer value of the key. * ``key``: the plain-text key. * ``nonce``: "number used once", a pseudo-random number to ensure that the key cannot be reused in replay attacks. @raise ValueError: if the format of the secured key is invalid, or if the embedded nonce is of the wrong length.
def handle_target(self, request, controller_args, controller_kwargs):
    """Normalize the incoming values via normalize_target_params() and
    invoke the configured target, routing failures to handle_error().

    CallError is re-raised untouched; any other exception is handled.
    """
    try:
        param_args, param_kwargs = self.normalize_target_params(
            request=request,
            controller_args=controller_args,
            controller_kwargs=controller_kwargs
        )
        ret = self.target(*param_args, **param_kwargs)
        if not ret:
            # A falsy result means the check did not pass.
            raise ValueError("{} check failed".format(self.__class__.__name__))
    except CallError:
        raise
    except Exception as e:
        self.handle_error(e)
Internal method for this class handles normalizing the passed in values from the decorator using .normalize_target_params() and then passes them to the set .target()
def getTopRight(self):
    """Return (x + width, y + height): the rect's upper-right corner.

    The four underlying getters must return numbers (or numeric strings).
    """
    x = float(self.get_x())
    y = float(self.get_y())
    return (x + float(self.get_width()), y + float(self.get_height()))
Retrieves a tuple with the x,y coordinates of the upper right point of the rect. Requires the coordinates, width, height to be numbers
def _ask_for_ledger_status(self, node_name: str, ledger_id):
    """Ask the named node for the LedgerStatus of *ledger_id*."""
    self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id}, [node_name, ])
    logger.info("{} asking {} for ledger status of ledger {}".format(
        self, node_name, ledger_id))
Ask other node for LedgerStatus
def do_logStream(self,args): parser = CommandArgumentParser("logStream") parser.add_argument(dest='logStream',help='logStream index.'); args = vars(parser.parse_args(args)) print "loading log stream {}".format(args['logStream']) index = int(args['logStream']) logStream = self.logStreams[index] print "logStream:{}".format(logStream) self.childLoop(AwsLogStream.AwsLogStream(logStream,self))
Go to the specified log stream. logStream -h for detailed help
def _parse_tile_url(tile_url):
    """Split an AWS tile URL into (tile_name, date, aws_index).

    The last 7 path segments are: 3 tile-name parts (concatenated),
    3 date parts (joined with '-'), and the AWS index.
    """
    segments = tile_url.rsplit('/', 7)
    tile_name = ''.join(segments[1:4])
    date = '-'.join(segments[4:7])
    return tile_name, date, int(segments[7])
Extracts tile name, data and AWS index from tile URL :param tile_url: Location of tile at AWS :type: tile_url: str :return: Tuple in a form (tile_name, date, aws_index) :rtype: (str, str, int)
def get_pelican_cls(settings):
    """Return the Pelican class requested in *settings*.

    settings['PELICAN_CLASS'] may be a class object or a dotted-path
    string such as 'pkg.module.Class'.
    """
    cls = settings['PELICAN_CLASS']
    if isinstance(cls, six.string_types):
        module_path, cls_name = cls.rsplit('.', 1)
        # importlib.import_module returns the leaf module; the original
        # used __import__, which returns the *top-level* package and
        # therefore failed for nested dotted paths like 'a.b.Class'.
        import importlib
        module = importlib.import_module(module_path)
        cls = getattr(module, cls_name)
    return cls
Get the Pelican class requested in settings
async def createcsrf(self, csrfarg='_csrf'):
    """Ensure the session carries an anti-CSRF token under *csrfarg*."""
    await self.sessionstart()
    if csrfarg not in self.session.vars:
        self.session.vars[csrfarg] = uuid.uuid4().hex
Create a anti-CSRF token in the session
def process_origin(
    headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None
) -> Optional[Origin]:
    """Handle the Origin HTTP request header.

    :raises InvalidHeader: when more than one Origin header is present
    :raises InvalidOrigin: when the origin is not in *origins*
    """
    try:
        origin = cast(Origin, headers.get("Origin"))
    except MultipleValuesError:
        raise InvalidHeader("Origin", "more than one Origin header found")
    if origins is not None and origin not in origins:
        raise InvalidOrigin(origin)
    return origin
Handle the Origin HTTP request header. Raise :exc:`~websockets.exceptions.InvalidOrigin` if the origin isn't acceptable.
def prepare_jochem(ctx, jochem, output, csoutput):
    """Filter the jochem term file into dictionary name lists.

    Case-insensitive terms (marked '@match=ci') go to *output*; the rest
    go to *csoutput*.
    """
    click.echo('chemdataextractor.dict.prepare_jochem')
    for i, line in enumerate(jochem):
        print('JC%s' % i)
        if not line.startswith('TM '):
            continue
        if line.endswith(' @match=ci\n'):
            for tokens in _make_tokens(line[3:-11]):
                output.write(' '.join(tokens))
                output.write('\n')
        else:
            for tokens in _make_tokens(line[3:-1]):
                csoutput.write(' '.join(tokens))
                csoutput.write('\n')
Process and filter jochem file to produce list of names for dictionary.
def clean_whitespace(statement):
    """Collapse all whitespace runs in the statement text to single spaces
    and trim the ends; returns the mutated statement."""
    import re
    text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    statement.text = re.sub(' +', ' ', text.strip())
    return statement
Remove any consecutive whitespace characters from the statement text.
def process_json(json_dict):
    """Build an EidosProcessor from an Eidos JSON-LD dict, extracting
    causal relations, correlations, and events into its statements.

    :param json_dict: the JSON-LD dict to process
    :return: the populated EidosProcessor
    """
    ep = EidosProcessor(json_dict)
    ep.extract_causal_relations()
    ep.extract_correlations()
    ep.extract_events()
    return ep
Return an EidosProcessor by processing a Eidos JSON-LD dict. Parameters ---------- json_dict : dict The JSON-LD dict to be processed. Returns ------- ep : EidosProcessor A EidosProcessor containing the extracted INDRA Statements in its statements attribute.
def read_unicode(path, encoding, encoding_errors):
    """Return the contents of the file at *path* as a unicode string.

    Decoding is delegated to make_unicode with the given encoding and
    error policy.
    """
    # Use a context manager: the original opened inside the try and closed
    # in finally, so a failed open() raised UnboundLocalError on f.close(),
    # masking the real error.
    with open(path, 'rb') as f:
        return make_unicode(f.read(), encoding, encoding_errors)
Return the contents of a file as a unicode string.
def api_submit():
    """Blocking POST handler for file submission: run analysis on the
    uploaded pcap and return the results as JSON text."""
    data = request.files.file
    response.content_type = 'application/json'
    if not data or not hasattr(data, 'file'):
        return json.dumps({"status": "Failed", "stderr": "Missing form params"})
    return json.dumps(analyse_pcap(data.file, data.filename),
                      default=jsondate, indent=4)
Blocking POST handler for file submission. Runs snort on supplied file and returns results as json text.
def recalc_M(S, D_cba, Y, nr_sectors):
    """Recompute multipliers M from footprints: M = D_cba . diag(Y)^-1.

    Y is diagonalized in blocks of *nr_sectors* per country and must be
    invertible. DataFrame labels are carried over from D_cba.
    """
    Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
    M = D_cba.dot(np.linalg.inv(Y_diag))
    if type(D_cba) is pd.DataFrame:
        M.columns = D_cba.columns
        M.index = D_cba.index
    return M
Calculate Multipliers based on footprints. Parameters ---------- D_cba : pandas.DataFrame or numpy array Footprint per sector and country Y : pandas.DataFrame or numpy array Final demand: aggregated across categories or just one category, one column per country. This will be diagonalized per country block. The diagonolized form must be invertable for this method to work. nr_sectors : int Number of sectors in the MRIO Returns ------- pandas.DataFrame or numpy.array Multipliers M The type is determined by the type of D_cba. If DataFrame index/columns as D_cba
def record(self):
    """Return the serialized Rock Ridge 'SL' (symbolic link) record.

    :raises PyCdlibInternalError: when called before initialization.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
    parts = [b'SL',
             struct.pack('=BBB', self.current_length(), SU_ENTRY_VERSION, self.flags)]
    for comp in self.symlink_components:
        parts.append(comp.record())
    return b''.join(parts)
Generate a string representing the Rock Ridge Symbolic Link record. Parameters: None. Returns: String containing the Rock Ridge record.
def pad_array(in1):
    """Pad *in1* with zeros to twice its size for linear convolution,
    centering the original array.

    INPUTS:
    in1    Input 2-D array to be padded.
    OUTPUTS:
    out1   Padded version of the input.
    """
    padded_size = 2 * np.array(in1.shape)
    out1 = np.zeros([padded_size[0], padded_size[1]])
    # Integer division: the original used '/', which yields float slice
    # indices on Python 3 and raises TypeError.
    r0, r1 = padded_size[0] // 4, 3 * padded_size[0] // 4
    c0, c1 = padded_size[1] // 4, 3 * padded_size[1] // 4
    out1[r0:r1, c0:c1] = in1
    return out1
Simple convenience function to pad arrays for linear convolution. INPUTS: in1 (no default): Input array which is to be padded. OUTPUTS: out1 Padded version of the input.
def is_diff(self):
    """Return True when any additions, updates, or deletions were logged."""
    if not isinstance(self.details, dict):
        return False
    return any(self.details.get(key) for key in ('additions', 'updates', 'deletions'))
Return True if there are any differences logged
def unpack_archive(*components, **kwargs) -> str:
    """Extract a compressed tar archive.

    kwargs: "compression" (default "bz2"); "dir" destination directory
    (defaults to the archive's own directory). Returns that directory.
    """
    path = fs.abspath(*components)
    compression = kwargs.get("compression", "bz2")
    dest = kwargs.get("dir", fs.dirname(path))

    fs.cd(dest)
    archive = tarfile.open(path, "r:" + compression)
    archive.extractall()
    archive.close()
    fs.cdpop()
    return dest
Unpack a compressed archive. Arguments: *components (str[]): Absolute path. **kwargs (dict, optional): Set "compression" to compression type. Default: bz2. Set "dir" to destination directory. Defaults to the directory of the archive. Returns: str: Path to directory.
def qubo_energy(sample, Q, offset=0.0):
    """Energy of *sample* under QUBO coefficients *Q*, plus *offset*.

    E(x) = sum_{u,v} Q[(u, v)] * x_u * x_v + offset, where (v, v) entries
    act as linear biases. No input checking is performed.
    """
    energy = offset
    for u, v in Q:
        energy += sample[u] * sample[v] * Q[(u, v)]
    return energy
Calculate the energy for the specified sample of a QUBO model. Energy of a sample for a binary quadratic model is defined as a sum, offset by the constant energy offset associated with the model, of the sample multipled by the linear bias of the variable and all its interactions. For a quadratic unconstrained binary optimization (QUBO) model, .. math:: E(\mathbf{x}) = \sum_{u,v} Q_{u,v} x_u x_v + c where :math:`x_v` is the sample, :math:`Q_{u,v}` a matrix of biases, and :math:`c` the energy offset. Args: sample (dict[variable, spin]): Sample for a binary quadratic model as a dict of form {v: bin, ...}, where keys are variables of the model and values are binary (either 0 or 1). Q (dict[(variable, variable), coefficient]): QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys are 2-tuples of variables of the model and values are biases associated with the pair of variables. Tuples (u, v) represent interactions and (v, v) linear biases. offset (numeric, optional, default=0): Constant offset to be applied to the energy. Default 0. Returns: float: The induced energy. Notes: No input checking is performed. Examples: This example calculates the energy of a sample representing two zeros for a QUBO model of two variables that have positive biases of value 1 and are positively coupled with an interaction of value 1. >>> import dimod >>> sample = {1: 0, 2: 0} >>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1} >>> dimod.qubo_energy(sample, Q, 0.5) 0.5 References ---------- `QUBO model on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_
def getCandScoresMap(self, profile):
    """Return a dict mapping each candidate (int) to its maximin score.

    A candidate's maximin score is the minimum, over all other candidates,
    of the pairwise support it receives in the weighted majority graph.

    :ivar Profile profile: A Profile object that represents an election
        profile; only "soc" and "toc" election types are supported.
    """
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        # NOTE(review): print + exit() aborts the whole interpreter; an
        # exception would be friendlier to library callers -- confirm intent.
        print("ERROR: unsupported election type")
        exit()
    wmg = profile.getWmg()
    # Seed each candidate with +inf so the first observed pairwise support
    # becomes the running minimum.
    maximinScores = dict()
    for cand in wmg.keys():
        maximinScores[cand] = float("inf")
    # Visit every unordered candidate pair once, updating both minima.
    for cand1, cand2 in itertools.combinations(wmg.keys(), 2):
        if cand2 in wmg[cand1].keys():
            maximinScores[cand1] = min(maximinScores[cand1], wmg[cand1][cand2])
            maximinScores[cand2] = min(maximinScores[cand2], wmg[cand2][cand1])
    return maximinScores
Returns a dictionary that associates integer representations of each candidate with their maximin score. :ivar Profile profile: A Profile object that represents an election profile.
def age(self, as_at_date=None):
    """Compute the person's age in completed years.

    Args:
        as_at_date (date, optional): Date to compute the age at.
            Defaults to today.

    Returns:
        int or None: Age in whole years, or None when the person is
        deceased or has no recorded date of birth.
    """
    # Deceased people have no current age.
    if self.date_of_death is not None or self.is_deceased:
        return None
    if self.date_of_birth is None:
        return None
    as_at_date = date.today() if as_at_date is None else as_at_date
    years = as_at_date.year - self.date_of_birth.year
    # Subtract one year only if the birthday has not yet occurred this
    # year. Comparing (month, day) tuples fixes the original off-by-one:
    # `(month >= m) and (day >= d)` wrongly failed for dates such as
    # "born 20 March, asked on 10 April" (month passed, smaller day).
    if (as_at_date.month, as_at_date.day) < (self.date_of_birth.month,
                                             self.date_of_birth.day):
        years -= 1
    return years
Compute the person's age
def get_query_targets(cli_ctx, apps, resource_group):
    """Produce a list of uniform GUIDs representing applications to query.

    ``apps`` may be a single application or a list. When a resource group
    is supplied, only the first (or only) application is resolved against
    it; otherwise every entry is resolved individually.
    """
    if isinstance(apps, list):
        if resource_group:
            return [get_id_from_azure_resource(cli_ctx, apps[0], resource_group)]
        return [get_id_from_azure_resource(cli_ctx, app) for app in apps]
    if resource_group:
        return [get_id_from_azure_resource(cli_ctx, apps, resource_group)]
    # No resource group and a single app: pass it through unchanged.
    return apps
Produces a list of uniform GUIDs representing applications to query.
def put_bits( self, value, count ):
    """Push *count* bits of *value* into the output byte stream.

    value
        Integer containing bits to push, ordered from least-significant
        bit to most-significant bit.
    count
        Number of bits to push.

    Bits accumulate in ``self.current_bits`` according to the target's
    ``bits_reverse`` / ``insert_at_msb`` settings; each completed byte is
    appended to ``self.output``.
    """
    for _ in range( count ):
        # Consume the next (least-significant) bit of value.
        bit = (value & 1)
        value >>= 1
        if self.bits_reverse:
            if self.insert_at_msb:
                # Fill the byte from its MSB end downwards.
                self.current_bits |= (bit << (self.bits_remaining-1))
            else:
                # Shift previous bits up and append at the LSB.
                self.current_bits <<= 1
                self.current_bits |= bit
        else:
            if self.insert_at_msb:
                # Shift previous bits down and append at the MSB.
                self.current_bits >>= 1
                self.current_bits |= (bit << 7)
            else:
                # Fill the byte from its LSB end upwards.
                self.current_bits |= (bit << (8-self.bits_remaining))
        self.bits_remaining -= 1
        # Flush the completed byte and reset the accumulator.
        if self.bits_remaining <= 0:
            self.output.append( self.current_bits )
            self.current_bits = 0
            self.bits_remaining = 8
Push bits into the target. value Integer containing bits to push, ordered from least-significant bit to most-significant bit. count Number of bits to push to the target.
def wait_for(self, event, predicate, result=None):
    """Wait for a DISPATCH'd event matching *predicate*.

    Registers a listener entry and returns a future that is resolved
    once a matching event arrives; ``result`` (if given) transforms the
    event data before it is set on the future, otherwise the raw data
    is used.
    """
    future = self.loop.create_future()
    self._dispatch_listeners.append(
        EventListener(event=event, predicate=predicate,
                      result=result, future=future))
    return future
Waits for a DISPATCH'd event that meets the predicate. Parameters ----------- event: :class:`str` The event name in all upper case to wait for. predicate A function that takes a data parameter to check for event properties. The data parameter is the 'd' key in the JSON message. result A function that takes the same data parameter and executes to send the result to the future. If None, returns the data. Returns -------- asyncio.Future A future to wait for.
def merge_pdfs(pdf_names, output) -> None:
    """Concatenate all of the named PDF files into one output PDF."""
    merger = PyPDF2.PdfFileMerger()
    for source in pdf_names:
        merger.append(source)
    merger.write(output)
    merger.close()
Merges all pdfs together into a single long PDF.
def load_public_key(vm_):
    """Load the configured ssh public key file, if one exists.

    Returns the key contents with newlines stripped, or None when no
    ``ssh_public_key`` is configured. Raises SaltCloudConfigError when
    the configured path does not point at a file.
    """
    filename = config.get_cloud_config_value(
        'ssh_public_key', vm_, __opts__, search_global=False, default=None
    )
    if filename is None:
        # Nothing configured; nothing to load.
        return None
    filename = os.path.expanduser(filename)
    if not os.path.isfile(filename):
        raise SaltCloudConfigError(
            'The defined ssh_public_key \'{0}\' does not exist'.format(
                filename
            )
        )
    with salt.utils.files.fopen(filename, 'r') as public_key:
        return salt.utils.stringutils.to_unicode(
            public_key.read().replace('\n', ''))
Load the public key file if exists.
def _dict_to_name_value(data): if isinstance(data, dict): sorted_data = sorted(data.items(), key=lambda s: s[0]) result = [] for name, value in sorted_data: if isinstance(value, dict): result.append({name: _dict_to_name_value(value)}) else: result.append({name: value}) else: result = data return result
Convert a dictionary to a list of dictionaries to facilitate ordering
def print_access(access, title):
    """Build a table of the hardware or virtual guests a user can access."""
    columns = ['id', 'hostname', 'Primary Public IP', 'Primary Private IP',
               'Created']
    table = formatting.Table(columns, title)
    for host in access:
        table.add_row([
            host.get('id'),
            host.get('fullyQualifiedDomainName', '-'),
            host.get('primaryIpAddress'),
            host.get('primaryBackendIpAddress'),
            host.get('provisionDate'),
        ])
    return table
Prints out the hardware or virtual guests a user can access
def now_heating(self):
    """Return the current heating state for this side of the bed.

    Returns:
        The ``leftNowHeating`` / ``rightNowHeating`` value from the
        device data, or None when the side is unknown or the device
        data is not yet available.
    """
    try:
        if self.side == 'left':
            return self.device.device_data['leftNowHeating']
        if self.side == 'right':
            return self.device.device_data['rightNowHeating']
        # Fix: the original left `heat` unbound for an unexpected side,
        # raising UnboundLocalError instead of returning None.
        return None
    except TypeError:
        # device_data may be None before the first update.
        return None
Return current heating state.
def process_iter(proc, cmd=""):
    """Yield lines from *proc*'s stdout, checking the exit status when done.

    Args:
        proc: a subprocess.Popen-like object exposing ``stdout``,
            ``poll``, ``wait`` and ``returncode``.
        cmd (str): command string, used only for error reporting.

    Raises:
        ProcessException: if the process exited with an unexpected code.
    """
    try:
        for l in proc.stdout:
            yield l
    finally:
        # NOTE(review): if the process is still running (poll() is None)
        # we return without waiting -- presumably the consumer abandoned
        # the iterator early; confirm this is intentional.
        if proc.poll() is None:
            return
        else:
            proc.wait()
            # SIGPIPE (raw or shell-style 128+SIGPIPE) is tolerated: the
            # reader may legitimately close the pipe before the writer
            # finishes.
            if proc.returncode not in (0, None, signal.SIGPIPE, signal.SIGPIPE + 128):
                sys.stderr.write("cmd was:%s\n" % cmd)
                sys.stderr.write("return code was:%s\n" % proc.returncode)
                raise ProcessException(cmd)
Helper function to iterate over a process's stdout and report error messages when done.
def files_comments_edit(
    self, *, comment: str, file: str, id: str, **kwargs
) -> SlackResponse:
    """Edit an existing file comment.

    Args:
        comment (str): New body of the comment.
            e.g. 'Everyone should take a moment to read this file.'
        file (str): The file id. e.g. 'F1234467890'
        id (str): The file comment id. e.g. 'Fc1234567890'
    """
    payload = dict(kwargs, comment=comment, file=file, id=id)
    return self.api_call("files.comments.edit", json=payload)
Edit an existing file comment. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890' id (str): The file comment id. e.g. 'Fc1234567890'
def show_files(md5):
    """Render the md5 `view` template, or redirect home if no workbench."""
    if not WORKBENCH:
        return flask.redirect('/')
    result = WORKBENCH.work_request('view', md5)
    return flask.render_template(
        'templates/md5_view.html', md5_view=result['view'], md5=md5)
Renders template with `view` of the md5.
def send_direct_message(self, text, user=None, delegate=None,
                        screen_name=None, user_id=None, params=None):
    """Send a direct message.

    Exactly one of ``user``, ``user_id`` or ``screen_name`` should
    identify the recipient; additional request parameters may be
    supplied via ``params``.
    """
    # Default to None instead of a mutable `{}` default (shared across
    # calls); copy so the caller's dict is never mutated.
    params = {} if params is None else params.copy()
    if user is not None:
        params['user'] = user
    if user_id is not None:
        params['user_id'] = user_id
    if screen_name is not None:
        params['screen_name'] = screen_name
    params['text'] = text
    parser = txml.Direct(delegate)
    return self.__postPage('/direct_messages/new.xml', parser, params)
Send a direct message
def get_if_present(self, name, default=None):
    """Return the value for field *name*, or *default* when it is absent.

    Raises:
        exceptions.FormNotProcessed: if the form data has not been
            processed yet.
    """
    if not self.processed_data:
        raise exceptions.FormNotProcessed('The form data has not been processed yet')
    return self[name] if name in self.field_dict else default
Returns the value for a field, but if the field doesn't exist will return default instead
def remove_sshkey(host, known_hosts=None):
    """Remove *host* from the user's ssh known_hosts file.

    When *known_hosts* is not given it is derived from $HOME or, failing
    that, from the password database entry of the current user. The path
    is only used for logging; ``ssh-keygen -R`` operates on its default
    file.
    """
    if known_hosts is None:
        if 'HOME' in os.environ:
            known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])
        else:
            try:
                # Fix: the pwd struct attribute is `pw_dir`, not `pwd_dir`
                # (the original typo always fell through to the bare log).
                known_hosts = '{0}/.ssh/known_hosts'.format(
                    pwd.getpwuid(os.getuid()).pw_dir
                )
            except Exception:
                # Best effort only; logging falls back to the short form.
                pass
    if known_hosts is not None:
        log.debug(
            'Removing ssh key for %s from known hosts file %s',
            host, known_hosts
        )
    else:
        log.debug('Removing ssh key for %s from known hosts file', host)
    # Argument-list form avoids shell interpolation of the (possibly
    # untrusted) host name.
    subprocess.call(['ssh-keygen', '-R', host])
Remove a host from the known_hosts file
def validate_capacity(capacity):
    """Validate ScalingConfiguration capacity for a serverless DBCluster.

    Returns the capacity unchanged, or raises ValueError when it is not
    one of VALID_SCALING_CONFIGURATION_CAPACITIES.
    """
    if capacity not in VALID_SCALING_CONFIGURATION_CAPACITIES:
        allowed = ", ".join(
            str(c) for c in VALID_SCALING_CONFIGURATION_CAPACITIES
        )
        raise ValueError(
            "ScalingConfiguration capacity must be one of: {}".format(allowed)
        )
    return capacity
Validate ScalingConfiguration capacity for serverless DBCluster
def getdminfo(self, columnname=None):
    """Get data manager info.

    With no argument, return the full data manager dict produced by
    :func:`_getdminfo`. With a column name, return a copy of the entry
    for the data manager storing that column, with its COLUMNS field
    removed.

    Raises:
        KeyError: if no data manager stores the named column.
    """
    dminfo = self._getdminfo()
    if columnname is None:
        return dminfo
    for entry in dminfo.values():
        if columnname in entry["COLUMNS"]:
            result = entry.copy()
            del result['COLUMNS']
            return result
    raise KeyError("Column " + columnname + " does not exist")
Get data manager info. Each column in a table is stored using a data manager. A storage manager is a data manager storing the physically in a file. A virtual column engine is a data manager that does not store data but calculates it on the fly (e.g. scaling floats to short to reduce storage needs). By default this method returns a dict telling the data managers used. Each field in the dict is a dict containing: - NAME telling the (unique) name of the data manager - TYPE telling the type of data manager (e.g. TiledShapeStMan) - SEQNR telling the sequence number of the data manager (is ''i'' in table.f<i> for storage managers) - SPEC is a dict holding the data manager specification - COLUMNS is a list giving the columns stored by this data manager When giving a column name the data manager info of that particular column is returned (without the COLUMNS field). It can, for instance, be used when adding a column using :func:`addcols` that should use the same data manager type as an existing column. However, when doing that care should be taken to change the NAME because each data manager name has to be unique.
def _post(self, xml_query):
    """POST the XML search query to the RCSB PDB REST service.

    Returns the response body as a stripped string.
    """
    request = urllib2.Request(url='http://www.rcsb.org/pdb/rest/search',
                              data=xml_query)
    response = urllib2.urlopen(request)
    return response.read().strip()
POST the request.
def get_header_example(cls, header):
    """Build an example value for a header object.

    :param Header header: Header object
    :return: {header name: example value}
    :rtype: dict
    """
    if header.is_array:
        example = cls.get_example_for_array(header.item)
    else:
        # Dispatch to e.g. `integer_example` / `string_example` based on
        # the header's declared type.
        example_method = getattr(cls, '{}_example'.format(header.type))
        example = example_method(header.properties, header.type_format)
    return {header.name: example}
Get example for header object :param Header header: Header object :return: example :rtype: dict
def check_command(self, op_description, op=None, data=b'', chk=0, timeout=DEFAULT_TIMEOUT):
    """Execute a command and validate the status bytes of the response.

    Args:
        op_description (str): human-readable description used in errors.
        op: command opcode passed through to ``command``.
        data (bytes): payload for the command.
        chk (int): checksum for the command.
        timeout: per-command timeout.

    Returns:
        The response payload with the trailing status bytes stripped,
        or the command's ``val`` result when the payload contains only
        status bytes.

    Raises:
        FatalError: if the response is too short or the first status
            byte reports failure.
    """
    val, data = self.command(op, data, chk, timeout=timeout)
    # The response must at least contain the status bytes.
    if len(data) < self.STATUS_BYTES_LENGTH:
        raise FatalError("Failed to %s. Only got %d byte status response." % (op_description, len(data)))
    status_bytes = data[-self.STATUS_BYTES_LENGTH:]
    # A non-zero first status byte signals failure.
    if byte(status_bytes, 0) != 0:
        raise FatalError.WithResult('Failed to %s' % op_description, status_bytes)
    # If there is data beyond the status bytes, that is the result.
    if len(data) > self.STATUS_BYTES_LENGTH:
        return data[:-self.STATUS_BYTES_LENGTH]
    else:
        # Otherwise the command's own result value is returned.
        return val
Execute a command with 'command', check the result code and throw an appropriate FatalError if it fails. Returns the "result" of a successful command.
def new(namespace, name, wdl, synopsis,
        documentation=None, api_url=fapi.PROD_API_ROOT):
    """Create a new FireCloud method.

    If the namespace + name already exists, a new snapshot is created.

    Args:
        namespace (str): Method namespace for this method
        name (str): Method name
        wdl (file): WDL description
        synopsis (str): Short description of task
        documentation (file): Extra documentation for method
        api_url (str): FireCloud API root

    Returns:
        Method: handle to the newly created snapshot.
    """
    response = fapi.update_workflow(namespace, name, synopsis, wdl,
                                    documentation, api_url)
    fapi._check_response_code(response, 201)
    body = response.json()
    return Method(namespace, name, body["snapshotId"])
Create new FireCloud method. If the namespace + name already exists, a new snapshot is created. Args: namespace (str): Method namespace for this method name (str): Method name wdl (file): WDL description synopsis (str): Short description of task documentation (file): Extra documentation for method
def get_unread_forums_from_list(self, forums, user):
    """Filter a list of forums and return only those which are unread.

    A forum is unread for *user* when their track for it is older than
    the forum's latest post, or when the forum has topics but no track
    at all. Whenever a forum is unread, all of its ancestors are also
    marked unread and included in the final (deduplicated) list.
    """
    unread_forums = []
    # Map forum ids to visibility nodes carrying last_post_on data.
    visibility_contents = ForumVisibilityContentTree.from_forums(forums)
    forum_ids_to_visibility_nodes = visibility_contents.as_dict
    tracks = super().get_queryset().select_related('forum').filter(
        user=user, forum__in=forums)
    tracked_forums = []
    for track in tracks:
        forum_last_post_on = forum_ids_to_visibility_nodes[track.forum_id].last_post_on
        # Unread if something was posted after the user's last visit.
        if (forum_last_post_on and track.mark_time < forum_last_post_on) \
                and track.forum not in unread_forums:
            unread_forums.extend(track.forum.get_ancestors(include_self=True))
        tracked_forums.append(track.forum)
    # Forums with topics but no track at all are also unread.
    for forum in forums:
        if forum not in tracked_forums and forum not in unread_forums \
                and forum.direct_topics_count > 0:
            unread_forums.extend(forum.get_ancestors(include_self=True))
    return list(set(unread_forums))
Filter a list of forums and return only those which are unread. Given a list of forums find and returns the list of forums that are unread for the passed user. If a forum is unread all of its ancestors are also unread and will be included in the final list.
def format_config(sensor_graph):
    """Extract the config variables from this sensor graph in ASCII format.

    Args:
        sensor_graph (SensorGraph): the sensor graph that we want to format

    Returns:
        str: The ascii output lines concatenated as a single string
    """
    cmdfile = CommandFile("Config Variables", "1.0")
    # Deterministic ordering: slots sorted by their encoded value, then
    # config variables sorted by id within each slot.
    for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):
        for conf_var, conf_def in sorted(sensor_graph.config_database[slot].items()):
            conf_type, conf_val = conf_def
            if conf_type == 'binary':
                # NOTE(review): on Python 3 hexlify() returns bytes, so
                # 'hex:' + hexlify(...) would raise TypeError -- confirm
                # this targets Python 2 or that conf_val is decoded upstream.
                conf_val = 'hex:' + hexlify(conf_val)
            cmdfile.add("set_variable", slot, conf_var, conf_type, conf_val)
    return cmdfile.dump()
Extract the config variables from this sensor graph in ASCII format. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: str: The ascii output lines concatenated as a single string
def _is_request_in_exclude_path(self, request): if self._exclude_paths: for path in self._exclude_paths: if request.path.startswith(path): return True return False else: return False
Check if the request path is in the `_exclude_paths` list
def addmag(self, magval):
    """Scale the spectrum's flux by a scalar magnitude offset.

    flux_new = 10 ** (-0.4 * magval) * flux

    Parameters
    ----------
    magval : number
        Magnitude value.

    Raises
    ------
    TypeError
        Magnitude value is not a scalar number.
    """
    if not N.isscalar(magval):
        raise TypeError(".addmag() only takes a constant scalar argument")
    return self * 10 ** (-0.4 * magval)
Add a scalar magnitude to existing flux values. .. math:: \\textnormal{flux}_{\\textnormal{new}} = 10^{-0.4 \\; \\textnormal{magval}} \\; \\textnormal{flux} Parameters ---------- magval : number Magnitude value. Returns ------- sp : `CompositeSourceSpectrum` New source spectrum with adjusted flux values. Raises ------ TypeError Magnitude value is not a scalar number.
def prep_args(arg_info):
    """Resolve concrete argument types from an ArgInfo record.

    Positional argument types are resolved from ``arg_info.locals``
    (skipping a leading self/cls and the varargs name); up to the first
    four ``*args`` values are resolved as ``varargs``.
    """
    varargs_name = getattr(arg_info, 'varargs', None)
    named = [a for a in arg_info.args if a != varargs_name]
    if named and named[0] in ('self', 'cls'):
        named = named[1:]
    pos_args = []
    for arg in named:
        if isinstance(arg, str) and arg in arg_info.locals:
            pos_args.append(resolve_type(arg_info.locals[arg]))
        else:
            # Argument value is unavailable; record an unknown type.
            pos_args.append(type(UnknownType()))
    varargs = None
    if arg_info.varargs:
        candidate = arg_info.locals[arg_info.varargs]
        if isinstance(candidate, tuple):
            # Sample at most the first four varargs values.
            varargs = [resolve_type(v) for v in candidate[:4]]
    return ResolvedTypes(pos_args=pos_args, varargs=varargs)
Resolve types from ArgInfo
def signed_headers(self):
    """An ordered dictionary containing the signed header names and values.

    The signed-header list is taken from the X-Amz-SignedHeaders query
    parameter when present, otherwise from the SignedHeaders field of
    the Authorization header.

    Raises:
        AttributeError: if the list is not canonicalized (lowercase,
            sorted) as AWS SigV4 requires.
    """
    signed_headers = self.query_parameters.get(_x_amz_signedheaders)
    if signed_headers is not None:
        # Query parameter values are lists; unquote the first entry.
        signed_headers = url_unquote(signed_headers[0])
    else:
        signed_headers = self.authorization_header_parameters[
            _signedheaders]
    parts = signed_headers.split(";")
    # SigV4 requires lowercase header names in lexicographic order.
    canonicalized = sorted([sh.lower() for sh in parts])
    if parts != canonicalized:
        raise AttributeError("SignedHeaders is not canonicalized: %r" %
                             (signed_headers,))
    return OrderedDict([(header, self.headers[header])
                        for header in signed_headers.split(";")])
An ordered dictionary containing the signed header names and values.
def _ReadUnionDataTypeDefinition(
    self, definitions_registry, definition_values, definition_name,
    is_member=False):
  """Reads an union data type definition.

  Thin wrapper that delegates to _ReadDataTypeDefinitionWithMembers with
  the UnionDefinition data type; unions do not support conditions.

  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type
        definitions registry.
    definition_values (dict[str, object]): definition values.
    definition_name (str): name of the definition.
    is_member (Optional[bool]): True if the data type definition is a
        member data type definition.

  Returns:
    UnionDefinition: union data type definition.
  """
  return self._ReadDataTypeDefinitionWithMembers(
      definitions_registry, definition_values, data_types.UnionDefinition,
      definition_name, supports_conditions=False)
Reads an union data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: UnionDefinition: union data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
def choose_candidate_pair(candidates):
    """Yield pairs of address candidates that have the same length.

    Candidates are ordered by descending score before pairing, so the
    highest scored pairs are produced first.

    :type candidates: dict[str, int]
    :param candidates: occurrence count per candidate substring
    """
    by_score = sorted(candidates, key=candidates.get, reverse=True)
    for idx, first in enumerate(by_score):
        for second in by_score[idx + 1:]:
            if len(first) == len(second):
                yield (first, second)
Choose a pair of address candidates ensuring they have the same length and starting with the highest scored ones :type candidates: dict[str, int] :param candidates: Count how often the longest common substrings appeared in the messages :return: