code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def from_points(cls, point1, point2):
    """Return a Vector instance for the displacement between two points.

    :param point1: Point the displacement is measured from.
    :param point2: Point subtracted from ``point1``.
    :return: a new instance of this class built from the displacement's
        x, y, z components.
    :raises TypeError: if either argument is not a Point.
    """
    if isinstance(point1, Point) and isinstance(point2, Point):
        displacement = point1.substract(point2)
        return cls(displacement.x, displacement.y, displacement.z)
    # Original raised a bare TypeError with no message; name the problem.
    raise TypeError('from_points() requires two Point instances')
Return a Vector instance from two given points.
def _output_text(complete_output, categories): output = "" for result in complete_output: list_result = complete_output[result] if list_result: list_result_sorted = sorted(list_result, key=lambda x: list_result[x], ...
Output the results obtained in text format. :return: str, html formatted output
def _deploy(self): timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: self.d.setup(timeout=timeout) self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: amulet.raise_status( amulet.FAIL, msg="Depl...
Deploy environment and wait for all hooks to finish executing.
def db_connect(cls, path):
    """Open a connection to the chainstate database at *path*.

    :return: a sqlite3 connection configured with the engine's row factory.
    """
    # isolation_level=None puts sqlite3 in autocommit mode; the huge
    # timeout makes a locked database wait effectively forever.
    connection = sqlite3.connect(path, isolation_level=None, timeout=2**30)
    connection.row_factory = StateEngine.db_row_factory
    return connection
connect to our chainstate db
def add_separator(self, sub_menu='Advanced'): action = QtWidgets.QAction(self) action.setSeparator(True) if sub_menu: try: mnu = self._sub_menus[sub_menu] except KeyError: pass else: mnu.addAction(action) ...
Adds a separator to the editor's context menu. :return: The separator that has been added. :rtype: QtWidgets.QAction
def load_module(self, module_name, path=None): self.ensure_started() if path is None: path = sys.path mod = self.client.call(_load_module, module_name, path) mod.__isolation_context__ = self return mod
Import a module into this isolation context and return a proxy for it.
def updateEvent(self, event=None, home=None): if not home: home=self.default_home if not event: listEvent = dict() for cam_id in self.lastEvent: listEvent[self.lastEvent[cam_id]['time']] = self.lastEvent[cam_id] event = listEvent[sorted(listEvent)[0]] ...
Update the list of event with the latest ones
def do_block(parser, token):
    """Process several nodes inside a single block.

    Parses the template up to the matching ``end<name>`` tag, stores the
    collected nodes as the ``nodelist`` keyword argument, and returns a
    BlockNode built from the tag's signature.
    """
    name, args, kwargs = get_signature(token, contextable=True)
    # Everything up to the matching end tag becomes this block's nodelist.
    kwargs['nodelist'] = parser.parse(('end%s' % name,))
    # Consume the end tag itself so parsing resumes after it.
    parser.delete_first_token()
    return BlockNode(parser, name, *args, **kwargs)
Process several nodes inside a single block Block functions take ``context``, ``nodelist`` as first arguments If the second to last argument is ``as``, the rendered result is stored in the context and is named whatever the last argument is. Syntax:: {% [block] [var args...] [name=value kwargs...] ...
def _is_list_iter(self): iter_var_type = ( self.context.vars.get(self.stmt.iter.id).typ if isinstance(self.stmt.iter, ast.Name) else None ) if isinstance(self.stmt.iter, ast.List) or isinstance(iter_var_type, ListType): return True if isins...
Test if the current statement is a type of list, used in for loops.
def data(self, data):
    """Append incoming bytes to the internal buffer and drain messages.

    The protocol handler is invoked repeatedly; each truthy return means a
    complete length-prefixed message was consumed and more may remain.
    """
    self._buffer += data
    # Keep handing the buffer to the handler until it reports that no
    # complete message is left.
    while self._data_handler():
        pass
Use a length prefixed protocol to give the length of a pickled message.
def search(query, team=None): if team is None: team = _find_logged_in_team() if team is not None: session = _get_session(team) response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query)) print("* Packages in team %s" % team) packages = response...
Search for packages
def geometric_series(q, n): q = np.asarray(q) if n < 0: raise ValueError('Finite geometric series is only defined for n>=0.') else: if q.ndim == 0: if q == 1: s = (n + 1) * 1.0 return s else: s = (1.0 - q ** (n + 1)) / (...
Compute finite geometric series. \frac{1-q^{n+1}}{1-q} q \neq 1 \sum_{k=0}^{n} q^{k}= n+1 q = 1 Parameters ---------- q : array-like The common ratio of the geometric series. n : int The num...
def alter_subprocess_kwargs_by_platform(**kwargs): kwargs.setdefault('close_fds', os.name == 'posix') if os.name == 'nt': CONSOLE_CREATION_FLAGS = 0 CREATE_NO_WINDOW = 0x08000000 CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW kwargs.setdefault('creationflags', CONSOLE_CREATION_F...
Given a dict, populate kwargs to create a generally useful default setup for running subprocess processes on different platforms. For example, `close_fds` is set on posix and creation of a new console window is disabled on Windows. This function will alter the given kwargs and return the...
def drop_table(self, cursor, target, options):
    """Drop the target table if it exists.

    :param cursor: database cursor used to execute the statement.
    :param target: key into ``self.qualified_names`` naming the table.
    :param options: unused; kept for interface compatibility.
    """
    qualified = self.qualified_names[target]
    cursor.execute('DROP TABLE IF EXISTS {0}'.format(qualified))
Drops the target table.
def same_types(self, index1, index2):
    """Return True if both symbol table entries share the same real type.

    Two entries match only when their types are equal AND that common type
    is not the NO_TYPE sentinel (chained comparison).

    NOTE(review): if the table lookup raises and ``self.error()`` returns
    instead of raising/exiting, the final ``return same`` hits an unbound
    local -- confirm that error() terminates execution.
    """
    try:
        same = self.table[index1].type == self.table[index2].type != SharedData.TYPES.NO_TYPE
    except Exception:
        self.error()
    return same
Returns True if both symbol table elements are of the same type
def read(self, entity=None, attrs=None, ignore=None, params=None): if attrs is None: attrs = self.update_json([]) if ignore is None: ignore = set() ignore.add('account_password') return super(AuthSourceLDAP, self).read(entity, attrs, ignore, params)
Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_.
def profiles():
    """Return all connection profile files, ordered by preference.

    Each glob pattern in PROFILES is expanded (``~`` included) in order,
    so earlier patterns' matches come first in the result.
    """
    matched = []
    for pattern in PROFILES:
        matched.extend(glob(os.path.expanduser(pattern)))
    return matched
List of all the connection profile files, ordered by preference. :returns: list of all Koji client config files. Example: ['/home/kdreyer/.koji/config.d/kojidev.conf', '/etc/koji.conf.d/stg.conf', '/etc/koji.conf.d/fedora.conf']
def ids2tokens(token_ids: Iterable[int], vocab_inv: Dict[int, str], exclude_set: Set[int]) -> Iterator[str]:
    """Transform token IDs into words, dropping any IDs in *exclude_set*.

    :param token_ids: iterable of token IDs (may be a one-shot iterator).
    :param vocab_inv: inverse vocabulary mapping ID -> word.
    :param exclude_set: token IDs to exclude from the output.
    :return: lazy iterator of words.
    """
    # Iterate token_ids exactly once: the original zipped two generators
    # over the same iterable, which silently skips every other ID when
    # token_ids is a one-shot iterator rather than a list.
    return (vocab_inv[token_id] for token_id in token_ids
            if token_id not in exclude_set)
Transforms a list of token IDs into a list of words, excluding any IDs in `exclude_set`. :param token_ids: The list of token IDs. :param vocab_inv: The inverse vocabulary. :param exclude_set: The list of token IDs to exclude. :return: The list of words.
def synchronizer_class(self): if not self.synchronizer_path or self.synchronizer_path == 'None' or not self.layer: return False if (self._synchronizer_class is not None and self._synchronizer_class.__name__ not in self.synchronizer_path): self._synchronizer = None sel...
returns synchronizer class
def _fmt(self, tag, msg): msg = msg or '<unset>' msg = str(msg) msg = msg.strip() if not msg: return if len(msg) > 2048: msg = msg[:1024] + '...' if msg.count('\n') <= 1: return '{}: {}\n'.format(tag, msg.strip()) else: ...
Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars.
def from_pypirc(pypi_repository): ret = {} pypirc_locations = PYPIRC_LOCATIONS for pypirc_path in pypirc_locations: pypirc_path = os.path.expanduser(pypirc_path) if os.path.isfile(pypirc_path): parser = configparser.SafeConfigParser() parser.read(pypirc_path) ...
Load configuration from .pypirc file, cached to only run once
def create_subvariant (self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements): assert is_iterable_typed(root_targets, virtual_target.VirtualTarget) assert is_iterable_typed(all_targets, virtual_target.VirtualTarget)...
Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties
def start(cls, ev): ev.stopPropagation() ev.preventDefault() author = cls.input_el.value.strip() if not author: cls.input_el.style.border = "2px solid red" return cls.hide_errors() AuthorBar.show(50) make_request( url=join(setti...
Event handler which starts the request to REST API.
def create_handler(target: str):
    """Create a logging handler for ``target``.

    'stderr' and 'stdout' map to StreamHandlers on the respective streams;
    anything else is treated as a file path for a WatchedFileHandler.
    """
    stream_targets = {'stderr': sys.stderr, 'stdout': sys.stdout}
    if target in stream_targets:
        return logging.StreamHandler(stream_targets[target])
    return logging.handlers.WatchedFileHandler(filename=target)
Create a handler for logging to ``target``
def main(): args = parser.parse_args() try: function = args.func except AttributeError: parser.print_usage() parser.exit(1) function(vars(args))
Parse the args and call whatever function was selected
def minimize(self, loss_fn, x, optim_state): grads = self._compute_gradients(loss_fn, x, optim_state) return self._apply_gradients(grads, x, optim_state)
Analogous to tf.Optimizer.minimize :param loss_fn: tf Tensor, representing the loss to minimize :param x: list of Tensor, analogous to tf.Optimizer's var_list :param optim_state: A possibly nested dict, containing any optimizer state. Returns: new_x: list of Tensor, updated version of `x` ...
def same_notebook_code(nb1, nb2): if len(nb1['cells']) != len(nb2['cells']): return False for n in range(len(nb1['cells'])): if nb1['cells'][n]['cell_type'] != nb2['cells'][n]['cell_type']: return False if nb1['cells'][n]['cell_type'] == 'code' and \ nb1['cell...
Return true of the code cells of notebook objects `nb1` and `nb2` are the same.
def get_a_satellite_link(sat_type, sat_dict): cls = get_alignak_class('alignak.objects.%slink.%sLink' % (sat_type, sat_type.capitalize())) return cls(params=sat_dict, parsing=False)
Get a SatelliteLink object for a given satellite type and a dictionary :param sat_type: type of satellite :param sat_dict: satellite configuration data :return:
def get_correlations(self, chain=0, parameters=None):
    """Return the correlation matrix between chain parameters.

    :param chain: chain index or name (defaults to the first chain).
    :param parameters: parameter names to include (defaults to all).
    :return: tuple of (parameter names, correlation matrix).
    """
    parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
    std = np.sqrt(np.diag(cov))
    # Normalise the covariance by the outer product of standard deviations.
    return parameters, cov / np.outer(std, std)
Takes a chain and returns the correlation between chain parameters. Parameters ---------- chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all ...
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches): validate_intervals(ref_intervals, est_intervals) if not ref_intervals.shape[0] == ref_pitches.shape[0]: raise ValueError('Reference intervals and pitches have different ' 'lengths.') if not est_intervals.s...
Checks that the input annotations to a metric look like time intervals and a pitch list, and throws helpful errors if not. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) ...
def sct_report_string(report): ret = [] namespaces = {"svrl": "http://purl.oclc.org/dsdl/svrl"} for index, failed_assert_el in enumerate( report.findall("svrl:failed-assert", namespaces=namespaces) ): ret.append( "{}. {}".format( index + 1, fai...
Return a human-readable string representation of the error report returned by lxml's schematron validator.
def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_features) self.weight.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) if self.padding_idx is not None: self.weight.weight.data[self.padding_idx].fill_(0)
Reinitiate the weight parameters.
def _handle_request_error(self, orig_request, error, start_response): headers = [('Content-Type', 'application/json')] status_code = error.status_code() body = error.rest_error() response_status = '%d %s' % (status_code, httplib.responses.get(status_code, ...
Handle a request error, converting it to a WSGI response. Args: orig_request: An ApiRequest, the original request from the user. error: A RequestError containing information about the error. start_response: A function with semantics defined in PEP-333. Returns: A string containing the ...
def send(self, cmd): self._bridge.send(cmd, wait=self.wait, reps=self.reps)
Send a command to the bridge. :param cmd: List of command bytes.
def modules_directory():
    """Return the absolute path of the core 'modules' directory.

    The directory sits next to this source file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "modules")
Get the core modules directory.
def hide(self):
    """Hide the spinner so the caller can write to the terminal.

    No-op when the spin thread is not running or the spinner is already
    hidden.
    """
    thread = self._spin_thread
    if not (thread and thread.is_alive()):
        return
    if self._hide_spin.is_set():
        return
    # Signal the spin loop to pause, then wipe the current line.
    self._hide_spin.set()
    sys.stdout.write("\r")
    self._clear_line()
    sys.stdout.flush()
Hide the spinner to allow for custom writing to the terminal.
def load(self):
    """Load all CLI modules.

    Idempotent: subsequent calls return immediately once modules have
    been loaded.
    """
    # Guard clause with plain truthiness instead of the `is True`
    # anti-idiom; the flag is only ever set to True/False by this class.
    if self._modules_loaded:
        return
    self.load_modules_from_python(routes.ALL_ROUTES)
    self.aliases.update(routes.ALL_ALIASES)
    self._load_modules_from_entry_points('softlayer.cli')
    self._modules_loaded = True
Loads all modules.
def add_query_occurrence(self, report): initial_millis = int(report['parsed']['stats']['millis']) mask = report['queryMask'] existing_report = self._get_existing_report(mask, report) if existing_report is not None: self._merge_report(existing_report, report) else: ...
Adds a report to the report aggregation
def get_any_node(self, addr):
    """Return the first VFG node whose basic-block address equals *addr*.

    With context sensitivity there may be several nodes for one address;
    whichever is encountered first is returned. Returns None when no node
    matches.
    """
    for node in self.graph.nodes():
        if node.addr == addr:
            return node
    return None
Get any VFG node corresponding to the basic block at @addr. Note that depending on the context sensitivity level, there might be multiple nodes corresponding to different contexts. This function will return the first one it encounters, which might not be what you want.
def resolve(self, pubID, sysID): ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID) return ret
Do a complete resolution lookup of an External Identifier
def format_keyword(keyword):
    """Strip characters not allowed in Analysis Service keywords.

    E.g. an instrument assay name 'HIV-1 2.0' becomes 'HIV120'.
    Falsy input (empty string, None) yields ''.
    """
    import re
    if not keyword:
        return ''
    # \W drops everything but [A-Za-z0-9_]; the second pass removes the
    # underscores that \W leaves behind.
    return re.sub("_", "", re.sub(r"\W", "", keyword))
Removing special character from a keyword. Analysis Services must have this kind of keywords. E.g. if assay name from the Instrument is 'HIV-1 2.0', an AS must be created on Bika with the keyword 'HIV120'
def load_statements(fname, as_dict=False): logger.info('Loading %s...' % fname) with open(fname, 'rb') as fh: if sys.version_info[0] < 3: stmts = pickle.load(fh) else: stmts = pickle.load(fh, encoding='latin1') if isinstance(stmts, dict): if as_dict: ...
Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are alway...
def match_var(self, tokens, item): setvar, = tokens if setvar != wildcard: if setvar in self.names: self.add_check(self.names[setvar] + " == " + item) else: self.add_def(setvar + " = " + item) self.names[setvar] = item
Matches a variable.
def metric_tensor(self) -> np.ndarray: return dot(self._matrix, self._matrix.T)
The metric tensor of the lattice.
def builds(self, request, pk=None): builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime') page = self.paginate_queryset(builds) serializer = BuildSerializer(page, many=True, context={'request': request}) return self.get_paginated_response(serializer.data)
List of builds for the current project.
def cont_cat_split(df, max_card=20, dep_var=None)->Tuple[List,List]: "Helper function that returns column names of cont and cat variables from given df." cont_names, cat_names = [], [] for label in df: if label == dep_var: continue if df[label].dtype == int and df[label].unique().shape[0] > ...
Helper function that returns column names of cont and cat variables from given df.
def _dump_spec(spec): with open("spec.yaml", "w") as f: yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary.
def get_children_treepos(self, treepos): children_treepos = [] for i, child in enumerate(self.dgtree[treepos]): if isinstance(child, nltk.Tree): children_treepos.append(child.treeposition()) elif is_leaf(child): treepos_list = list(treepos) ...
Given a treeposition, return the treepositions of its children.
def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False): items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) ...
Returns all datacenters in a vCenter. service_instance The Service Instance Object from which to obtain cluster. datacenter_names List of datacenter names to filter by. Default value is None. get_all_datacenters Flag specifying whether to retrieve all datacenters. Default ...
def filepaths_in_dir(path):
    """Find all files under *path* and return their paths relative to it.

    Args:
        path (str): the directory path to walk.

    Returns:
        list: relative paths of every file inside ``path`` and its
        subdirectories.
    """
    filepaths = []
    for root, _directories, filenames in os.walk(path):
        for filename in filenames:
            # os.path.relpath instead of str.replace: replace(path, '')
            # removes *every* occurrence of the prefix, corrupting results
            # when a subpath happens to repeat it (e.g. '/tmp/a/x/tmp/a/f').
            filepaths.append(os.path.relpath(os.path.join(root, filename), path))
    return filepaths
Find all files in a directory, and return the relative paths to those files. Args: path (str): the directory path to walk Returns: list: the list of relative paths to all files inside of ``path`` or its subdirectories.
def parse_secured_key(secured_key, key_nonce_separator='.', nonce_length=4, base=BASE62): parts = secured_key.split(key_nonce_separator) if len(parts) != 2: raise ValueError('Invalid secured key format') (key, nonce) = parts if len(nonce) != nonce_length: raise Va...
Parse a given secured key and return its associated integer, the key itself, and the embedded nonce. @param secured_key a string representation of a secured key composed of a key in Base62, a separator character, and a nonce. @param key_nonce_separator: the character that is used to separate th...
def handle_target(self, request, controller_args, controller_kwargs): try: param_args, param_kwargs = self.normalize_target_params( request=request, controller_args=controller_args, controller_kwargs=controller_kwargs ) ret = se...
Internal method for this class handles normalizing the passed in values from the decorator using .normalize_target_params() and then passes them to the set .target()
def getTopRight(self):
    """Return the (x, y) coordinates of the rect's upper-right point.

    Requires x, y, width and height to be convertible to float.
    """
    x = float(self.get_x()) + float(self.get_width())
    y = float(self.get_y()) + float(self.get_height())
    return (x, y)
Retrieves a tuple with the x,y coordinates of the upper right point of the rect. Requires the coordinates, width, height to be numbers
def _ask_for_ledger_status(self, node_name: str, ledger_id): self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id}, [node_name, ]) logger.info("{} asking {} for ledger status of ledger {}".format(self, node_name, ledger_id))
Ask other node for LedgerStatus
def do_logStream(self,args): parser = CommandArgumentParser("logStream") parser.add_argument(dest='logStream',help='logStream index.'); args = vars(parser.parse_args(args)) print "loading log stream {}".format(args['logStream']) index = int(args['logStream']) logStream = ...
Go to the specified log stream. logStream -h for detailed help
def _parse_tile_url(tile_url): props = tile_url.rsplit('/', 7) return ''.join(props[1:4]), '-'.join(props[4:7]), int(props[7])
Extracts tile name, data and AWS index from tile URL :param tile_url: Location of tile at AWS :type: tile_url: str :return: Tuple in a form (tile_name, date, aws_index) :rtype: (str, str, int)
def get_pelican_cls(settings):
    """Get the Pelican class requested in settings.

    PELICAN_CLASS may be the class object itself or a dotted string path
    like 'package.module.ClassName'.
    """
    import importlib
    cls = settings['PELICAN_CLASS']
    if isinstance(cls, str):
        module_path, cls_name = cls.rsplit('.', 1)
        # importlib.import_module returns the leaf module; the original
        # __import__(module_path) returns the TOP-LEVEL package, so any
        # nested dotted path failed with AttributeError.
        module = importlib.import_module(module_path)
        cls = getattr(module, cls_name)
    return cls
Get the Pelican class requested in settings
async def createcsrf(self, csrfarg = '_csrf'): await self.sessionstart() if not csrfarg in self.session.vars: self.session.vars[csrfarg] = uuid.uuid4().hex
Create a anti-CSRF token in the session
def process_origin( headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None ) -> Optional[Origin]: try: origin = cast(Origin, headers.get("Origin")) except MultipleValuesError: raise InvalidHeader("Origin", "more than one Origin header found") i...
Handle the Origin HTTP request header. Raise :exc:`~websockets.exceptions.InvalidOrigin` if the origin isn't acceptable.
def prepare_jochem(ctx, jochem, output, csoutput): click.echo('chemdataextractor.dict.prepare_jochem') for i, line in enumerate(jochem): print('JC%s' % i) if line.startswith('TM '): if line.endswith(' @match=ci\n'): for tokens in _make_tokens(line[3:-11]): ...
Process and filter jochem file to produce list of names for dictionary.
def clean_whitespace(statement):
    """Collapse all whitespace in the statement text to single spaces.

    Newlines, carriage returns and tabs become spaces, leading/trailing
    whitespace is stripped, and runs of spaces are squeezed to one.
    Returns the (mutated) statement.
    """
    import re
    text = statement.text
    for ws in ('\n', '\r', '\t'):
        text = text.replace(ws, ' ')
    statement.text = re.sub(' +', ' ', text.strip())
    return statement
Remove any consecutive whitespace characters from the statement text.
def process_json(json_dict): ep = EidosProcessor(json_dict) ep.extract_causal_relations() ep.extract_correlations() ep.extract_events() return ep
Return an EidosProcessor by processing a Eidos JSON-LD dict. Parameters ---------- json_dict : dict The JSON-LD dict to be processed. Returns ------- ep : EidosProcessor A EidosProcessor containing the extracted INDRA Statements in its statements attribute.
def read_unicode(path, encoding, encoding_errors):
    """Return the contents of the file at *path* as a unicode string.

    :param path: file path to read (opened in binary mode).
    :param encoding: character encoding passed to make_unicode.
    :param encoding_errors: error-handling scheme passed to make_unicode.
    """
    # 'with' guarantees the handle is closed even if make_unicode raises,
    # replacing the manual open/try/finally/close dance.
    with open(path, 'rb') as f:
        return make_unicode(f.read(), encoding, encoding_errors)
Return the contents of a file as a unicode string.
def api_submit(): data = request.files.file response.content_type = 'application/json' if not data or not hasattr(data, 'file'): return json.dumps({"status": "Failed", "stderr": "Missing form params"}) return json.dumps(analyse_pcap(data.file, data.filename), default=jsondate, indent=4)
Blocking POST handler for file submission. Runs snort on supplied file and returns results as json text.
def recalc_M(S, D_cba, Y, nr_sectors): Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors) Y_inv = np.linalg.inv(Y_diag) M = D_cba.dot(Y_inv) if type(D_cba) is pd.DataFrame: M.columns = D_cba.columns M.index = D_cba.index return M
Calculate Multipliers based on footprints. Parameters ---------- D_cba : pandas.DataFrame or numpy array Footprint per sector and country Y : pandas.DataFrame or numpy array Final demand: aggregated across categories or just one category, one column per country. This will be dia...
def record(self): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!') outlist = [b'SL', struct.pack('=BBB', self.current_length(), SU_ENTRY_VERSION, self.flags)] for comp in self.symlink_components: outlist.append(comp.r...
Generate a string representing the Rock Ridge Symbolic Link record. Parameters: None. Returns: String containing the Rock Ridge record.
def pad_array(in1):
    """Pad a 2-D array to twice its size for linear convolution.

    INPUTS:
    in1 (no default): 2-D input array to be padded.

    OUTPUTS:
    out1: zero array of twice the shape with *in1* centred in it.
    """
    padded_size = 2 * np.array(in1.shape)
    out1 = np.zeros([padded_size[0], padded_size[1]])
    # Floor division keeps the slice indices integral; the original used
    # plain '/', which yields floats and raises TypeError on Python 3.
    out1[padded_size[0] // 4:3 * padded_size[0] // 4,
         padded_size[1] // 4:3 * padded_size[1] // 4] = in1
    return out1
Simple convenience function to pad arrays for linear convolution. INPUTS: in1 (no default): Input array which is to be padded. OUTPUTS: out1 Padded version of the input.
def is_diff(self):
    """Return True if any differences (additions, updates, deletions)
    are logged in the details dict; False otherwise or when details is
    not a dict."""
    if not isinstance(self.details, dict):
        return False
    return any(self.details.get(key)
               for key in ('additions', 'updates', 'deletions'))
Return True if there are any differences logged
def unpack_archive(*components, **kwargs) -> str: path = fs.abspath(*components) compression = kwargs.get("compression", "bz2") dir = kwargs.get("dir", fs.dirname(path)) fs.cd(dir) tar = tarfile.open(path, "r:" + compression) tar.extractall() tar.close() fs.cdpop() return dir
Unpack a compressed archive. Arguments: *components (str[]): Absolute path. **kwargs (dict, optional): Set "compression" to compression type. Default: bz2. Set "dir" to destination directory. Defaults to the directory of the archive. Returns: str: Path to direct...
def qubo_energy(sample, Q, offset=0.0):
    """Calculate the energy of *sample* under QUBO model *Q*.

    The energy is the constant *offset* plus, for every (u, v) pair in Q,
    sample[u] * sample[v] * Q[(u, v)]. Diagonal entries (u == u) act as
    linear biases since binary variables satisfy x*x == x.
    """
    return offset + sum(sample[u] * sample[v] * bias
                        for (u, v), bias in Q.items())
Calculate the energy for the specified sample of a QUBO model. Energy of a sample for a binary quadratic model is defined as a sum, offset by the constant energy offset associated with the model, of the sample multipled by the linear bias of the variable and all its interactions. For a quadratic uncons...
def getCandScoresMap(self, profile): elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() wmg = profile.getWmg() maximinScores = dict() for cand in wmg.keys(): maximinScor...
Returns a dictionary that associates integer representations of each candidate with their maximin score. :ivar Profile profile: A Profile object that represents an election profile.
def age(self, as_at_date=None): if self.date_of_death != None or self.is_deceased == True: return None as_at_date = date.today() if as_at_date == None else as_at_date if self.date_of_birth != None: if (as_at_date.month >= self.date_of_birth.month) and (as_at_date.day >= s...
Compute the person's age
def get_query_targets(cli_ctx, apps, resource_group): if isinstance(apps, list): if resource_group: return [get_id_from_azure_resource(cli_ctx, apps[0], resource_group)] return list(map(lambda x: get_id_from_azure_resource(cli_ctx, x), apps)) else: if resource_group: ...
Produces a list of uniform GUIDs representing applications to query.
def put_bits( self, value, count ): for _ in range( count ): bit = (value & 1) value >>= 1 if self.bits_reverse: if self.insert_at_msb: self.current_bits |= (bit << (self.bits_remaining-1)) else: self.cur...
Push bits into the target. value Integer containing bits to push, ordered from least-significant bit to most-significant bit. count Number of bits to push to the target.
def wait_for(self, event, predicate, result=None): future = self.loop.create_future() entry = EventListener(event=event, predicate=predicate, result=result, future=future) self._dispatch_listeners.append(entry) return future
Waits for a DISPATCH'd event that meets the predicate. Parameters ----------- event: :class:`str` The event name in all upper case to wait for. predicate A function that takes a data parameter to check for event properties. The data parameter is the '...
def merge_pdfs(pdf_names, output) -> None:
    """Merge all PDFs in *pdf_names*, in order, into one PDF at *output*.

    :param pdf_names: iterable of input PDF file paths.
    :param output: output file path or writable binary file object.
    """
    merger = PyPDF2.PdfFileMerger()
    try:
        for pdf_name in pdf_names:
            merger.append(pdf_name)
        merger.write(output)
    finally:
        # Close even when append/write raises, so the open input file
        # handles held by the merger are released.
        merger.close()
Merges all pdfs together into a single long PDF.
def load_public_key(vm_): public_key_filename = config.get_cloud_config_value( 'ssh_public_key', vm_, __opts__, search_global=False, default=None ) if public_key_filename is not None: public_key_filename = os.path.expanduser(public_key_filename) if not os.path.isfile(public_key_filen...
Load the public key file if exists.
def _dict_to_name_value(data): if isinstance(data, dict): sorted_data = sorted(data.items(), key=lambda s: s[0]) result = [] for name, value in sorted_data: if isinstance(value, dict): result.append({name: _dict_to_name_value(value)}) else: ...
Convert a dictionary to a list of dictionaries to facilitate ordering
def print_access(access, title): columns = ['id', 'hostname', 'Primary Public IP', 'Primary Private IP', 'Created'] table = formatting.Table(columns, title) for host in access: host_id = host.get('id') host_fqdn = host.get('fullyQualifiedDomainName', '-') host_primary = host.get('pri...
Prints out the hardware or virtual guests a user can access
def now_heating(self):
    """Return the current heating state for this side of the bed.

    Returns None when the device payload is unavailable (the TypeError
    path) or when ``self.side`` is neither 'left' nor 'right' -- the
    original hit an UnboundLocalError in that last case.
    """
    try:
        if self.side == 'left':
            return self.device.device_data['leftNowHeating']
        if self.side == 'right':
            return self.device.device_data['rightNowHeating']
        return None
    except TypeError:
        # device_data is None / not subscriptable yet.
        return None
Return current heating state.
def process_iter(proc, cmd=""): try: for l in proc.stdout: yield l finally: if proc.poll() is None: return else: proc.wait() if proc.returncode not in (0, None, signal.SIGPIPE, signal.SIGPIPE + 128): sys.stderr.write("cmd wa...
helper function to iterate over a process stdout and report error messages when done
def files_comments_edit( self, *, comment: str, file: str, id: str, **kwargs ) -> SlackResponse: kwargs.update({"comment": comment, "file": file, "id": id}) return self.api_call("files.comments.edit", json=kwargs)
Edit an existing file comment. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890' id (str): The file comment id. e.g. 'Fc1234567890'
def show_files(md5): if not WORKBENCH: return flask.redirect('/') md5_view = WORKBENCH.work_request('view', md5) return flask.render_template('templates/md5_view.html', md5_view=md5_view['view'], md5=md5)
Renders template with `view` of the md5.
def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}): params = params.copy() if user is not None: params['user'] = user if user_id is not None: params['user_id'] = user_id if screen_name is not None: ...
Send a direct message
def get_if_present(self, name, default=None): if not self.processed_data: raise exceptions.FormNotProcessed('The form data has not been processed yet') if name in self.field_dict: return self[name] return default
Returns the value for a field, but if the field doesn't exist will return default instead
def remove_sshkey(host, known_hosts=None): if known_hosts is None: if 'HOME' in os.environ: known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME']) else: try: known_hosts = '{0}/.ssh/known_hosts'.format( pwd.getpwuid(os.getuid()).pw...
Remove a host from the known_hosts file
def validate_capacity(capacity): if capacity not in VALID_SCALING_CONFIGURATION_CAPACITIES: raise ValueError( "ScalingConfiguration capacity must be one of: {}".format( ", ".join(map( str, VALID_SCALING_CONFIGURATION_CAPACITIES ...
Validate ScalingConfiguration capacity for serverless DBCluster
def getdminfo(self, columnname=None): dminfo = self._getdminfo() if columnname is None: return dminfo for fld in dminfo.values(): if columnname in fld["COLUMNS"]: fldc = fld.copy() del fldc['COLUMNS'] return fldc rai...
Get data manager info. Each column in a table is stored using a data manager. A storage manager is a data manager storing the physically in a file. A virtual column engine is a data manager that does not store data but calculates it on the fly (e.g. scaling floats to short to re...
def _post(self, xml_query): req = urllib2.Request(url = 'http://www.rcsb.org/pdb/rest/search', data=xml_query) f = urllib2.urlopen(req) return f.read().strip()
POST the request.
def get_header_example(cls, header): if header.is_array: result = cls.get_example_for_array(header.item) else: example_method = getattr(cls, '{}_example'.format(header.type)) result = example_method(header.properties, header.type_format) return {header.name: r...
Get example for header object :param Header header: Header object :return: example :rtype: dict
def check_command(self, op_description, op=None, data=b'', chk=0, timeout=DEFAULT_TIMEOUT): val, data = self.command(op, data, chk, timeout=timeout) if len(data) < self.STATUS_BYTES_LENGTH: raise FatalError("Failed to %s. Only got %d byte status response." % (op_description, len(data))) ...
Execute a command with 'command', check the result code and throw an appropriate FatalError if it fails. Returns the "result" of a successful command.
def new(namespace, name, wdl, synopsis, documentation=None, api_url=fapi.PROD_API_ROOT): r = fapi.update_workflow(namespace, name, synopsis, wdl, documentation, api_url) fapi._check_response_code(r, 201) d = r.json() return Method(namespace, n...
Create new FireCloud method. If the namespace + name already exists, a new snapshot is created. Args: namespace (str): Method namespace for this method name (str): Method name wdl (file): WDL description synopsis (str): Short description of task ...
def get_unread_forums_from_list(self, forums, user): unread_forums = [] visibility_contents = ForumVisibilityContentTree.from_forums(forums) forum_ids_to_visibility_nodes = visibility_contents.as_dict tracks = super().get_queryset().select_related('forum').filter( user=user, ...
Filter a list of forums and return only those which are unread. Given a list of forums find and returns the list of forums that are unread for the passed user. If a forum is unread all of its ancestors are also unread and will be included in the final list.
def format_config(sensor_graph): cmdfile = CommandFile("Config Variables", "1.0") for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()): for conf_var, conf_def in sorted(sensor_graph.config_database[slot].items()): conf_type, conf_val = conf_def if conf_type ...
Extract the config variables from this sensor graph in ASCII format. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: str: The ascii output lines concatenated as a single string
def _is_request_in_exclude_path(self, request): if self._exclude_paths: for path in self._exclude_paths: if request.path.startswith(path): return True return False else: return False
Check if the request path is in the `_exclude_paths` list
def addmag(self, magval):
    """Add a scalar magnitude to existing flux values.

    flux_new = 10**(-0.4 * magval) * flux

    Parameters
    ----------
    magval : number
        Magnitude value (must be scalar).

    Raises
    ------
    TypeError
        If *magval* is not a scalar.
    """
    if not N.isscalar(magval):
        raise TypeError(".addmag() only takes a constant scalar argument")
    return self * 10**(-0.4 * magval)
Add a scalar magnitude to existing flux values. .. math:: \\textnormal{flux}_{\\textnormal{new}} = 10^{-0.4 \\; \\textnormal{magval}} \\; \\textnormal{flux} Parameters ---------- magval : number Magnitude value. Returns ------- sp : `Co...
def prep_args(arg_info): filtered_args = [a for a in arg_info.args if getattr(arg_info, 'varargs', None) != a] if filtered_args and (filtered_args[0] in ('self', 'cls')): filtered_args = filtered_args[1:] pos_args = [] if filtered_args: for arg in filtered_args: if isinstance...
Resolve types from ArgInfo
def signed_headers(self): signed_headers = self.query_parameters.get(_x_amz_signedheaders) if signed_headers is not None: signed_headers = url_unquote(signed_headers[0]) else: signed_headers = self.authorization_header_parameters[ _signedheaders] p...
An ordered dictionary containing the signed header names and values.
def _ReadUnionDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): return self._ReadDataTypeDefinitionWithMembers( definitions_registry, definition_values, data_types.UnionDefinition, definition_name, supports_conditions=False)
Reads an union data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type...
def choose_candidate_pair(candidates):
    """Yield pairs of equal-length address candidates, best scored first.

    :type candidates: dict[str, int]
    :param candidates: occurrence count for each candidate substring.
    :return: generator of (candidate, candidate) pairs where both strings
        have the same length, ordered by descending score.
    """
    ranked = sorted(candidates, key=candidates.get, reverse=True)
    for idx, first in enumerate(ranked):
        for second in ranked[idx + 1:]:
            if len(first) == len(second):
                yield (first, second)
Choose a pair of address candidates ensuring they have the same length and starting with the highest scored ones :type candidates: dict[str, int] :param candidates: Count how often the longest common substrings appeared in the messages :return: