code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def do_HEAD(self):
    """Handle an HTTP HEAD request by proxying a CoAP GET.

    Only the response headers are forwarded to the HTTP client.
    """
    self.do_initial_operations()
    response = self.client.get(self.coap_uri.path)
    self.client.stop()
    logger.info("Server response: %s", response.pretty_print())
    self.set_http_header(response)
Perform a HEAD request
def visit_SetComp(self, node: ast.SetComp) -> Any:
    """Compile the set comprehension as a function, call it, record the result."""
    result = self._execute_comprehension(node=node)
    # Visit the generator iterables so their recomputed values are captured too.
    for gen in node.generators:
        self.visit(gen.iter)
    self.recomputed_values[node] = result
    return result
Compile the set comprehension as a function and call it.
def _import_module(self, module_path):
    """Dynamically import a module, returning a handle to it.

    :param str module_path: The (possibly dotted) module path
    :rtype: module or None
    """
    LOGGER.debug('Importing %s', module_path)
    try:
        # importlib.import_module returns the leaf module for dotted paths,
        # whereas __import__('a.b') returns the top-level package 'a' --
        # which contradicts "returning a handle to it".
        import importlib
        return importlib.import_module(module_path)
    except ImportError as error:
        LOGGER.critical('Could not import %s: %s', module_path, error)
        return None
Dynamically import a module returning a handle to it. :param str module_path: The module path :rtype: module
def _GetRelPath(self, filename):
    """Return ``filename`` relative to the repo subdirectory.

    Strips the subdir prefix plus any leading path separators.
    """
    assert filename.startswith(self.subdir), (filename, self.subdir)
    relative = filename[len(self.subdir):]
    return relative.lstrip(r"\/")
Get relative path of a file according to the current directory, given its logical path in the repo.
def reduce_multiline(string):
    """Collapse a multiline string into a single line of text.

    Each line is stripped of surrounding whitespace; blank lines are dropped.

    args:
        string: the text to reduce
    """
    lines = str(string).split("\n")
    stripped = (line.strip() for line in lines)
    return " ".join(line for line in stripped if line)
Reduces a multiline string to a single line of text. args: string: the text to reduce
def _get_update_fn(strategy):
    """Resolve a merge strategy to its update callable.

    :param strategy: a key from the merge-strategy table, a callable, or
        None (defaults to MS_DICTS).
    :return: callable used to update objects
    :raises ValueError: if the strategy is neither known nor callable
    """
    if strategy is None:
        strategy = MS_DICTS
    try:
        return _MERGE_FNS[strategy]
    except KeyError:
        # Not a registered name: accept any user-supplied callable.
        if callable(strategy):
            return strategy
        raise ValueError("Wrong merge strategy: %r" % strategy)
Select dict-like class based on merge strategy and orderness of keys. :param merge: Specify strategy from MERGE_STRATEGIES of how to merge dicts. :return: Callable to update objects
def MigrateArtifacts():
    """Migrates Artifacts from AFF4 to REL_DB.

    Existing REL_DB artifacts are deleted first, then user-created (custom)
    artifacts from AFF4 are migrated over.
    """
    artifacts = data_store.REL_DB.ReadAllArtifacts()
    if artifacts:
        logging.info("Deleting %d artifacts from REL_DB.", len(artifacts))
        # Reuse the list already read instead of issuing a second
        # ReadAllArtifacts round-trip to the datastore.
        for artifact in artifacts:
            data_store.REL_DB.DeleteArtifact(Text(artifact.name))
    else:
        logging.info("No artifacts found in REL_DB.")
    artifacts = artifact_registry.REGISTRY.GetArtifacts(
        reload_datastore_artifacts=True)
    logging.info("Found %d artifacts in AFF4.", len(artifacts))
    artifacts = [artifact for artifact in artifacts if _IsCustom(artifact)]
    logging.info("Migrating %d user-created artifacts.", len(artifacts))
    for artifact in artifacts:
        _MigrateArtifact(artifact)
Migrates Artifacts from AFF4 to REL_DB.
def check_keypoints(keypoints, rows, cols):
    """Validate that every keypoint lies within the image boundaries."""
    for keypoint in keypoints:
        check_keypoint(keypoint, rows, cols)
Check if keypoints boundaries are in range [0, 1)
def date_elem(ind_days, ind_minutes):
    """Create a parser extracting a datetime from product-specific blocks.

    ``ind_days`` and ``ind_minutes`` index the days and minutes fields in
    the sequence handed to the returned callable.
    """
    def parse(seq):
        # The helper expects milliseconds, so convert minutes accordingly.
        return nexrad_to_datetime(seq[ind_days], seq[ind_minutes] * 60 * 1000)
    return parse
Create a function to parse a datetime from the product-specific blocks.
def fresh(t, non_generic):
    """Make a copy of the type expression ``t``.

    Generic variables are duplicated; non_generic variables are shared.

    Args:
        t: A type to be copied.
        non_generic: A set of non-generic TypeVariables.
    """
    mappings = {}  # generic TypeVariable -> its fresh duplicate

    def freshrec(tp):
        p = prune(tp)
        if isinstance(p, TypeVariable):
            if is_generic(p, non_generic):
                if p not in mappings:
                    mappings[p] = TypeVariable()
                return mappings[p]
            return p
        if isinstance(p, dict):
            return p
        # NOTE: the more specific classes must be tested before TypeOperator.
        if isinstance(p, Collection):
            return Collection(*[freshrec(x) for x in p.types])
        if isinstance(p, Scalar):
            return Scalar([freshrec(x) for x in p.types])
        if isinstance(p, TypeOperator):
            return TypeOperator(p.name, [freshrec(x) for x in p.types])
        if isinstance(p, MultiType):
            return MultiType([freshrec(x) for x in p.types])
        assert False, "missing freshrec case {}".format(type(p))

    return freshrec(t)
Makes a copy of a type expression. The type t is copied. The generic variables are duplicated and the non_generic variables are shared. Args: t: A type to be copied. non_generic: A set of non-generic TypeVariables
def overrules(self, other):
    """Detect whether this index can overwrite ``other``.

    :param other: The other index
    :type other: Index
    :rtype: bool
    """
    if other.is_primary():
        return False
    if self.is_simple_index() and other.is_unique():
        return False
    # Overrules when spanning the same columns, being primary/unique, and
    # sharing the same partial-index predicate.
    return (
        self.spans_columns(other.get_columns())
        and (self.is_primary() or self.is_unique())
        and self.same_partial_index(other)
    )
Detects if the other index is a non-unique, non primary index that can be overwritten by this one. :param other: The other index :type other: Index :rtype: bool
def finalcallback(request, **kwargs):
    """Perform the callback of the related service after the auth request."""
    default_provider.load_services()
    service = default_provider.get_service(kwargs.get('service_name'))
    callback = getattr(service, 'callback')
    return render_to_response(callback(request))
Perform the callback of the related service after the auth request from UserServiceCreateView.
def _ReadEncryptedData(self, read_size):
    """Read and decrypt data from the file-like object.

    Args:
        read_size (int): number of bytes of encrypted data to read.

    Returns:
        int: number of bytes of encrypted data actually read.
    """
    encrypted_data = self._file_object.read(read_size)
    read_count = len(encrypted_data)
    # Prepend any ciphertext left over from the previous call; the decrypter
    # returns the plaintext plus the still-undecryptable remainder.
    self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])
    self._decrypted_data, self._encrypted_data = self._decrypter.Decrypt(
        self._encrypted_data)
    self._decrypted_data_size = len(self._decrypted_data)
    return read_count
Reads encrypted data from the file-like object. Args: read_size (int): number of bytes of encrypted data to read. Returns: int: number of bytes of encrypted data read.
def _check_radians(value, max_radians=2 * np.pi):
    """Validate that ``value`` plausibly holds radians rather than degrees.

    Pint quantities are converted to radian magnitudes; a warning is issued
    when the maximum absolute value exceeds ``max_radians``.

    Returns the (possibly converted) value.
    """
    try:
        value = value.to('radians').m
    except AttributeError:
        # Plain numbers have no unit conversion; assume they are radians.
        pass
    if np.greater(np.nanmax(np.abs(value)), max_radians):
        warnings.warn('Input over {} radians. '
                      'Ensure proper units are given.'.format(max_radians))
    return value
Input validation of values that could be in degrees instead of radians. Parameters ---------- value : `pint.Quantity` The input value to check. max_radians : float Maximum absolute value of radians before warning. Returns ------- `pint.Quantity` The input value
async def stderr(self) -> AsyncGenerator[str, None]:
    """Yield lines from the subprocess's stderr once it is running."""
    await self.wait_running()
    async for line in self._subprocess.stderr:
        yield line
Asynchronous generator for lines from subprocess stderr.
def render_block_to_string(template_name, block_name, context=None):
    """Load ``template_name`` and render ``block_name`` with ``context``.

    A list/tuple of names is resolved via select_template(); a single name
    via get_template().  Returns the rendered block as a string.

    Raises UnsupportedEngine for backends other than Django/Jinja2.
    """
    if isinstance(template_name, (tuple, list)):
        template = loader.select_template(template_name)
    else:
        template = loader.get_template(template_name)
    context = context or {}
    if isinstance(template, DjangoTemplate):
        return django_render_block(template, block_name, context)
    if isinstance(template, Jinja2Template):
        # Imported lazily so Jinja2 stays an optional dependency.
        from render_block.jinja2 import jinja2_render_block
        return jinja2_render_block(template, block_name, context)
    raise UnsupportedEngine(
        'Can only render blocks from the Django template backend.')
Loads the given template_name and renders the given block with the given dictionary as context. Returns a string. template_name The name of the template to load and render. If it's a list of template names, Django uses select_template() instead of get_template() to find the template.
def cli(env, sortby, datacenter):
    """List number of block storage volumes per datacenter."""
    block_manager = SoftLayer.BlockStorageManager(env.client)
    mask = "mask[serviceResource[datacenter[name]],"\
           "replicationPartners[serviceResource[datacenter[name]]]]"
    block_volumes = block_manager.list_block_volumes(datacenter=datacenter,
                                                     mask=mask)
    # Count volumes per datacenter; volumes without datacenter info are
    # skipped.  dict.get replaces the membership-test-then-branch pattern
    # (and the `x not in d.keys()` anti-pattern).
    datacenters = {}
    for volume in block_volumes:
        service_resource = volume['serviceResource']
        if 'datacenter' in service_resource:
            name = service_resource['datacenter']['name']
            datacenters[name] = datacenters.get(name, 0) + 1
    table = formatting.KeyValueTable(DEFAULT_COLUMNS)
    table.sortby = sortby
    for datacenter_name, count in datacenters.items():
        table.add_row([datacenter_name, count])
    env.fout(table)
List number of block storage volumes per datacenter.
def indication(self, apdu):
    """Dispatch a downstream packet to the handler for the current state."""
    if _debug:
        ServerSSM._debug("indication %r", apdu)
    state = self.state
    if state == IDLE:
        self.idle(apdu)
    elif state == SEGMENTED_REQUEST:
        self.segmented_request(apdu)
    elif state == AWAIT_RESPONSE:
        self.await_response(apdu)
    elif state == SEGMENTED_RESPONSE:
        self.segmented_response(apdu)
    else:
        # Unknown state: log and drop the packet.
        if _debug:
            ServerSSM._debug(" - invalid state")
This function is called for each downstream packet related to the transaction.
def save(self):
    """Save each of the settings to the DB.

    Translatable settings write one value per language (or the active
    language when modeltranslation is enabled); others write a single value.
    """
    active_language = get_language()
    for (name, value) in self.cleaned_data.items():
        if name not in registry:
            # Field name carries a language suffix added by modeltranslation.
            name, code = name.rsplit('_modeltranslation_', 1)
        else:
            code = None
        setting_obj, _created = Setting.objects.get_or_create(name=name)
        if settings.USE_MODELTRANSLATION:
            if registry[name]["translatable"]:
                try:
                    activate(code)
                except Exception:
                    # Narrowed from a bare `except:` -- an unknown language
                    # code is best-effort, but KeyboardInterrupt/SystemExit
                    # should not be swallowed.
                    pass
                finally:
                    setting_obj.value = value
                activate(active_language)
            else:
                # Mirror the same value into every configured language.
                for code in OrderedDict(settings.LANGUAGES):
                    setattr(setting_obj,
                            build_localized_fieldname('value', code), value)
        else:
            setting_obj.value = value
        setting_obj.save()
Save each of the settings to the DB.
def drug_names_to_generic(drugs: List[str],
                          unknown_to_default: bool = False,
                          default: str = None,
                          include_categories: bool = False) -> List[str]:
    """Convert each drug name in ``drugs`` to its generic equivalent.

    Arguments are as for :func:`drug_name_to_generic`; this function merely
    maps it over a list.
    """
    return [
        drug_name_to_generic(name,
                             unknown_to_default=unknown_to_default,
                             default=default,
                             include_categories=include_categories)
        for name in drugs
    ]
Converts a list of drug names to their generic equivalents. The arguments are as for :func:`drug_name_to_generic` but this function handles a list of drug names rather than a single one. Note in passing the following conversion of blank-type representations from R via ``reticulate``, when using e.g. the ``default`` parameter and storing results in a ``data.table()`` character column: .. code-block:: none ------------------------------ ---------------- To Python Back from Python ------------------------------ ---------------- [not passed, so Python None] "NULL" NULL "NULL" NA_character_ "NA" NA TRUE (logical) ------------------------------ ----------------
def _split_op(
        self, identifier, hs_label=None, dagger=False, args=None):
    """Split an operator into name, subscript, superscript and args strings.

    All returned strings are fully rendered.  ``hs_label`` is placed in the
    subscript or superscript according to the 'show_hs_label' setting;
    ``dagger`` appends the dagger symbol to the superscript.
    """
    if self._isinstance(identifier, 'SymbolicLabelBase'):
        identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(
            identifier.expr)
    name, total_subscript = self._split_identifier(identifier)
    total_superscript = ''
    if hs_label not in [None, '']:
        if self._settings['show_hs_label'] == 'subscript':
            if len(total_subscript) == 0:
                total_subscript = '(' + hs_label + ')'
            else:
                total_subscript += ',(' + hs_label + ')'
        else:
            total_superscript += '(' + hs_label + ')'
    if dagger:
        total_superscript += self._dagger_sym
    args_str = ''
    if (args is not None) and (len(args) > 0):
        rendered = [self.doprint(arg) for arg in args]
        args_str = (self._parenth_left + ",".join(rendered)
                    + self._parenth_right)
    return name, total_subscript, total_superscript, args_str
Return `name`, total `subscript`, total `superscript` and `arguments` str. All of the returned strings are fully rendered. Args: identifier (str or SymbolicLabelBase): A (non-rendered/ascii) identifier that may include a subscript. The output `name` will be the `identifier` without any subscript hs_label (str): The rendered label for the Hilbert space of the operator, or None. Returned unchanged. dagger (bool): Flag to indicate whether the operator is daggered. If True, :attr:`dagger_sym` will be included in the `superscript` (or `subscript`, depending on the settings) args (list or None): List of arguments (expressions). Each element will be rendered with :meth:`doprint`. The total list of args will then be joined with commas, enclosed with :attr:`_parenth_left` and :attr:`parenth_right`, and returnd as the `arguments` string
def run(path, tasks):
    """Run a pylpfile and start the given tasks.

    Exits the process when the file is missing or raises during execution.
    """
    readable_path = make_readable_path(path)
    if not os.path.isfile(path):
        logger.log(logger.red("Can't read pylpfile "),
                   logger.magenta(readable_path))
        sys.exit(-1)
    logger.log("Using pylpfile ", logger.magenta(readable_path))
    try:
        runpy.run_path(path, None, "pylpfile")
    except Exception:
        # The exception object itself was never used; the traceback carries
        # all the detail we report.
        traceback.print_exc(file=sys.stdout)
        logger.log(logger.red(
            "\nAn error has occurred during the execution of the pylpfile"))
        sys.exit(-1)
    for name in tasks:
        pylp.start(name)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(wait_and_quit(loop))
Run a pylpfile.
def assignLeafRegisters(inodes, registerMaker):
    """Assign registers to leaf nodes, sharing one register per distinct key."""
    registers_by_key = {}
    for node in inodes:
        key = node.key()
        if key not in registers_by_key:
            # Make a new register only the first time a key is seen.
            registers_by_key[key] = registerMaker(node)
        node.reg = registers_by_key[key]
Assign new registers to each of the leaf nodes.
def complete_use(self, text, *_):
    """Autocomplete region names for the ``use`` command."""
    return [region + " " for region in REGIONS if region.startswith(text)]
Autocomplete for use
def compile_state_cpfs(self,
                       scope: Dict[str, TensorFluent],
                       batch_size: Optional[int] = None,
                       noise: Optional[Noise] = None) -> List[CPFPair]:
    """Compile next-state fluent CPFs for the given state/action scope.

    Args:
        scope: The fluent scope for CPF evaluation.
        batch_size: The batch size.
        noise: Optional per-CPF reparameterization noise.

    Returns:
        (name, fluent) pairs ordered by the domain's next-state fluent
        ordering.
    """
    next_state_fluents = []
    with self.graph.as_default():
        with tf.name_scope('state_cpfs'):
            for cpf in self.rddl.domain.state_cpfs:
                cpf_noise = noise.get(cpf.name, None) if noise is not None else None
                name_scope = utils.identifier(cpf.name)
                with tf.name_scope(name_scope):
                    t = self._compile_expression(
                        cpf.expr, scope, batch_size, cpf_noise)
                next_state_fluents.append((cpf.name, t))

    def ordering(pair):
        return self.rddl.domain.next_state_fluent_ordering.index(pair[0])

    return sorted(next_state_fluents, key=ordering)
Compiles the next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
          parse_float=None, parse_int=None, parse_constant=None,
          object_pairs_hook=None, **kargs):
    """JSON load-from-string that defaults the decoder class to JSONTreeDecoder.

    ``encoding`` is accepted for backward compatibility but not forwarded:
    json.loads ignored it since Python 3.1 and removed it in 3.9.
    """
    # json.loads takes these options as keyword-only arguments; the previous
    # positional call raised TypeError on Python 3.
    return json.loads(s, cls=cls, object_hook=object_hook,
                      parse_float=parse_float, parse_int=parse_int,
                      parse_constant=parse_constant,
                      object_pairs_hook=object_pairs_hook, **kargs)
JSON load from string function that defaults the loading class to be JSONTreeDecoder
def traverse_levelorder(self, leaves=True, internal=True):
    """Yield nodes in level order starting at this ``Node``.

    Args:
        ``leaves`` (``bool``): include leaf nodes.
        ``internal`` (``bool``): include internal nodes.
    """
    queue = deque((self,))
    while queue:
        node = queue.popleft()
        if (leaves and node.is_leaf()) or (internal and not node.is_leaf()):
            yield node
        queue.extend(node.children)
Perform a levelorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
def do_init(argdict): site = make_site_obj(argdict) try: site.init_structure() print "Initialized directory." if argdict['randomsite']: for i in range(1,argdict['numpages']+1): p = site.random_page() p.set_published() p.write() print "added page ",p.slug except ValueError: print "Cannot create structure. You're already within an s2 \ tree, or the directory is not empty or it is not writeable. "
Create the structure of a s2site.
def refresh(self, **kwargs):
    """Refresh the underlying resource and the stats derived from it."""
    self.resource.refresh(**kwargs)
    self.rdict = self.resource.entries
    self._update_stats()
Refreshes stats attached to an object
def star_assign_item_check(self, original, loc, tokens):
    """Flag Python-3-only starred assignment."""
    return self.check_py(
        "3",
        "starred assignment (add 'match' to front to produce universal code)",
        original, loc, tokens)
Check for Python 3 starred assignment.
def getCoeffStr(self):
    """Return the distortion coefficients as ``name = value`` lines."""
    return ''.join('%s = %s\n' % (key, val)
                   for key, val in self.coeffs.items())
Get the distortion coefficients as a formatted string.
def list(ctx):
    """List all config values."""
    log.debug('chemdataextractor.config.list')
    for key in config:
        click.echo('%s : %s' % (key, config[key]))
List all config values.
def _draw_ellipse(data, obj, draw_options):
    """Return the PGFPlots code for an ellipse patch.

    Circles are delegated to ``_draw_circle``; rotated ellipses get a
    'rotate around' draw option.
    """
    if isinstance(obj, mpl.patches.Circle):
        return _draw_circle(data, obj, draw_options)
    x, y = obj.center
    ff = data["float format"]
    if obj.angle != 0:
        fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}"
        draw_options.append(fmt.format(obj.angle, x, y))
    template = ("\\draw[{}] (axis cs:" + ff + "," + ff + ") ellipse ("
                + ff + " and " + ff + ");\n")
    cont = template.format(",".join(draw_options), x, y,
                           0.5 * obj.width, 0.5 * obj.height)
    return data, cont
Return the PGFPlots code for ellipses.
def _resume_with_session_ticket(
        self,
        server_info: ServerConnectivityInfo,
        ssl_version_to_use: OpenSslVersionEnum,
) -> TslSessionTicketSupportEnum:
    """Perform one session resumption using TLS Session Tickets."""
    # First handshake; a rejection may simply mean the server only speaks
    # TLS 1.3.
    try:
        session1 = self._resume_ssl_session(
            server_info, ssl_version_to_use, should_enable_tls_ticket=True)
    except SslHandshakeRejected:
        if server_info.highest_ssl_version_supported >= OpenSslVersionEnum.TLSV1_3:
            return TslSessionTicketSupportEnum.FAILED_ONLY_TLS_1_3_SUPPORTED
        raise
    try:
        ticket1 = self._extract_tls_session_ticket(session1)
    except IndexError:
        return TslSessionTicketSupportEnum.FAILED_TICKET_NOT_ASSIGNED
    # Second handshake resuming the first session; the server must hand
    # back the same ticket for resumption to count as supported.
    session2 = self._resume_ssl_session(
        server_info, ssl_version_to_use, session1,
        should_enable_tls_ticket=True)
    try:
        ticket2 = self._extract_tls_session_ticket(session2)
    except IndexError:
        return TslSessionTicketSupportEnum.FAILED_TICKET_NOT_ASSIGNED
    if ticket1 != ticket2:
        return TslSessionTicketSupportEnum.FAILED_TICKED_IGNORED
    return TslSessionTicketSupportEnum.SUCCEEDED
Perform one session resumption using TLS Session Tickets.
def error(message, *args, **kwargs):
    """Print a formatted error message and exit with status 1."""
    print('[!] ' + message.format(*args, **kwargs))
    sys.exit(1)
print an error message
def deactivate(self):
    """Deactivate the environment, running each feature's deactivate action."""
    try:
        self.phase = PHASE.DEACTIVATE
        self.logger.info("Deactivating environment %s..." % self.namespace)
        self.directory.rewrite_config = False
        self.instantiate_features()
        self._specialize()
        for feature in self.features.run_order:
            self.logger.info("Deactivating %s..." % feature[0])
            self.run_action(feature, 'deactivate')
        self.clear_all()
        self._finalize()
    except Exception:
        # Log the traceback at debug level, then re-raise with the original
        # traceback intact (six-style reraise).
        self.logger.debug("", exc_info=sys.exc_info())
        et, ei, tb = sys.exc_info()
        reraise(et, ei, tb)
deactivate the environment
def get_project(self, project_short_name):
    """Return the single project matching ``project_short_name``.

    Raises ProjectNotFound when zero or several projects match.
    """
    matches = pbclient.find_project(short_name=project_short_name,
                                    all=self.all)
    if len(matches) == 1:
        return matches[0]
    raise ProjectNotFound(project_short_name)
Return project object.
def __filename(self, id):
    """Return the cache file name for an entry with the given id."""
    suffix = self.fnsuffix()
    # File name pattern: <prefix>-<id>.<suffix> inside the cache location.
    filename = "%s-%s.%s" % (self.fnprefix, id, suffix)
    return os.path.join(self.location, filename)
Return the cache file name for an entry with a given id.
def _der_to_pem(der_key, marker):
    """Wrap DER bytes in a PEM envelope with the given marker."""
    chunks = [('-----BEGIN %s-----' % marker).encode('utf-8')]
    # 48 raw bytes become 64 base64 characters: the standard PEM line width.
    for start in range(0, len(der_key), 48):
        chunks.append(b64encode(der_key[start:start + 48]))
    chunks.append(('-----END %s-----' % marker).encode('utf-8'))
    return b'\n'.join(chunks)
Perform a simple DER to PEM conversion.
def handle_json_GET_neareststops(self, params):
    """Return the nearest 'limit' stops to ('lat', 'lon') as tuples."""
    schedule = self.server.schedule
    lat = float(params.get('lat'))
    lon = float(params.get('lon'))
    limit = int(params.get('limit'))
    nearest = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
    return [StopToTuple(stop) for stop in nearest]
Return a list of the nearest 'limit' stops to 'lat', 'lon'.
def get_method(self, name, arg_types=()):
    """Return the non-bridge method matching ``name`` and ``arg_types``.

    Parameters
    ==========
    arg_types : sequence of strings
        each string is a parameter type descriptor (non-pretty format).

    Returns
    =======
    method : matching method, or None
    """
    wanted = tuple(arg_types)
    for method in self.get_methods_by_name(name):
        if not method.is_bridge() and \
                method.get_arg_type_descriptors() == wanted:
            return method
    return None
searches for the method matching the name and having argument type descriptors matching those in arg_types. Parameters ========== arg_types : sequence of strings each string is a parameter type, in the non-pretty format. Returns ======= method : `JavaMemberInfo` or `None` the single matching, non-bridging method of matching name and parameter types.
def update_aliases(self):
    """Get aliases information from room state.

    Returns:
        boolean: True if the aliases changed, False if not (including on
        request errors or when no alias state event exists).
    """
    try:
        response = self.client.api.get_room_state(self.room_id)
        for chunk in response:
            if "content" in chunk and "aliases" in chunk["content"]:
                if chunk["content"]["aliases"] != self.aliases:
                    self.aliases = chunk["content"]["aliases"]
                    return True
                return False
        # No alias state event found: nothing changed.  The original fell
        # through here and returned None despite the documented bool return.
        return False
    except MatrixRequestError:
        return False
Get aliases information from room state. Returns: boolean: True if the aliases changed, False if not
def record_ce_entries(self):
    """Return a string with the Rock Ridge entries in the Continuation Entry.

    Raises PyCdlibInternalError when the extension is not yet initialized.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'Rock Ridge extension not yet initialized')
    return self._record(self.ce_entries)
Return a string representing the Rock Ridge entries in the Continuation Entry. Parameters: None. Returns: A string representing the Rock Ridge entry.
def network_profiles(self):
    """Return all the AP profiles, logging them at INFO level when enabled."""
    profiles = self._wifi_ctrl.network_profiles(self._raw_obj)
    # Guard keeps the per-profile formatting work off when INFO is disabled.
    if self._logger.isEnabledFor(logging.INFO):
        for profile in profiles:
            self._logger.info("Get profile:")
            self._logger.info("\tssid: %s", profile.ssid)
            self._logger.info("\tauth: %s", profile.auth)
            self._logger.info("\takm: %s", profile.akm)
            self._logger.info("\tcipher: %s", profile.cipher)
    return profiles
Get all the AP profiles.
def get_src_builders(self, env):
    """Return the list of source Builders for this Builder.

    String entries in src_builder are looked up in env['BUILDERS']; the
    result is memoized per environment id.
    """
    memo_key = id(env)
    try:
        memo_dict = self._memo['get_src_builders']
    except KeyError:
        memo_dict = {}
        self._memo['get_src_builders'] = memo_dict
    else:
        try:
            return memo_dict[memo_key]
        except KeyError:
            pass
    builders = []
    for bld in self.src_builder:
        if SCons.Util.is_String(bld):
            try:
                bld = env['BUILDERS'][bld]
            except KeyError:
                # Unknown builder names are silently skipped.
                continue
        builders.append(bld)
    memo_dict[memo_key] = builders
    return builders
Returns the list of source Builders for this Builder. This exists mainly to look up Builders referenced as strings in the 'BUILDER' variable of the construction environment and cache the result.
def is_owner(package, abspath):
    """Return True when ``abspath`` belongs to ``package``.

    ``package`` must supply 'files' and 'location' keys; otherwise False.
    """
    try:
        files = package['files']
        location = package['location']
    except KeyError:
        return False
    owned = (os.path.abspath(os.path.join(location, name)) for name in files)
    return abspath in owned
Determine whether `abspath` belongs to `package`.
def clear(self):
    """Clear the screen and move the cursor to (0, 0)."""
    self.erase()
    out = self.output
    out.erase_screen()
    out.cursor_goto(0, 0)
    out.flush()
    self.request_absolute_cursor_position()
Clear screen and go to 0,0
def num_data(self):
    """Get the number of rows in the Dataset.

    Returns
    -------
    number_of_rows : int

    Raises LightGBMError if called before the dataset is constructed.
    """
    if self.handle is None:
        raise LightGBMError("Cannot get num_data before construct dataset")
    ret = ctypes.c_int()
    _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle, ctypes.byref(ret)))
    return ret.value
Get the number of rows in the Dataset. Returns ------- number_of_rows : int The number of rows in the Dataset.
def rebuild_config_cache(self, config_filepath):
    """Load the config file and cache its data as a sorted OrderedDict.

    :param config_filepath: str, the full filepath to the config file
    """
    self.validate_config_file(config_filepath)
    config_data = None
    try:
        with open(config_filepath, 'r') as f:
            # NOTE(security): yaml.load with the default loader can
            # construct arbitrary Python objects from untrusted input, and
            # PyYAML >= 6 requires an explicit Loader.  FullLoader keeps the
            # legacy behavior minus arbitrary object construction.
            config_data = yaml.load(f, Loader=yaml.FullLoader)
        items = list(iteritems(config_data))
    except AttributeError:
        # config_data is not a mapping; fall back to treating it as a
        # sequence of (key, value) pairs.
        items = list(config_data)
    self.config_file_contents = OrderedDict(
        sorted(items, key=lambda x: x[0], reverse=True))
    self.config_filepath = config_filepath
Loads from file and caches all data from the config file in the form of an OrderedDict to self.data :param config_filepath: str, the full filepath to the config file :return: bool, success status
def get_all_credit_notes(self, params=None):
    """Get all credit notes, iterating over every page.

    If the rate limit is exceeded an exception is raised and nothing is
    returned.

    :param params: search params
    :return: list
    """
    return self._iterate_through_pages(
        self.get_credit_notes_per_page,
        resource=CREDIT_NOTES,
        params=params or {})
Get all credit notes This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
def _StartMonitoringProcess(self, process):
    """Start monitoring a process via its XML-RPC status port.

    Args:
        process (MultiProcessBaseProcess): process.

    Raises:
        IOError/OSError: if the RPC client cannot connect to the server.
        KeyError: if the process is already being monitored.
        ValueError: if the process is missing.
    """
    if process is None:
        raise ValueError('Missing process.')
    pid = process.pid
    if pid in self._process_information_per_pid:
        raise KeyError(
            'Already monitoring process (PID: {0:d}).'.format(pid))
    if pid in self._rpc_clients_per_pid:
        raise KeyError(
            'RPC client (PID: {0:d}) already exists'.format(pid))
    rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
    # Poll until the child publishes its RPC port or the timeout elapses.
    rpc_port = process.rpc_port.value
    waited = 0.0
    while not rpc_port:
        time.sleep(0.1)
        rpc_port = process.rpc_port.value
        waited += 0.1
        if waited >= self._RPC_SERVER_TIMEOUT:
            raise IOError(
                'RPC client unable to determine server (PID: {0:d}) port.'.format(
                    pid))
    hostname = 'localhost'
    if not rpc_client.Open(hostname, rpc_port):
        raise IOError((
            'RPC client unable to connect to server (PID: {0:d}) '
            'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))
    self._rpc_clients_per_pid[pid] = rpc_client
    self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
Starts monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: IOError: if the RPC client cannot connect to the server. KeyError: if the process is not registered with the engine or if the process is already being monitored. OSError: if the RPC client cannot connect to the server. ValueError: if the process is missing.
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels):
    """Claim pending in-progress payments, then claim the given channels."""
    in_progress = self._call_GetListInProgress(grpc_channel)
    if len(in_progress) > 0:
        self._printout(
            "There are %i payments in 'progress' (they haven't been claimed "
            "in blockchain). We will claim them." % len(in_progress))
        self._blockchain_claim(in_progress)
    payments = self._start_claim_channels(grpc_channel, channels)
    self._blockchain_claim(payments)
Claim all 'pending' payments in progress and after we claim given channels
def doc_params(**kwds):
    """Decorator that dedents a docstring and formats ``kwds`` into it.

    Docstrings should start with a backslash on the first line for proper
    dedenting.
    """
    def decorate(obj):
        obj.__doc__ = dedent(obj.__doc__).format(**kwds)
        return obj
    return decorate
\ Docstrings should start with "\" in the first line for proper formatting.
def init_logger(self):
    """Create the result logger and its log directory if not yet set up."""
    if self.result_logger:
        return
    if not os.path.exists(self.local_dir):
        os.makedirs(self.local_dir)
    if not self.logdir:
        # Fresh temp dir named after the (truncated) trial identifier.
        self.logdir = tempfile.mkdtemp(
            prefix="{}_{}".format(
                str(self)[:MAX_LEN_IDENTIFIER], date_str()),
            dir=self.local_dir)
    elif not os.path.exists(self.logdir):
        os.makedirs(self.logdir)
    self.result_logger = UnifiedLogger(
        self.config,
        self.logdir,
        upload_uri=self.upload_dir,
        loggers=self.loggers,
        sync_function=self.sync_function)
Init logger.
def _send_register_payload(self, websocket):
    """Send the register payload and complete pairing (generator coroutine).

    Stores and persists the client key once the TV reports 'registered'.
    """
    path = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
    # `with` closes the handshake file; the original leaked the handle.
    with codecs.open(path, 'r', 'utf-8') as handle:
        handshake = json.loads(handle.read())
    handshake['payload']['client-key'] = self.client_key
    yield from websocket.send(json.dumps(handshake))
    raw_response = yield from websocket.recv()
    response = json.loads(raw_response)
    if response['type'] == 'response' and \
            response['payload']['pairingType'] == 'PROMPT':
        # The TV shows a prompt; wait for the user's confirmation message.
        raw_response = yield from websocket.recv()
        response = json.loads(raw_response)
        if response['type'] == 'registered':
            self.client_key = response['payload']['client-key']
            self.save_key_file()
Send the register payload.
def to_grayscale(cv2im):
    """Convert per-channel gradients to a grayscale saliency map.

    Sums absolute values across channels, then rescales to [0, 1] using the
    99th percentile as the maximum to suppress outliers.
    """
    grayscale_im = np.sum(np.abs(cv2im), axis=0)
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    scaled = (grayscale_im - im_min) / (im_max - im_min)
    grayscale_im = np.clip(scaled, 0, 1)
    return np.expand_dims(grayscale_im, axis=0)
Convert gradients to grayscale. This gives a saliency map.
def compact(text, **kw):
    """Compact whitespace within paragraphs and format ``kw`` into the text.

    Paragraph breaks (blank lines) are preserved; within a paragraph, runs
    of whitespace collapse to single spaces.
    """
    paragraphs = (' '.join(p.split()) for p in text.split('\n\n'))
    return '\n\n'.join(paragraphs).format(**kw)
Compact whitespace in a string and format any keyword arguments into the string. :param text: The text to compact (a string). :param kw: Any keyword arguments to apply using :func:`str.format()`. :returns: The compacted, formatted string. The whitespace compaction preserves paragraphs.
def set_current_session(session_id) -> bool:
    """Add session_id to flask globals for the current request.

    Returns True on success, False when the assignment fails.
    """
    try:
        g.session_id = session_id
        return True
    except Exception as error:
        # Narrowed from `except (Exception, BaseException)`: BaseException
        # was redundant and would also have swallowed KeyboardInterrupt.
        if current_app.config['DEBUG']:
            print(error)
        return False
Add session_id to flask globals for current request
def _check_inclusions(self, f, domains=None):
    """Decide whether a file/directory should be included in the index.

    When a domain has include regexes they are authoritative; otherwise its
    exclude regexes filter the path out.
    """
    filename = f if isinstance(f, six.string_types) else f.path
    if domains is None:
        domains = list(self.domains.values())
    domains = list(domains)
    domains.insert(0, self)
    # NOTE(review): every branch below returns during the first iteration,
    # so only domains[0] (self) is ever consulted -- confirm intended.
    for dom in domains:
        if dom.include:
            for regex in dom.include:
                if re.search(regex, filename):
                    return True
            return False
        else:
            for regex in dom.exclude:
                if re.search(regex, filename, flags=re.UNICODE):
                    return False
            return True
Check file or directory against regexes in config to determine if it should be included in the index
def select(self, fields=('rowid', '*'), offset=None, limit=None):
    """Return the composed SELECT SQL statement.

    :param fields: sequence of column names (default: rowid plus all).
    :param offset: row offset, applied only together with an int limit.
    :param limit: maximum number of rows.
    """
    # Tuple default avoids the shared-mutable-default-argument pitfall;
    # callers may still pass a list.
    SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
    if self._selectors:
        SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
    if self._modifiers:
        SQL = ' '.join([SQL, self._modifiers])
    if limit is not None and isinstance(limit, int):
        SQL = ' '.join((SQL, 'LIMIT %s' % limit))
    if (limit is not None) and (offset is not None) and isinstance(offset, int):
        SQL = ' '.join((SQL, 'OFFSET %s' % offset))
    return ''.join((SQL, ';'))
Return the composed SELECT SQL statement.
def noise_uniform(self, lower_bound, upper_bound):
    """Create a uniform noise variable on [lower_bound, upper_bound]."""
    assert upper_bound > lower_bound
    nu = self.sym.sym('nu_{:d}'.format(len(self.scope['nu'])))
    self.scope['nu'].append(nu)
    # Affine map from the unit interval onto the requested bounds.
    return lower_bound + nu * (upper_bound - lower_bound)
Create a uniform noise variable
def init_runner(self, parser, tracers, projinfo):
    """Store the parser, tracers and project info for running test cases.

    @param parser: instance of TestCaseParser
    @param tracers: dict mapping device ids to Tracer instances
    @param projinfo: dict with test project data, e.g. keys 'module'/'name'
    """
    self.parser = parser
    self.tracers = tracers
    self.proj_info = projinfo
initial some instances for preparing to run test case @note: should not override @param parser: instance of TestCaseParser @param tracers: dict type for the instance of Tracer. Such as {"":tracer_obj} or {"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2} @param proj_info: dict type of test case. use like: self.proj_info["module"], self.proj_info["name"] yaml case like: - project: name: xxx module: xxxx dict case like: {"project": {"name": xxx, "module": xxxx}}
def nearby(self, expand=50):
    """Return a new Region grown by ``expand`` pixels in every direction.

    The center stays the same; the result is clipped to the screen.
    """
    return Region(
        self.x - expand,
        self.y - expand,
        self.w + (2 * expand),
        self.h + (2 * expand)).clipRegionToScreen()
Returns a new Region that includes the nearby neighbourhood of the the current region. The new region is defined by extending the current region's dimensions all directions by range number of pixels. The center of the new region remains the same.
def parse_target(target_expression):
    """Split a target expression into engine, delimiter and pattern.

    Returns a dict; when the expression cannot be parsed, engine and
    delimiter are None and the whole expression becomes the pattern.
    """
    match = TARGET_REX.match(target_expression)
    if match:
        return match.groupdict()
    log.warning('Unable to parse target "%s"', target_expression)
    return {
        'engine': None,
        'delimiter': None,
        'pattern': target_expression,
    }
Parse `target_expressing` splitting it into `engine`, `delimiter`, `pattern` - returns a dict
def create_symbol(self, *args, **kwargs):
    """Create a symbol through the database, tracking it for the naive index.

    See `database.Database.create_symbol` for the accepted arguments.

    Returns:
        the created symbol, or None.
    """
    if not kwargs.get('project_name'):
        kwargs['project_name'] = self.project.project_name
    sym = self.app.database.create_symbol(*args, **kwargs)
    if sym:
        # Only non-plain Symbol instances are tracked here; the exact-type
        # check (not isinstance) appears deliberate.
        if type(sym) != Symbol:
            self._created_symbols[sym.filename].add(sym.unique_name)
    return sym
Extensions that discover and create instances of `symbols.Symbol` should do this through this method, as it will keep an index of these which can be used when generating a "naive index". See `database.Database.create_symbol` for more information. Args: args: see `database.Database.create_symbol` kwargs: see `database.Database.create_symbol` Returns: symbols.Symbol: the created symbol, or `None`.
def _get_benchmark_handler(self, last_trade, freq='minutely'):
    """Return a LiveBenchmark handler for live trading, else None.

    None lets zipline manage the benchmark itself.
    """
    if not utils.is_live(last_trade):
        return None
    return LiveBenchmark(last_trade, frequency=freq).surcharge_market_data
Setup a custom benchmark handler or let zipline manage it
def getRandomSequence(length=500):
    """Generate a random FASTA header and DNA sequence (Python 2 code).

    The header mixes letters, digits and whitespace; the sequence draws
    A/C/T/G with 'N' at ~1/21 probability, with actual length uniform in
    [0, length).
    """
    fastaHeader = ""
    for _ in xrange(int(random.random() * 100)):
        fastaHeader += random.choice(['A', 'C', '0', '9', ' ', '\t'])
    bases = ['A', 'C', 'T', 'G'] * 5 + ['N']
    sequence = "".join([random.choice(bases)
                        for _ in xrange(int(random.random() * length))])
    return (fastaHeader, sequence)
Generates a random name and sequence.
def spkcpo(target, et, outref, refloc, abcorr, obspos, obsctr, obsref):
    """Return the state of ``target`` relative to a constant-position observer.

    The observer's position is supplied by the caller rather than loaded
    from SPK files.  See spkcpo_c in the CSPICE documentation.

    :return: (state of target w.r.t. observer, one-way light time)
    :rtype: tuple
    """
    target = stypes.stringToCharP(target)
    et = ctypes.c_double(et)
    outref = stypes.stringToCharP(outref)
    refloc = stypes.stringToCharP(refloc)
    abcorr = stypes.stringToCharP(abcorr)
    obspos = stypes.toDoubleVector(obspos)
    obsctr = stypes.stringToCharP(obsctr)
    obsref = stypes.stringToCharP(obsref)
    state = stypes.emptyDoubleVector(6)
    lt = ctypes.c_double()
    libspice.spkcpo_c(target, et, outref, refloc, abcorr, obspos, obsctr,
                      obsref, state, ctypes.byref(lt))
    return stypes.cVectorToPython(state), lt.value
Return the state of a specified target relative to an "observer," where the observer has constant position in a specified reference frame. The observer's position is provided by the calling program rather than by loaded SPK files. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcpo_c.html :param target: Name of target ephemeris object. :type target: str :param et: Observation epoch. :type et: float :param outref: Reference frame of output state. :type outref: str :param refloc: Output reference frame evaluation locus. :type refloc: str :param abcorr: Aberration correction. :type abcorr: str :param obspos: Observer position relative to center of motion. :type obspos: 3-Element Array of floats :param obsctr: Center of motion of observer. :type obsctr: str :param obsref: Frame of observer position. :type obsref: str :return: State of target with respect to observer, One way light time between target and observer. :rtype: tuple
def loadSharedResource(self, pchResourceName, pchBuffer, unBufferLen):
    """Load the named resource into ``pchBuffer`` if it is large enough.

    Returns the size in bytes of the buffer required to hold the resource.
    """
    return self.function_table.loadSharedResource(
        pchResourceName, pchBuffer, unBufferLen)
Loads the specified resource into the provided buffer if large enough. Returns the size in bytes of the buffer required to hold the specified resource.
def validate_document(self, definition):
    """Load, parse, and validate a spline pipeline document.

    The validator verifies the Python structure, not the file format.

    Args:
        definition (str): path and filename of a YAML spline definition.

    Returns:
        dict: the loaded and validated spline document.

    Note:
        Exits the process (status 1) when loading or validation fails.
    """
    try:
        raw_document = Loader.load(definition)
    except RuntimeError as exception:
        self.logger.error(str(exception))
        sys.exit(1)

    validated = Validator().validate(raw_document)
    if validated is None:
        self.logger.info("Schema validation for '%s' has failed", definition)
        sys.exit(1)

    self.logger.info("Schema validation for '%s' succeeded", definition)
    return validated
Validate given pipeline document. The method is trying to load, parse and validate the spline document. The validator verifies the Python structure B{not} the file format. Args: definition (str): path and filename of a yaml file containing a valid spline definition. Returns: dict: loaded and validated spline document. Note: if validation fails the application does exit! See Also: spline.validation.Validator
def boards(self, startAt=0, maxResults=50, type=None, name=None, projectKeyOrID=None):
    """Get a list of board resources.

    :param startAt: The starting index of the returned boards. Base index: 0.
    :param maxResults: The maximum number of boards to return per page. Default: 50
    :param type: Filters results to boards of the specified type (scrum, kanban).
    :param name: Filters results to boards matching (or partially matching) the name.
    :param projectKeyOrID: Filters results to boards matching the project key or ID.
    :rtype: ResultList[Board]

    When the old GreenHopper private API is in use, paging is not enabled
    and all parameters are ignored.
    """
    params = {}
    if type:
        params['type'] = type
    if name:
        params['name'] = name
    if projectKeyOrID:
        params['projectKeyOrId'] = projectKeyOrID

    legacy_api = (self._options['agile_rest_path']
                  == GreenHopperResource.GREENHOPPER_REST_PATH)
    if not legacy_api:
        return self._fetch_pages(Board, 'values', 'board', startAt,
                                 maxResults, params, base=self.AGILE_BASE_URL)

    if startAt or maxResults or params:
        warnings.warn('Old private GreenHopper API is used, all parameters will be ignored.', Warning)
    r_json = self._get_json('rapidviews/list', base=self.AGILE_BASE_URL)
    boards = [Board(self._options, self._session, raw_board)
              for raw_board in r_json['views']]
    # Legacy endpoint returns everything at once; fake a single full page.
    return ResultList(boards, 0, len(boards), len(boards), True)
Get a list of board resources. :param startAt: The starting index of the returned boards. Base index: 0. :param maxResults: The maximum number of boards to return per page. Default: 50 :param type: Filters results to boards of the specified type. Valid values: scrum, kanban. :param name: Filters results to boards that match or partially match the specified name. :param projectKeyOrID: Filters results to boards that match the specified project key or ID. :rtype: ResultList[Board] When old GreenHopper private API is used, paging is not enabled and all parameters are ignored.
def _synthesize_multiple_subprocess(self, text_file, output_file_path, quit_after=None, backwards=False):
    """Synthesize multiple fragments by shelling out to a subprocess.

    :rtype: tuple (result, (anchors, current_time, num_chars))
    """
    self.log(u"Synthesizing multiple via subprocess...")
    result = self._synthesize_multiple_generic(
        helper_function=self._synthesize_single_subprocess_helper,
        text_file=text_file,
        output_file_path=output_file_path,
        quit_after=quit_after,
        backwards=backwards,
    )
    self.log(u"Synthesizing multiple via subprocess... done")
    return result
Synthesize multiple fragments via ``subprocess``. :rtype: tuple (result, (anchors, current_time, num_chars))
def get_decode_value(self):
    """Decode and return the stored key value according to its storage type.

    Hex, base64, and base85 stored values are decoded to bytes; unknown
    storage types return the raw stored value unchanged.

    Raises:
        NotImplementedError: when the key is stored as a JWK.
    """
    store_type = self._store_type
    if store_type == PUBLIC_KEY_STORE_TYPE_JWK:
        raise NotImplementedError
    decoders = {
        PUBLIC_KEY_STORE_TYPE_HEX: bytes.fromhex,
        PUBLIC_KEY_STORE_TYPE_BASE64: b64decode,
        PUBLIC_KEY_STORE_TYPE_BASE85: b85decode,
    }
    decoder = decoders.get(store_type)
    if decoder is None:
        return self._value
    return decoder(self._value)
Return the key value based on its storage type.
def find_project_file(start_dir, basename):
    """Walk up the directory tree from ``start_dir`` looking for ``basename``.

    Returns the absolute path of the first match found.

    Raises:
        PrintableError: if an entry with that name exists but is not a
            regular file, or the filesystem root is reached with no match.
    """
    current = os.path.abspath(start_dir)
    while True:
        candidate = os.path.join(current, basename)
        if os.path.isfile(candidate):
            return candidate
        if os.path.exists(candidate):
            raise PrintableError(
                "Found {}, but it's not a file.".format(candidate))
        parent = os.path.dirname(current)
        if parent == current:
            # dirname() is a fixed point only at the root.
            raise PrintableError("Can't find " + basename)
        current = parent
Walk up the directory tree until we find a file of the given name.
def np_lst_sq(vecMdl, aryFuncChnk):
    """Least-squares fit in numpy, without cross-validation.

    Notes
    -----
    Thin wrapper around ``np.linalg.lstsq`` kept so the fitting pipeline
    stays consistent.

    Returns
    -------
    tuple
        Fitted parameter estimates and residual sums of squares.
    """
    solution = np.linalg.lstsq(vecMdl, aryFuncChnk, rcond=-1)
    return solution[0], solution[1]
Least squares fitting in numpy without cross-validation. Notes ----- This is just a wrapper function for np.linalg.lstsq to keep piping consistent.
def load_configuration(self, **kwargs):
    """Load the tag configuration, merging kwargs with default settings.

    Any key missing from ``kwargs`` falls back to the value in
    ``settings.ACTIVE_URL_KWARGS``.
    """
    for key, default in settings.ACTIVE_URL_KWARGS.items():
        kwargs.setdefault(key, default)

    self.css_class = kwargs['css_class']
    self.parent_tag = kwargs['parent_tag']
    self.menu = kwargs['menu']
    self.ignore_params = kwargs['ignore_params']
Load the configuration, merging keyword arguments with the default settings.
def _init_contoh(self, makna_label): indeks = makna_label.text.find(': ') if indeks != -1: contoh = makna_label.text[indeks + 2:].strip() self.contoh = contoh.split('; ') else: self.contoh = []
Memproses contoh yang ada dalam makna. :param makna_label: BeautifulSoup untuk makna yang ingin diproses. :type makna_label: BeautifulSoup
def save_json(object, handle, indent=2):
    """Serialize ``object`` to JSON and write it to ``handle``.

    Uses NumpyJSONEncoder so numpy scalars and arrays serialize cleanly.
    """
    handle.write(json.dumps(object, indent=indent, cls=NumpyJSONEncoder))
Save object as json on CNS.
def check(self, triggers, data_reader):
    """Look for a single-detector trigger that passes the thresholds.

    Examines only the loudest trigger (highest SNR) in the current data;
    returns a fake-coincidence dict for it when it passes the newsnr,
    reduced-chisq, and template-duration cuts, otherwise None.
    """
    if len(triggers['snr']) == 0:
        return None

    loudest = triggers['snr'].argmax()
    rchisq = triggers['chisq'][loudest]
    nsnr = ranking.newsnr(triggers['snr'][loudest], rchisq)
    dur = triggers['template_duration'][loudest]

    passes = (nsnr > self.newsnr_threshold
              and rchisq < self.reduced_chisq_threshold
              and dur > self.duration_threshold)
    if not passes:
        return None

    fake_coinc = {'foreground/%s/%s' % (self.ifo, key): triggers[key][loudest]
                  for key in triggers}
    fake_coinc['foreground/stat'] = nsnr
    fake_coinc['foreground/ifar'] = self.fixed_ifar
    fake_coinc['HWINJ'] = data_reader.near_hwinj()
    return fake_coinc
Look for a single detector trigger that passes the thresholds in the current data.
def parse_config(args=sys.argv):
    """Parse command-line arguments expecting a single config-file path.

    Args:
        args: argv-style list; ``args[0]`` is skipped as the program name.

    Returns:
        The populated namespace from ``parser.parse_args()``, with
        ``config_file`` validated by ``extant_file``.
    """
    parser = argparse.ArgumentParser(description='Read in the config file')
    parser.add_argument('config_file',
                        help='Configuration file.',
                        metavar='FILE',
                        type=extant_file)
    return parser.parse_args(args[1:])
Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
def add_child(parent, tag, text=None):
    """Append a new child element of ``tag`` to ``parent`` and return it.

    When ``text`` is given, it becomes the child's text content.
    """
    child = ET.SubElement(parent, tag)
    if text is not None:
        child.text = text
    return child
Add a child element of specified tag type to parent. The new child element is returned.
def update(self, *objs):
    """Synchronize the many-to-many relationship table with ``objs``.

    Inserts rows for objects newly related to this instance and clears
    rows for objects no longer present; updates only the third
    (relationship) table, never ModelA or ModelB.

    Returns True if any row was inserted or cleared, else False.
    """
    # Keys currently stored in the relationship table.
    keys = self.keys()
    # Keys that should exist after the update, extracted from objs.
    new_keys = get_objs_columns(objs, self.realfieldb)
    modified = False
    for v in new_keys:
        if v in keys:
            # Already related: remove from `keys` so only stale
            # relations remain there for the final clear().
            keys.remove(v)
        else:
            # New relation: build and insert the link row.
            d = {self.fielda:self.valuea, self.fieldb:v}
            if self.before_save:
                # Hook allows callers to adjust the row before insert.
                self.before_save(d)
            if self.through_model:
                # Explicit through model: persist via the ORM.
                obj = self.through_model(**d)
                obj.save()
            else:
                # No through model: insert directly into the table.
                self.do_(self.table.insert().values(**d))
            modified = True
    if keys:
        # Whatever is left in `keys` is stale and gets removed.
        self.clear(*keys)
        modified = True
    # Cache the new key set on the owning instance.
    setattr(self.instance, self.store_key, new_keys)
    return modified
Update the third relationship table, but not the ModelA or ModelB
def start_to(self, ip, tcpport=102):
    """Start the server bound to a specific interface.

    :param ip: IPv4 address of the interface to listen on.
    :param tcpport: TCP port to listen on; 102 is the default and is not
        re-applied.
    :returns: result code from the underlying ``Srv_Start`` call.
    """
    if tcpport != 102:
        # A non-default port must be configured before the server starts.
        logger.info("setting server TCP port to %s" % tcpport)
        self.set_param(snap7.snap7types.LocalPort, tcpport)
    # NOTE(review): assert is stripped under `python -O`, removing this
    # validation; an explicit raise would be safer — confirm intent.
    assert re.match(ipv4, ip), '%s is invalid ipv4' % ip
    # NOTE(review): the log message always says ":102" even when a custom
    # port was configured above — confirm whether that is intended.
    logger.info("starting server to %s:102" % ip)
    return self.library.Srv_Start(self.pointer, ip)
Start the server on a specific interface.
def _col_name(index): for exp in itertools.count(1): limit = 26 ** exp if index < limit: return ''.join(chr(ord('A') + index // (26 ** i) % 26) for i in range(exp-1, -1, -1)) index -= limit
Converts a column index to a column name. >>> _col_name(0) 'A' >>> _col_name(26) 'AA'
def propget(self, prop, rev, path=None):
    """Get the Subversion property value of ``path`` at revision ``rev``.

    When ``path`` is None the property is read without a path argument.
    """
    rev, prefix = self._maprev(rev)
    if path is None:
        return self._propget(prop, str(rev), None)
    cleaned = type(self).cleanPath(_join(prefix, path))
    return self._propget(prop, str(rev), cleaned)
Get Subversion property value of the path
def get_index(self):
    """Return the kernel index value for this interface's name.

    Issues the SIOCGIFINDEX ioctl against the module-level socket.
    """
    request = struct.pack('16si', self.name, 0)
    response = fcntl.ioctl(sockfd, SIOCGIFINDEX, request)
    _name, index = struct.unpack('16si', response)
    return index
Convert an interface name to an index value.
def set_pixel(self, x, y, state):
    """Set pixel (x, y) to ``state`` ("ON", "OFF" or "TOGGLE").

    Coordinates are converted from 0-based to the device's 1-based grid.
    """
    command = "P" + "{0},{1},{2}".format(x + 1, y + 1, state)
    self.send_cmd(command)
Set pixel at "x,y" to "state" where state can be one of "ON", "OFF" or "TOGGLE"
def get_stock_dividends(self, sid, trading_days):
    """Return all stock dividends for ``sid`` within the trading range.

    Parameters
    ----------
    sid: int
        The asset whose stock dividends should be returned.
    trading_days: pd.DatetimeIndex
        The trading range; its first/last entries bound ex_date/pay_date.

    Returns
    -------
    list
        Dicts with all relevant attributes; timestamp fields are
        converted to pd.Timestamps. Empty when there is no adjustment
        reader or the range is empty.
    """
    if self._adjustment_reader is None or len(trading_days) == 0:
        return []

    # Convert nanosecond timestamps to epoch seconds for the SQL compare.
    start_dt = trading_days[0].value / 1e9
    end_dt = trading_days[-1].value / 1e9

    rows = self._adjustment_reader.conn.execute(
        "SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
        "ex_date > ? AND pay_date < ?",
        (int(sid), start_dt, end_dt,)).fetchall()

    return [
        {
            "declared_date": row[1],
            "ex_date": pd.Timestamp(row[2], unit="s"),
            "pay_date": pd.Timestamp(row[3], unit="s"),
            "payment_sid": row[4],
            "ratio": row[5],
            "record_date": pd.Timestamp(row[6], unit="s"),
            "sid": row[7],
        }
        for row in rows
    ]
Returns all the stock dividends for a specific sid that occur in the given trading range. Parameters ---------- sid: int The asset whose stock dividends should be returned. trading_days: pd.DatetimeIndex The trading range. Returns ------- list: A list of objects with all relevant attributes populated. All timestamp fields are converted to pd.Timestamps.
def rerank(self, hypotheses: Dict[str, Any], reference: str) -> Dict[str, Any]:
    """Rerank a set of hypotheses for one reference translation.

    Uses stable (mergesort-based) sorting, best score first.

    :param hypotheses: Nbest translations.
    :param reference: A single string with the actual reference translation.
    :return: Nbest translations sorted by reranking scores; per-hypothesis
        scores are attached when ``self.return_score`` is set.
    """
    scores = [self.scoring_function(candidate, reference)
              for candidate in hypotheses['translations']]
    # argsort ascending, reversed for best-first; mergesort keeps ties stable.
    ranking = list(np.argsort(scores, kind='mergesort')[::-1])
    reranked = self._sort_by_ranking(hypotheses, ranking)
    if self.return_score:
        reranked['scores'] = [scores[i] for i in ranking]
    return reranked
Reranks a set of hypotheses that belong to one single reference translation. Uses stable sorting. :param hypotheses: Nbest translations. :param reference: A single string with the actual reference translation. :return: Nbest translations sorted by reranking scores.
def abort(self):
    """Return an HTML response representation of this exception."""
    body = render_template('error.html', error=self.code, message=self.message)
    return make_response(body, self.code)
Return an HTML Response representation of the exception.
def clear_public_domain(self):
    """Remove the public domain status, restoring the default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_public_domain_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['publicDomain'] = self._public_domain_default
Removes the public domain status. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def get_children(self, node):
    """Find all the children of a node.

    Only valid on a directed graph with no cycles; on a cyclic or
    undirected graph an error is printed and the process exits.

    :param node: the node whose children are wanted
    :type node: Node
    :returns: list of nodes
    :rtype: Node[]
    """
    if self.find_cycle() or self.__directionless:
        sys.stderr.write("ERROR: do cannot find a branch when there are cycles in the graph\n")
        sys.exit()
    child_ids = self.__get_children(node.id)
    return [self.__nodes[child_id] for child_id in child_ids]
Find all the children of a node. must be a undirectional graph with no cycles :param node: :type node: Node :returns: list of nodes :rtype: Node[]
def _query_zendesk(self, endpoint, object_type, *endpoint_args, **endpoint_kwargs):
    """Query Zendesk for items, serving from the local cache when possible.

    If an ``id`` or list of ``ids`` is passed, attempt to locate those
    items in the cache for ``object_type``; if they cannot be found (or no
    ids are passed), fall back to a call to Zendesk.

    :param endpoint: target endpoint.
    :param object_type: object type we are expecting.
    :param endpoint_args: args for endpoint
    :param endpoint_kwargs: kwargs for endpoint
    :return: either a ResultGenerator or a Zenpy object.
    """
    _id = endpoint_kwargs.get('id', None)
    if _id:
        # Single-item lookup: cache hit returns immediately.
        item = self.cache.get(object_type, _id)
        if item:
            return item
        else:
            return self._get(url=self._build_url(endpoint(*endpoint_args, **endpoint_kwargs)))
    elif 'ids' in endpoint_kwargs:
        cached_objects = []
        # Batch lookup: serve from cache only if *every* id is cached;
        # a single miss falls back to one API call for the whole batch.
        for _id in endpoint_kwargs['ids']:
            obj = self.cache.get(object_type, _id)
            if not obj:
                return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
            cached_objects.append(obj)
        # All ids cached: wrap them in a generator so callers see the
        # same interface as an API-backed result.
        return ZendeskResultGenerator(self, {}, response_objects=cached_objects, object_type=object_type)
    else:
        # No ids supplied: always query the API.
        return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
Query Zendesk for items. If an id or list of ids are passed, attempt to locate these items in the relevant cache. If they cannot be found, or no ids are passed, execute a call to Zendesk to retrieve the items. :param endpoint: target endpoint. :param object_type: object type we are expecting. :param endpoint_args: args for endpoint :param endpoint_kwargs: kwargs for endpoint :return: either a ResultGenerator or a Zenpy object.
def combine(self, pubkeys):
    """Add a number of public keys together into one aggregate key.

    Stores the combined secp256k1 public key on the instance and
    returns it.
    """
    assert len(pubkeys) > 0
    # Every entry must already be a secp256k1_pubkey pointer.
    for pubkey in pubkeys:
        assert ffi.typeof(pubkey) is ffi.typeof('secp256k1_pubkey *')
    combined = ffi.new('secp256k1_pubkey *')
    res = lib.secp256k1_ec_pubkey_combine(
        self.ctx, combined, pubkeys, len(pubkeys))
    if not res:
        raise Exception('failed to combine public keys')
    self.public_key = combined
    return combined
Add a number of public keys together.
def set_bytes_at_rva(self, rva, data):
    """Overwrite the bytes at the file offset corresponding to ``rva``.

    Return True if successful, False otherwise. It can fail if the RVA
    cannot be mapped into the file's boundaries.

    Raises:
        TypeError: if ``data`` is not a ``bytes`` object.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')
    offset = self.get_physical_by_rva(rva)
    # Fix: the original tested `if not offset`, which rejected a valid
    # physical offset of 0. Only a missing mapping should fail.
    # NOTE(review): assumes get_physical_by_rva returns None when the RVA
    # cannot be mapped — confirm against its implementation.
    if offset is None:
        return False
    return self.set_bytes_at_offset(offset, data)
Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries.
def need_ext():
    """Signal that external modules need to be deployed, then exit."""
    message = "{0}\next_mods\n".format(OPTIONS.delimiter)
    sys.stdout.write(message)
    sys.exit(EX_MOD_DEPLOY)
Signal that external modules need to be deployed.
def _parse_boolean(value):
    """Coerce a string into a bool.

    :param str value: Value to parse (case-insensitive).
    :returns: True/False, or None when the string is not a recognized
        boolean.
    """
    lowered = value.lower()
    if lowered in _true_strings:
        return True
    if lowered in _false_strings:
        return False
    return None
Coerce value into an bool. :param str value: Value to parse. :returns: bool or None if the value is not a boolean string.
def create_swagger_json_handler(app, **kwargs):
    """Create a handler that returns the application's swagger definition.

    The spec is assembled and JSON-encoded once at creation time; the
    returned coroutine just serves the cached bytes. Assumes the
    application routes via the TransmuteUrlDispatcher.
    """
    spec = get_swagger_spec(app)
    _add_blueprint_specs(app, spec)
    payload = json.dumps(spec.swagger_definition(**kwargs)).encode("UTF-8")

    async def swagger(request):
        # CORS header lets external swagger-ui instances fetch the spec.
        return HTTPResponse(
            body_bytes=payload,
            headers={"Access-Control-Allow-Origin": "*"},
            content_type="application/json",
        )

    return swagger
Create a handler that returns the swagger definition for an application. This method assumes the application is using the TransmuteUrlDispatcher as the router.
def update_policy(self, defaultHeaders):
    """Merge default headers into the policy inputs.

    Keys present in ``defaultHeaders`` but missing from ``self.inputs``
    are copied in. When there are no inputs at all, they are returned
    unchanged (i.e. None).

    :returns: the (possibly updated) ``self.inputs``
    """
    if self.inputs is not None:
        for key, value in defaultHeaders.items():
            self.inputs.setdefault(key, value)
    return self.inputs
Merge default headers into the policy inputs: keys present in the defaults but missing from the inputs are added, and the (possibly None) inputs are returned.
async def async_fetch(url: str, **kwargs) -> Selector:
    """Do the fetch in an async style.

    Args:
        url (str): The url of the site.

    Returns:
        Selector: allows you to select parts of HTML text using CSS or
        XPath expressions.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    # NOTE(review): kwargs are forwarded to both ClientSession and get();
    # confirm no keyword is rejected by one of the two.
    async with aiohttp.ClientSession(**kwargs) as session:
        async with session.get(url, **kwargs) as response:
            html = await response.text()
    return Selector(text=html)
Do the fetch in an async style. Args: url (str): The url of the site. Returns: Selector: allows you to select parts of HTML text using CSS or XPath expressions.