Raises an error if the API invocation failed. Args: raw_response: string, the HTTP response. Raises: GitkitClientError: if the error code is 4xx. GitkitServerError: if the response is malformed. Returns: Successful response as dict.
def _CheckGitkitError(self, raw_response):
    try:
        response = simplejson.loads(raw_response)
        if 'error' not in response:
            return response
        else:
            error = response['error']
            if 'code' in error:
                code = error['code']
                if str(code).startswith('4'):
                    raise errors.GitkitClientError(error['message'])
                else:
                    raise errors.GitkitServerError(error['message'])
    except simplejson.JSONDecodeError:
        pass
    raise errors.GitkitServerError('null error code from Gitkit server')
696,239
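The 4xx-versus-other split above is easy to exercise in isolation. A minimal sketch, assuming the stdlib json module in place of simplejson and stand-in exception classes (both substitutions are hypothetical, not the gitkit package's own):

    import json

    class GitkitClientError(Exception): pass   # stand-in for errors.GitkitClientError
    class GitkitServerError(Exception): pass   # stand-in for errors.GitkitServerError

    def check_error(raw_response):
        # Mirrors _CheckGitkitError: 4xx codes are the caller's fault,
        # everything else (including unparseable bodies) is a server error.
        try:
            response = json.loads(raw_response)
            if 'error' not in response:
                return response
            error = response['error']
            if 'code' in error:
                code = error['code']
                if str(code).startswith('4'):
                    raise GitkitClientError(error['message'])
                raise GitkitServerError(error['message'])
        except ValueError:  # json.JSONDecodeError is a ValueError subclass
            pass
        raise GitkitServerError('null error code from Gitkit server')

    check_error('{"users": []}')                                  # returns the dict
    # check_error('{"error": {"code": 403, "message": "no"}}')    # -> GitkitClientError
    # check_error('not json')                                     # -> GitkitServerError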
Initializes from user specified dictionary. Args: dictionary: dict of user specified attributes Returns: GitkitUser object
def FromDictionary(cls, dictionary): if 'user_id' in dictionary: raise errors.GitkitClientError('use localId instead') if 'localId' not in dictionary: raise errors.GitkitClientError('must specify localId') if 'email' not in dictionary: raise errors.GitkitClientError('must specify email') return cls(decode=False, **dictionary)
696,243
Inits the Gitkit client library. Args: client_id: string, developer's Google oauth2 web client id. service_account_email: string, Google service account email. service_account_key: string, Google service account private key. widget_url: string, Gitkit widget URL. cookie_name: string, Gitkit cookie name. http: Http, http client which support cache. project_id: string, developer console's project id.
def __init__(self, client_id='', service_account_email='', service_account_key='', widget_url='', cookie_name='gtoken', http=None, project_id=''): self.client_id = client_id self.widget_url = widget_url self.cookie_name = cookie_name self.project_id = project_id self.rpc_helper = rpchelper.RpcHelper(service_account_email, service_account_key, GitkitClient.GOOGLE_API_BASE, http) self.config_data_cached = None if not self.client_id: self.client_id = self.GetClientId()
696,245
Verifies a Gitkit token string. Args: jwt: string, the token to be checked Returns: GitkitUser, if the token is valid. None otherwise.
def VerifyGitkitToken(self, jwt):
    certs = self.rpc_helper.GetPublicCert()
    crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400  # 30 days
    parsed = None
    for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):
        try:
            parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)
        except crypt.AppIdentityError as e:
            if "Wrong recipient" not in e.message:
                return None
    if parsed:
        return GitkitUser.FromToken(parsed)
    return None
696,250
Gets user info by email. Args: email: string, the user email. Returns: GitkitUser, containing the user info.
def GetUserByEmail(self, email): user = self.rpc_helper.GetAccountInfoByEmail(email) return GitkitUser.FromApiResponse(user)
696,251
Gets user info by id. Args: local_id: string, the user id at Gitkit server. Returns: GitkitUser, containing the user info.
def GetUserById(self, local_id): user = self.rpc_helper.GetAccountInfoById(local_id) return GitkitUser.FromApiResponse(user)
696,252
Uploads multiple users to Gitkit server. Args: hash_algorithm: string, the hash algorithm. hash_key: array, raw key of the hash algorithm. accounts: list of GitkitUser. Returns: A dict of failed accounts. The key is the index of the 'accounts' list, starting from 0.
def UploadUsers(self, hash_algorithm, hash_key, accounts): return self.rpc_helper.UploadAccount(hash_algorithm, base64.urlsafe_b64encode(hash_key), [GitkitUser.ToRequest(i) for i in accounts])
696,253
Gets all user info from Gitkit server. Args: pagination_size: int, how many users should be returned per request. The account info is retrieved with pagination. Yields: GitkitUser instances, one per downloaded account.
def GetAllUsers(self, pagination_size=10): next_page_token, accounts = self.rpc_helper.DownloadAccount( None, pagination_size) while accounts: for account in accounts: yield GitkitUser.FromApiResponse(account) next_page_token, accounts = self.rpc_helper.DownloadAccount( next_page_token, pagination_size)
696,254
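The download loop above is the usual token-based pagination generator: fetch a page, yield its rows, then use the returned token until an empty page comes back. A self-contained sketch with a fake in-memory source standing in for rpc_helper.DownloadAccount (all names hypothetical):

    def fake_download(page_token, page_size, data=list(range(7))):
        # Stand-in for rpc_helper.DownloadAccount: returns (next_token, page).
        start = page_token or 0
        page = data[start:start + page_size]
        return start + page_size, page

    def get_all(pagination_size=3):
        token, rows = fake_download(None, pagination_size)
        while rows:
            for row in rows:
                yield row
            token, rows = fake_download(token, pagination_size)

    assert list(get_all()) == [0, 1, 2, 3, 4, 5, 6]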
Builds an out-of-band URL. The Gitkit API GetOobCode() is called and the returned code is combined with the Gitkit widget URL to build the out-of-band URL. Args: param: dict of request parameters. mode: string, Gitkit widget mode to handle the oob action after the user clicks the oob url in the email. Raises: GitkitClientError: if the oob code is not returned. Returns: A tuple of (oob_code, oob_url).
def _BuildOobLink(self, param, mode): code = self.rpc_helper.GetOobCode(param) if code: parsed = list(parse.urlparse(self.widget_url)) query = dict(parse.parse_qsl(parsed[4])) query.update({'mode': mode, 'oobCode': code}) try: parsed[4] = parse.urlencode(query) except AttributeError: parsed[4] = urllib.urlencode(query) return code, parse.urlunparse(parsed) raise errors.GitkitClientError('invalid request')
696,256
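The widget-URL manipulation inside _BuildOobLink is plain urllib.parse work: split the URL, merge extra parameters into its query, and reassemble. A runnable Python 3 sketch of just that step (example URL and parameters made up):

    from urllib import parse

    def add_query_params(url, extra):
        parts = list(parse.urlparse(url))
        query = dict(parse.parse_qsl(parts[4]))  # index 4 is the query string
        query.update(extra)
        parts[4] = parse.urlencode(query)
        return parse.urlunparse(parts)

    url = add_query_params('https://example.com/widget?hl=en',
                           {'mode': 'resetPassword', 'oobCode': 'CODE123'})
    # -> 'https://example.com/widget?hl=en&mode=resetPassword&oobCode=CODE123'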
Similar to pkg_resources.resource_filename; however, this works with the information cached in this registry instance, and the arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from. artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None.
def get_artifact_filename(self, package_name, artifact_name): project_name = self.packages.normalize(package_name) return self.records.get((project_name, artifact_name))
696,508
Yield the paths to the artifacts in the order of dependency resolution. Arguments: package_names The names of the packages to probe the dependency graph, provided as a list of strings. builder_name The exact name of the builder whose registered artifact paths are looked up. dependencies Trace dependencies. Default is off. Yields the path of each artifact that has been declared for the given builder.
def resolve_artifacts_by_builder_compat( self, package_names, builder_name, dependencies=False): paths = self.compat_builders.get(builder_name) if not paths: # perhaps warn, but just return return resolver = ( # traces dependencies for distribution. find_packages_requirements_dists if dependencies else # just get grabs the distribution. pkg_names_to_dists ) for distribution in resolver(package_names): path = paths.get(distribution.project_name) if path: yield path
696,509
Generic artifact builder function. Arguments: package_names List of package names to be built Returns True if the build is successful without errors, False if errors were found or if no artifacts were built.
def __call__(self, package_names): result = True registry = get(self.registry_name) for package_name in package_names: metadata = {} for entry_point, export_target in registry.iter_export_targets_for( package_name): builder = next(registry.generate_builder( entry_point, export_target), None) if not builder: # immediate failure if builder does not exist. result = False continue entries = registry.execute_builder(*builder) # whether the builder produced an artifact entry. result = bool(entries) and result metadata.update(entries) # whether the package as a whole produced artifacts entries. result = bool(metadata) and result registry.update_artifact_metadata(package_name, metadata) return result
696,520
Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group. f A callable method or function. The rest of the arguments will be passed as positional and keyword arguments to f when it is invoked.
def advise(self, name, f, *a, **kw): if name is None: return advice = (f, a, kw) debug = self.get(DEBUG) frame = currentframe() if frame is None: logger.debug('currentframe() failed to return frame') else: if name in self._called: self.__advice_stack_frame_protection(frame) if debug: logger.debug( "advise '%s' invoked by %s:%d", name, frame.f_back.f_code.co_filename, frame.f_back.f_lineno, ) if debug > 1: # use the memory address of the tuple which should # be stable self._frames[id(advice)] = ''.join( format_stack(frame.f_back)) self._advices[name] = self._advices.get(name, []) self._advices[name].append(advice)
696,818
Extract a specific argument from calls to a specific function. Arguments: text The source text. f_name The name of the function. f_argn The argument position. f_argt The argument type from calmjs.parse.asttypes; default: calmjs.parse.asttypes.String
def extract_function_argument(text, f_name, f_argn, f_argt=asttypes.String): tree = parse(text) return list(filter_function_argument(tree, f_name, f_argn, f_argt))
696,915
Get all the nodes in a network. args: network_id (int): The network in which to search template_id (int): Only return nodes whose type is in this template.
def get_nodes(network_id, template_id=None, **kwargs): user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) node_qry = db.DBSession.query(Node).filter( Node.network_id==network_id, Node.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: node_qry = node_qry.filter(ResourceType.node_id==Node.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) nodes = node_qry.all() return nodes
697,371
Get all the links in a network. args: network_id (int): The network in which to search template_id (int): Only return links whose type is in this template.
def get_links(network_id, template_id=None, **kwargs): user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) link_qry = db.DBSession.query(Link).filter( Link.network_id==network_id, Link.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: link_qry = link_qry.filter(ResourceType.link_id==Link.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) links = link_qry.all() return links
697,372
Get all the resource groups in a network. args: network_id (int): The network in which to search template_id (int): Only return resource groups whose type is in this template.
def get_groups(network_id, template_id=None, **kwargs): user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) group_qry = db.DBSession.query(ResourceGroup).filter( ResourceGroup.network_id==network_id, ResourceGroup.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: group_qry = group_qry.filter(ResourceType.group_id==ResourceGroup.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) groups = group_qry.all() return groups
697,373
Login a user, returning a dict containing their user_id and session_id This does the DB login to check the credentials, and then creates a session so that requests from apps do not need to perform a login args: username (string): The user's username password(string): The user's password (unencrypted) returns: A dict containing the user_id and session_id raises: HydraError if the login fails
def login(username, password, **kwargs): user_id = util.hdb.login_user(username, password) hydra_session = session.Session({}, #This is normally a request object, but in this case is empty validate_key=config.get('COOKIES', 'VALIDATE_KEY', 'YxaDbzUUSo08b+'), type='file', cookie_expires=True, data_dir=config.get('COOKIES', 'DATA_DIR', '/tmp'), file_dir=config.get('COOKIES', 'FILE_DIR', '/tmp/auth')) hydra_session['user_id'] = user_id hydra_session['username'] = username hydra_session.save() return (user_id, hydra_session.id)
697,595
Logout a user, removing their cookie if it exists and returning 'OK' args: session_id (string): The session ID to identify the cookie to remove returns: 'OK' raises: HydraError if the logout fails
def logout(session_id, **kwargs): hydra_session_object = session.SessionObject({}, #This is normally a request object, but in this case is empty validate_key=config.get('COOKIES', 'VALIDATE_KEY', 'YxaDbzUUSo08b+'), type='file', cookie_expires=True, data_dir=config.get('COOKIES', 'DATA_DIR', '/tmp'), file_dir=config.get('COOKIES', 'FILE_DIR', '/tmp/auth')) hydra_session = hydra_session_object.get_by_id(session_id) if hydra_session is not None: hydra_session.delete() hydra_session.save() return 'OK'
697,596
Given a session ID, get the user ID that it is associated with args: session_id (string): The session ID used to look up the user returns: user_id (string) or None if the session does not exist
def get_session_user(session_id, **kwargs): hydra_session_object = session.SessionObject({}, #This is normally a request object, but in this case is empty validate_key=config.get('COOKIES', 'VALIDATE_KEY', 'YxaDbzUUSo08b+'), type='file', cookie_expires=True, data_dir=config.get('COOKIES', 'DATA_DIR', '/tmp'), file_dir=config.get('COOKIES', 'FILE_DIR', '/tmp/auth')) hydra_session = hydra_session_object.get_by_id(session_id) if hydra_session is not None: return hydra_session['user_id'] return None
697,597
Convert a metadata JSON string into a dictionary. Args: user_id (int): Optional: Insert user_id into the metadata if specified source (string): Optional: Insert source (typically the name of the app) into the metadata if necessary. Returns: dict: The metadata as a python dictionary
def get_metadata_as_dict(self, user_id=None, source=None): if self.metadata is None or self.metadata == "": return {} metadata_dict = self.metadata if isinstance(self.metadata, dict) else json.loads(self.metadata) # These should be set on all datasets by default, but we don't enforce this rigidly metadata_keys = [m.lower() for m in metadata_dict] if user_id is not None and 'user_id' not in metadata_keys: metadata_dict['user_id'] = six.text_type(user_id) if source is not None and 'source' not in metadata_keys: metadata_dict['source'] = six.text_type(source) return { k : six.text_type(v) for k, v in metadata_dict.items() }
697,680
Get all templates. Args: load_all (Boolean): If False, return just the template entries; if True, return the full template structure (template types and type attrs). Returns: List of Template objects
def get_templates(load_all=True, **kwargs): if load_all is False: templates = db.DBSession.query(Template).all() else: templates = db.DBSession.query(Template).options(joinedload_all('templatetypes.typeattrs')).all() return templates
697,763
Generate a new key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: A tuple of (private_key, public_key) encoded in base58.
def ed25519_generate_key_pair_from_secret(secret): # if you want to do this correctly, use a key derivation function! if not isinstance(secret, bytes): secret = secret.encode() hash_bytes = sha3.keccak_256(secret).digest() sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes) # Private key private_value_base58 = sk.encode(encoding='base58') # Public key public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58') return private_value_base58, public_value_compressed_base58
697,834
Generates a cryptographic key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: :class:`~bigchaindb.common.crypto.CryptoKeypair`: A :obj:`collections.namedtuple` with named fields :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
def generate_key_pair(secret=None): if secret: keypair_raw = ed25519_generate_key_pair_from_secret(secret) return CryptoKeypair( *(k.decode() for k in keypair_raw)) else: return generate_keypair()
697,835
Initialize a :class:`~.Plugin` instance and connect to BigchainDB. Args: config (dict): Optional configuration passed to the database driver. namespace (str): Optional namespace, overriding the configured db.namespace value.
def __init__(self, config=None, namespace=None): self.driver = get_database_instance(config) self.user = generate_key_pair(get_value('secret', 'SECRET', None, config)) self.namespace = get_value('db.namespace', 'DB_NAMESPACE', 'namespace' if not namespace else namespace, config) self.logger = logging.getLogger('Plugin') logging.basicConfig(level=logging.INFO)
697,942
Return a (compiled) regular expression for tokenization. Args: meta_left, meta_right: e.g. '{' and '}' - The regular expressions are memoized. - This function is public so the syntax highlighter can use it.
def MakeTokenRegex(meta_left, meta_right):
    key = meta_left, meta_right
    if key not in _token_re_cache:
        # - Need () grouping for re.split
        # - The first character must be a non-space.  This allows us to ignore
        #   literals like function() { return 1; }
        # - There must be at least one (non-space) character inside {}
        _token_re_cache[key] = re.compile(
            r'(' + re.escape(meta_left) + r'\S.*?' + re.escape(meta_right) + r')')
    return _token_re_cache[key]
698,660
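Because the compiled pattern wraps the whole token in a capture group, re.split keeps the delimiters, which is what makes the regex usable for tokenization. A quick demonstration with '{' and '}':

    import re

    token_re = re.compile(r'(' + re.escape('{') + r'\S.*?' + re.escape('}') + r')')

    print(re.split(token_re, 'Hello {name}, you have {num} messages'))
    # ['Hello ', '{name}', ', you have ', '{num}', ' messages']

    # A lone "{ " followed by code is NOT matched, since the first inner
    # character must be a non-space:
    print(re.split(token_re, 'function() { return 1; }'))
    # ['function() { return 1; }']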
Construct a dictionary { template name -> Template() instance }. Args: root_section: _Section instance -- root of the original parse tree
def _MakeGroupFromRootSection(root_section, undefined_str): group = {} for statement in root_section.Statements(): if isinstance(statement, six.string_types): continue func, args = statement # here the function acts as ID for the block type if func is _DoDef and isinstance(args, _Section): section = args # Construct a Template instance from a this _Section subtree t = Template._FromSection(section, group, undefined_str) group[section.section_name] = t return group
698,666
Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion.
def _Execute(statements, context, callback, trace): # Every time we call _Execute, increase this depth if trace: trace.exec_depth += 1 for i, statement in enumerate(statements): if isinstance(statement, six.string_types): callback(statement) else: # In the case of a substitution, args is a pair (name, formatters). # In the case of a section, it's a _Section instance. try: func, args = statement func(args, context, callback, trace) except UndefinedVariable as e: # Show context for statements start = max(0, i - 3) end = i + 3 e.near = statements[start:end] e.trace = trace # Attach caller's trace (could be None) raise
698,672
Expand a data dictionary with a template AND a style. DEPRECATED -- Remove this entire function in favor of expand(d, style=style) A style is a Template instance that factors out the common strings in several "body" templates. Args: template: Template instance for the inner "page content" style: Template instance for the outer "page style" data: Data dictionary, with a 'body' key (or the key named by body_subtree)
def expand_with_style(template, style, data, body_subtree='body'): if template.has_defines: return template.expand(data, style=style) else: tokens = [] execute_with_style_LEGACY(template, style, data, tokens.append, body_subtree=body_subtree) return JoinTokens(tokens)
698,676
Get the value associated with a name in the current context. The current context could be a dictionary in a list, or a dictionary outside a list. Args: name: name to lookup, e.g. 'foo' or 'foo.bar.baz' Returns: The value, or self.undefined_str Raises: UndefinedVariable if self.undefined_str is not set
def Lookup(self, name): if name == '@': return self.stack[-1].context parts = name.split('.') value = self._LookUpStack(parts[0]) # Now do simple lookups of the rest of the parts for part in parts[1:]: try: value = value[part] except (KeyError, TypeError): # TypeError for non-dictionaries return self._Undefined(part) return value
698,701
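The dotted-name traversal is the subtle part of Lookup; stripped of the scope stack it reduces to a few lines. A standalone sketch with the same KeyError/TypeError handling (undefined handling simplified to a default return value):

    def lookup(data, name, undefined=None):
        value = data
        for part in name.split('.'):
            try:
                value = value[part]
            except (KeyError, TypeError):  # TypeError for non-dictionaries
                return undefined
        return value

    ctx = {'foo': {'bar': {'baz': 42}}}
    assert lookup(ctx, 'foo.bar.baz') == 42
    assert lookup(ctx, 'foo.missing') is None
    assert lookup(ctx, 'foo.bar.baz.deeper') is None  # 42['deeper'] -> TypeError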
Low level method to expand the template piece by piece. Args: data_dict: The JSON data dictionary. callback: A callback which should be called with each expanded token. group: Dictionary of name -> Template instance (for styles) Example: You can pass 'f.write' as the callback to write directly to a file handle.
def execute(self, data_dict, callback, group=None, trace=None): # First try the passed in version, then the one set by _SetTemplateGroup. May # be None. Only one of these should be set. group = group or self.group context = _ScopedContext(data_dict, self.undefined_str, group=group) _Execute(self._program.Statements(), context, callback, trace)
698,704
Emit signal to main. Args: signal: Name of the signal to be emitted. message: Message to be sent. analysis_id: Identifies the instance of this analysis.
def emit(self, signal, message, analysis_id): log.debug('kernel {} zmq send ({}): {}' ''.format(analysis_id, signal, message)) self.zmq_publish.send(json.dumps({ 'analysis_id': analysis_id, 'frame': {'signal': signal, 'load': message}, }, default=json_encoder_default).encode('utf-8'))
698,808
Reads pylintrc config into native ConfigParser object. Args: contents (str): The contents of the file containing the INI config. Returns: ConfigParser.ConfigParser: The parsed configuration.
def read_config(contents): file_obj = io.StringIO(contents) config = six.moves.configparser.ConfigParser() config.readfp(file_obj) return config
699,203
Loads the pylint.config.py file. Args: filename (str): The python file containing the local configuration. Returns: module: The loaded Python module.
def load_local_config(filename): if not filename: return imp.new_module('local_pylint_config') module = imp.load_source('local_pylint_config', filename) return module
699,204
Determines the final additions and replacements. Combines the config module with the defaults. Args: config_module: The loaded local configuration module. Returns: Config: the final configuration.
def determine_final_config(config_module): config = Config( DEFAULT_LIBRARY_RC_ADDITIONS, DEFAULT_LIBRARY_RC_REPLACEMENTS, DEFAULT_TEST_RC_ADDITIONS, DEFAULT_TEST_RC_REPLACEMENTS) for field in config._fields: if hasattr(config_module, field): config = config._replace(**{field: getattr(config_module, field)}) return config
699,205
Reads a requirements file. Args: req_file (str): Filename of requirements file
def read_requirements(req_file): items = list(parse_requirements(req_file, session={})) result = [] for item in items: # Get line number from item line_number = item.comes_from.split(req_file + ' (line ')[1][:-1] if item.req: item.req.marker = item.markers result.append((item.req, line_number)) else: result.append((item, line_number)) return result
699,385
Return list of outdated requirements. Args: req_file (str): Filename of requirements file skip_packages (list): List of package names to ignore.
def check_requirements_file(req_file, skip_packages): reqs = read_requirements(req_file) if skip_packages is not None: reqs = [req for req in reqs if req.name not in skip_packages] outdated_reqs = filter(None, [check_req(req) for req in reqs]) return outdated_reqs
699,392
Run the given statement a number of times and return the runtime stats Args: fail_if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34"
def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None, concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'): num_lines = 0 log = Logger(output_fmt) with Runner(hosts, concurrency, sample_mode) as runner: version_info = aio.run(runner.client.get_server_version) for line in as_statements(lines_from_stdin(stmt)): runner.warmup(line, warmup) timed_stats = runner.run(line, iterations=repeat, duration=duration) r = Result( version_info=version_info, statement=line, timed_stats=timed_stats, concurrency=concurrency ) log.result(r) if fail_if: eval_fail_if(fail_if, r) num_lines += 1 if num_lines == 0: raise SystemExit( 'No SQL statements provided. Use --stmt or provide statements via stdin')
700,089
Adaptively determines image background. Args: image: 1-channel input image. mask: 1-channel mask, same size as image. blocksize: adaptive algorithm parameter. Returns: image of same size as input with foreground inpainted with background.
def _calc_block_mean_variance(image, mask, blocksize): I = image.copy() I_f = I.astype(np.float32) / 255. # Used for mean and std. result = np.zeros( (image.shape[0] / blocksize, image.shape[1] / blocksize), dtype=np.float32) for i in xrange(0, image.shape[0] - blocksize, blocksize): for j in xrange(0, image.shape[1] - blocksize, blocksize): patch = I_f[i:i+blocksize+1, j:j+blocksize+1] mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1] tmp1 = np.zeros((blocksize, blocksize)) tmp2 = np.zeros((blocksize, blocksize)) mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch) value = 0 if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD: value = mean[0][0] result[i/blocksize, j/blocksize] = value small_image = cv2.resize(I, (image.shape[1] / blocksize, image.shape[0] / blocksize)) res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY) inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5, cv2.INPAINT_TELEA) res = cv2.resize(inpainted, (image.shape[1], image.shape[0])) return res
700,128
Applies adaptive thresholding to the given image. Args: image: BGRA image. block_size: optional int block_size to use for adaptive thresholding. mask: optional mask. Returns: Thresholded image.
def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None): if mask is None: mask = np.zeros(image.shape[:2], dtype=np.uint8) mask[:] = 255 if len(image.shape) > 2 and image.shape[2] == 4: image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY) res = _calc_block_mean_variance(image, mask, block_size) res = image.astype(np.float32) - res.astype(np.float32) + 255 _, res = cv2.threshold(res, 215, 255, cv2.THRESH_BINARY) return res
700,129
Generate an insert statement using the given table and dictionary. Args: table (str): table name d (dict): dictionary with column names as keys and values as values. Returns: tuple of statement and arguments >>> to_insert('doc.foobar', {'name': 'Marvin'}) ('insert into doc.foobar ("name") values (?)', ['Marvin'])
def to_insert(table, d): columns = [] args = [] for key, val in d.items(): columns.append('"{}"'.format(key)) args.append(val) stmt = 'insert into {table} ({columns}) values ({params})'.format( table=table, columns=', '.join(columns), params=', '.join(['?'] * len(columns))) return (stmt, args)
700,132
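The statement produced by to_insert uses qmark parameter style, so any DB-API driver that supports it can execute the result directly. A runnable sketch against sqlite3 (a schema-less table name replaces doc.foobar, and the compressed helper below is an illustrative variant, not CrateDB-specific):

    import sqlite3

    def to_insert(table, d):
        columns = ['"{}"'.format(key) for key in d]
        args = list(d.values())
        stmt = 'insert into {} ({}) values ({})'.format(
            table, ', '.join(columns), ', '.join(['?'] * len(columns)))
        return stmt, args

    conn = sqlite3.connect(':memory:')
    conn.execute('create table foobar (name text)')
    stmt, args = to_insert('foobar', {'name': 'Marvin'})
    conn.execute(stmt, args)
    assert conn.execute('select name from foobar').fetchone() == ('Marvin',)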
Insert JSON lines fed into stdin into a Crate cluster. If no hosts are specified the statements will be printed. Args: table: Target table name. bulk_size: Bulk size of the insert statements. concurrency: Number of operations to run concurrently. hosts: hostname:port pairs of the Crate nodes
def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None): if not hosts: return print_only(table) queries = (to_insert(table, d) for d in dicts_from_stdin()) bulk_queries = as_bulk_queries(queries, bulk_size) print('Executing inserts: bulk_size={} concurrency={}'.format( bulk_size, concurrency), file=sys.stderr) stats = Stats() with clients.client(hosts, concurrency=concurrency) as client: f = partial(aio.measure, stats, client.execute_many) try: aio.run_many(f, bulk_queries, concurrency) except clients.SqlException as e: raise SystemExit(str(e)) try: print(format_stats(stats.get(), output_fmt)) except KeyError: if not stats.sampler.values: raise SystemExit('No data received via stdin') raise
700,134
Picks dominant angle of a set of lines. Args: lines: iterable of (x1, y1, x2, y2) tuples that define lines. domination_type: either MEDIAN or MEAN. Returns: Dominant angle value in radians. Raises: ValueError: on unknown domination_type.
def _get_dominant_angle(lines, domination_type=MEDIAN): if domination_type == MEDIAN: return _get_median_angle(lines) elif domination_type == MEAN: return _get_mean_angle(lines) else: raise ValueError('Unknown domination type provided: %s' % ( domination_type))
700,144
Finds an angle that matches the given one modulo step. Increments and decrements the given value with a given step. Args: range: a 2-tuple of min and max target values. step: tuning step. Returns: Normalized value within a given range.
def _normalize_angle(angle, range, step): while angle <= range[0]: angle += step while angle >= range[1]: angle -= step return angle
700,145
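A worked check of the normalization: a value above the range has the step subtracted until it falls inside. A standalone copy (the range argument renamed to bounds, since the original shadows the builtin):

    import math

    def normalize_angle(angle, bounds, step):
        lo, hi = bounds
        while angle <= lo:
            angle += step
        while angle >= hi:
            angle -= step
        return angle

    # 3.5 rad is outside (-pi/2, pi/2); one pi step brings it back inside.
    assert abs(normalize_angle(3.5, (-math.pi / 2, math.pi / 2), math.pi)
               - (3.5 - math.pi)) < 1e-12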
Returns a list of collectors. Args: limit (int): number of collectors to return offset (int): the offset from which the list of collectors should begin
def get_collectors(self, limit=1000, offset=0): options = { 'limit': limit, 'offset': offset, } request = requests.get(self.url, params=options, auth=self.auth) try: results = request.json()['collectors'] except KeyError: results = request.json() except json.decoder.JSONDecodeError: results = [] return results
700,148
Returns a dict of a collector's details if found. Args: name (str): name of the collector to search for
def find(self, name): collectors = self.get_collectors() for collector in collectors: if name.lower() == collector['name'].lower(): self.collector_id = collector['id'] return collector return {'status': 'No results found.'}
700,149
Delete a collector from inventory. Args: collector_id (int): id of collector (optional)
def delete(self, collector_id=None):
    cid = self.collector_id
    if collector_id:
        cid = collector_id  # an explicitly passed id overrides the stored one
    url = '{0}/{1}'.format(self.url, cid)
    request = requests.delete(url, auth=self.auth)
    try:
        # an error body is returned when the collector could not be deleted
        response = request.json()
    except ValueError:
        # a successful delete apparently returns no JSON body,
        # so synthesize a success response
        response = {
            u'message': u'The request completed successfully.',
            u'status': 200,
        }
    return response
700,150
Return a dict of collector details. Args: collector_id (int): id of collector (optional)
def info(self, collector_id=None):
    cid = self.collector_id
    if collector_id:
        cid = collector_id
    url = '{0}/{1}'.format(self.url, cid)
    request = requests.get(url, auth=self.auth)
    return request.json()
700,151
Search the logs. Args: auth (Client): Authentication object api (str): API endpoint path
def __init__(self, auth, api='/logs/search', **kwargs): self.api = api self.log = auth.log try: self.url = '%s%s' % (auth.get_url(), self.api) except AttributeError: self.url = 'https://api.sumologic.com/api/v1%s' % self.api try: self.auth = auth.get_auth() except AttributeError: self.auth = auth
700,364
Initializes a Sheet instance. Args: orig_image: cv.Mat instance with the original sheet image. dpi: optional (x resolution, y resolution) tuple or None. If set to None, will try to guess dpi. save_image: A callback to save debug images with args (name, img)
def __init__(self, orig_image, dpi, save_image):
    self.orig_img = orig_image
    self.save_image = save_image
    self._fg_mask = None
    self._shreds = None
    if dpi is None:
        self.res_x, self.res_y = self._guess_dpi()
    else:
        self.res_x, self.res_y = dpi
700,526
Detects shreds in the current sheet and constructs Shred instances. Caches the results for further invocations. Args: feature_extractors: iterable of AbstractShredFeature instances to use for shred feature assignment. sheet_name: string, included in shred attributes. Returns: list of Shred instances.
def get_shreds(self, feature_extractors, sheet_name): if self._shreds is None: shreds = [] _, contours, _ = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for i, contour in enumerate(contours): shred = self._make_shred(contour, i, feature_extractors, sheet_name) if shred is not None: shreds.append(shred) self._shreds = shreds return self._shreds
700,527
Creates a Shred instance from a given contour. Args: c: cv2 contour object. name: string shred name within a sheet. feature_extractors: iterable of AbstractShredFeature instances. Returns: A new Shred instance or None on failure.
def _make_shred(self, c, name, feature_extractors, sheet_name):
    height, width, channels = self.orig_img.shape

    # bounding rect of current contour
    r_x, r_y, r_w, r_h = cv2.boundingRect(c)

    # Generating simplified contour to use it in html
    epsilon = 0.01 * cv2.arcLength(c, True)
    simplified_contour = cv2.approxPolyDP(c, epsilon, True)

    # filter out too small fragments
    if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3:
        print("Skipping piece #%s as too small (%spx x %s px)" % (
            name, r_w, r_h))
        return None
    if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100:
        print("Skipping piece #%s as too big (%spx x %s px)" % (
            name, r_w, r_h))
        return None

    # position of rect of min area.
    # this will provide us angle to straighten image
    box_center, bbox, angle = cv2.minAreaRect(c)

    # We want our pieces to be "vertical"
    if bbox[0] > bbox[1]:
        angle += 90
        bbox = (bbox[1], bbox[0])
    if bbox[1] / float(bbox[0]) > 70:
        print("Skipping piece #%s as too long and narrow" % name)
        return None

    # Coords of region of interest using which we should crop piece after
    # rotation
    y1 = math.floor(box_center[1] - bbox[1] / 2)
    x1 = math.floor(box_center[0] - bbox[0] / 2)
    bbox = tuple(map(int, map(math.ceil, bbox)))

    # A mask we use to show only piece we are currently working on
    piece_mask = np.zeros([height, width, 1], dtype=np.uint8)
    cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED)

    # apply mask to original image
    img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w]
    piece_in_context = self.save_image(
        "pieces/%s_ctx" % name,
        self.orig_img[max(r_y - 10, 0):r_y + r_h + 10,
                      max(r_x - 10, 0):r_x + r_w + 10])
    mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w]
    img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask)

    # Add alpha layer and set it to the mask
    img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA)
    img_roi[:, :, 3] = mask[:, :, 0]

    # Straighten it.
    # Because we crop the original image before rotation, this saves us some
    # memory and a lot of time, but we need to adjust coords of the center of
    # the new min area rect
    M = cv2.getRotationMatrix2D((box_center[0] - r_x, box_center[1] - r_y),
                                angle, 1)

    # And translate an image a bit to make it fit to the bbox again.
    # This is done with direct editing of the transform matrix.
    # (Wooohoo, I know matrix-fu)
    M[0][2] += r_x - x1
    M[1][2] += r_y - y1

    # Apply rotation/transform/crop
    img_roi = cv2.warpAffine(img_roi, M, bbox)
    piece_fname = self.save_image("pieces/%s" % name, img_roi, "png")

    # FEATURES MAGIC BELOW
    #
    # Get our mask/contour back after the transform
    _, _, _, mask = cv2.split(img_roi)
    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) != 1:
        print("Piece #%s has strange contours after transform" % name)
    cnt = contours[0]

    features_fname = self.save_image("pieces/%s_mask" % name, mask, "png")

    base_features = {
        # On_sheet_* features describe the min bounding box on the sheet.
        "on_sheet_x": r_x,
        "on_sheet_y": r_y,
        "on_sheet_width": r_w,
        "on_sheet_height": r_h,
        "on_sheet_angle": angle,
        "width": img_roi.shape[1],
        "height": img_roi.shape[0],
    }

    tags_suggestions = []
    for feat in feature_extractors:
        fts, tags = feat.get_info(img_roi, cnt, name)
        base_features.update(fts)
        tags_suggestions += tags
    if tags_suggestions:
        print(name, tags_suggestions)

    return Shred(
        contour=c,
        features=base_features,
        features_fname=features_fname,
        img_roi=img_roi,
        name=name,
        piece_fname=piece_fname,
        piece_in_context_fname=piece_in_context,
        sheet=sheet_name,
        simplified_contour=simplified_contour,
        tags_suggestions=tags_suggestions,
    )
700,532
Initializes Client object. Args: auth (tuple): Authentication object api (str): Api endpath
def __init__(self, auth, **kwargs): self.auth = auth self.protocol = kwargs.get('protocol', 'https') self.domain = kwargs.get('domain', 'api.sumologic.com') self.api = kwargs.get('api', '/api/v1') api_path = '%s' % self.api self.url = '%s://%s%s' % (self.protocol, self.domain, api_path) # setup debug logging self._debug_mode = kwargs.get('debug', False) self.log = logging.getLogger(__name__) self.log.addHandler(logging.StreamHandler()) self.log.setLevel(get_logging_level(self._debug_mode))
700,621
Calculate the centre of mass of an image along each axis (x, y, z) separately. Arguments: imdct - the image dictionary with the image and header data. Returns the array of the centre of mass for each axis.
def centre_mass_img(imdct): #> initialise centre of mass array com = np.zeros(3, dtype=np.float32) #> total image sum imsum = np.sum(imdct['im']) for ind_ax in [-1, -2, -3]: #> list of axes axs = range(imdct['im'].ndim) del axs[ind_ax] #> indexed centre of mass icom = np.sum( np.sum(imdct['im'], axis=tuple(axs)) \ * np.arange(imdct['im'].shape[ind_ax]))/imsum #> centre of mass in mm com[abs(ind_ax)-1] = icom * imdct['hdr']['pixdim'][abs(ind_ax)] return com
700,754
Anonymise DICOM file(s) Arguments: > dcmpth: can be a single DICOM file, a folder containing DICOM files, a list of DICOM file paths, or a dictionary of scanner data. > patient: the name of the patient. > physician: the name of the referring physician. > dob: patient's date of birth. > verbose: display processing output.
def dcmanonym(
        dcmpth,
        displayonly=False,
        patient='anonymised',
        physician='anonymised',
        dob='19800101',
        verbose=True):

    #> check if a single DICOM file
    if isinstance(dcmpth, basestring) and os.path.isfile(dcmpth):
        dcmlst = [dcmpth]
        if verbose:
            print 'i> recognised the input argument as a single DICOM file.'

    #> check if a folder containing DICOM files
    elif isinstance(dcmpth, basestring) and os.path.isdir(dcmpth):
        dircontent = os.listdir(dcmpth)
        #> create a list of DICOM files inside the folder
        dcmlst = [os.path.join(dcmpth, d) for d in dircontent
                  if os.path.isfile(os.path.join(dcmpth, d)) and d.endswith(dcmext)]
        if verbose:
            print 'i> recognised the input argument as the folder containing DICOM files.'

    #> check if a list of DICOM file paths
    elif isinstance(dcmpth, list):
        if not all([os.path.isfile(d) and d.endswith(dcmext) for d in dcmpth]):
            raise IOError('Not all files in the list are DICOM files.')
        dcmlst = dcmpth
        if verbose:
            print 'i> recognised the input argument as the list of DICOM file paths.'

    #> check if dictionary of data input <datain>
    elif isinstance(dcmpth, dict) and 'corepath' in dcmpth:
        dcmlst = list_dcm_datain(dcmpth)
        if verbose:
            print 'i> recognised the input argument as the dictionary of scanner data.'

    else:
        raise IOError('Unrecognised input!')

    for dcmf in dcmlst:
        #> read the file
        dhdr = dcm.dcmread(dcmf)

        #> get the basic info about the DICOM file
        dcmtype = dcminfo(dhdr, verbose=False)
        if verbose:
            print '-------------------------------'
            print 'i> the DICOM file is for:', dcmtype

        #> anonymise mMR data.
        if 'mmr' in dcmtype:
            if [0x029, 0x1120] in dhdr and dhdr[0x029, 0x1120].name == '[CSA Series Header Info]':
                csafield = dhdr[0x029, 0x1120]
                csa = csafield.value
            elif [0x029, 0x1020] in dhdr and dhdr[0x029, 0x1020].name == '[CSA Series Header Info]':
                csafield = dhdr[0x029, 0x1020]
                csa = csafield.value
            else:
                csa = ''

            # string length considered for replacement
            strlen = 200
            idx = [m.start() for m in re.finditer(r'([Pp]atients{0,1}[Nn]ame)', csa)]
            if idx and verbose:
                print ' > found sensitive information deep in DICOM headers:', dcmtype

            #> run the anonymisation
            iupdate = 0
            for i in idx:
                ci = i - iupdate
                if displayonly:
                    print ' > sensitive info:'
                    print '  ', csa[ci:ci + strlen]
                    continue
                rplcmnt = re.sub(
                    r'(\{\s*\"{1,2}\W*\w+\W*\w+\W*\"{1,2}\s*\})',
                    '{ ""' + patient + '"" }',
                    csa[ci:ci + strlen])
                #> update string
                csa = csa[:ci] + rplcmnt + csa[ci + strlen:]
                print ' > removed sensitive information.'
                #> correct for the number of removed letters
                iupdate = strlen - len(rplcmnt)

            #> update DICOM
            if not displayonly and csa != '':
                csafield.value = csa

        #> Patient's name
        if [0x010, 0x010] in dhdr:
            if displayonly:
                print ' > sensitive info:', dhdr[0x010, 0x010].name
                print '  ', dhdr[0x010, 0x010].value
            else:
                dhdr[0x010, 0x010].value = patient
                if verbose:
                    print ' > anonymised patient name'

        #> date of birth
        if [0x010, 0x030] in dhdr:
            if displayonly:
                print ' > sensitive info:', dhdr[0x010, 0x030].name
                print '  ', dhdr[0x010, 0x030].value
            else:
                dhdr[0x010, 0x030].value = dob
                if verbose:
                    print ' > anonymised date of birth'

        #> physician's name
        if [0x008, 0x090] in dhdr:
            if displayonly:
                print ' > sensitive info:', dhdr[0x008, 0x090].name
                print '  ', dhdr[0x008, 0x090].value
            else:
                dhdr[0x008, 0x090].value = physician
                if verbose:
                    print ' > anonymised physician name'

        dhdr.save_as(dcmf)
700,776
Lookup the keyword arguments needed for a given path name Parameters: name (str): The name of the path Returns: A list of keywords needed for filepath generation
def lookup_keys(self, name): assert name, 'Must specify a path name' assert name in self.templates.keys(), '{0} must be defined in the path templates'.format(name) # find all words inside brackets keys = list(set(re.findall(r'{(.*?)}', self.templates[name]))) # lookup any keys referenced inside special functions skeys = self._check_special_kwargs(name) keys.extend(skeys) # remove any duplicates keys = list(set(keys)) # remove the type : descriptor keys = [k.split(':')[0] for k in keys] return keys
701,017
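The key-harvesting regex and the ':' split can be seen on a representative template string (the template itself is made up for illustration):

    import re

    template = '{root}/{run}/spec-{plate:0>6}-{mjd}.fits'

    keys = list(set(re.findall(r'{(.*?)}', template)))
    keys = [k.split(':')[0] for k in keys]   # drop the format descriptor
    print(sorted(keys))                      # ['mjd', 'plate', 'root', 'run']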
check special functions for kwargs Checks the content of the special functions (%methodname) for any keyword arguments referenced within Parameters: name (str): A path key name Returns: A list of keyword arguments found in any special functions
def _check_special_kwargs(self, name): keys = [] # find any %method names in the template string functions = re.findall(r"\%\w+", self.templates[name]) if not functions: return keys # loop over special method names and extract keywords for function in functions: method = getattr(self, function[1:]) # get source code of special method source = self._find_source(method) fkeys = re.findall(r'kwargs\[(.*?)\]', source) if fkeys: # evaluate to proper string fkeys = [ast.literal_eval(k) for k in fkeys] keys.extend(fkeys) return keys
701,018
Sends an RPC and returns the response. Args: request_path: The path to send the request to, eg /api/appversion/create. payload: The body of the request, or None to send an empty request. content_type: The Content-Type header to use. timeout: timeout in seconds; default None i.e. no timeout. (Note: for large requests on OS X, the timeout doesn't work right.) kwargs: Any keyword arguments are converted into query string parameters. Returns: The response body, as a string.
def MySend1(request_path, payload=None, content_type="application/octet-stream", timeout=None, force_auth=True, **kwargs): # TODO: Don't require authentication. Let the server say # whether it is necessary. global rpc if rpc == None: rpc = GetRpcServer(upload_options) self = rpc if not self.authenticated and force_auth: self._Authenticate() if request_path is None: return if timeout is None: timeout = 30 # seconds old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) try: tries = 0 while True: tries += 1 args = dict(kwargs) url = "https://%s%s" % (self.host, request_path) if args: url += "?" + urllib.urlencode(args) req = self._CreateRequest(url=url, data=payload) req.add_header("Content-Type", content_type) try: f = self.opener.open(req) response = f.read() f.close() # Translate \r\n into \n, because Rietveld doesn't. response = response.replace('\r\n', '\n') # who knows what urllib will give us if type(response) == unicode: response = response.encode("utf-8") typecheck(response, str) return response except urllib2.HTTPError, e: if tries > 3: raise elif e.code == 401: self._Authenticate() elif e.code == 302: loc = e.info()["location"] if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0: return '' self._Authenticate() else: raise finally: socket.setdefaulttimeout(old_timeout)
701,119
Encode form fields for multipart/form-data. Args: fields: A sequence of (name, value) elements for regular form fields. files: A sequence of (name, filename, value) elements for data to be uploaded as files. Returns: (content_type, body) ready for httplib.HTTP instance. Source: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
def EncodeMultipartFormData(fields, files): BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' CRLF = '\r\n' lines = [] for (key, value) in fields: typecheck(key, str) typecheck(value, str) lines.append('--' + BOUNDARY) lines.append('Content-Disposition: form-data; name="%s"' % key) lines.append('') lines.append(value) for (key, filename, value) in files: typecheck(key, str) typecheck(filename, str) typecheck(value, str) lines.append('--' + BOUNDARY) lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) lines.append('Content-Type: %s' % GetContentType(filename)) lines.append('') lines.append(value) lines.append('--' + BOUNDARY + '--') lines.append('') body = CRLF.join(lines) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
701,126
Executes a command and returns the output from stdout and the return code. Args: command: Command to execute. print_output: If True, the output is printed to stdout. If False, both stdout and stderr are ignored. universal_newlines: Use universal_newlines flag (default: True). Returns: Tuple (output, return code)
def RunShellWithReturnCode(command, print_output=False, universal_newlines=True, env=os.environ): logging.info("Running %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=use_shell, universal_newlines=universal_newlines, env=env) if print_output: output_array = [] while True: line = p.stdout.readline() if not line: break print line.strip("\n") output_array.append(line) output = "".join(output_array) else: output = p.stdout.read() p.wait() errout = p.stderr.read() if print_output and errout: print >>sys.stderr, errout p.stdout.close() p.stderr.close() return output, p.returncode
701,127
Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename.
def SplitPatch(data): patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): unused, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): unused, temp_filename = line.split(':', 1) # When a file is modified, paths use '/' between directories, however # when a property is modified '\' is used on Windows. Make them the same # otherwise the file shows up twice. temp_filename = to_slash(temp_filename.strip()) if temp_filename != filename: # File has property changes but no modifications, create a new diff. new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches
701,129
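A sketch of the splitting behaviour on a toy two-file svn diff; this is a trimmed copy of the routine above without the Windows property-path handling, not the full implementation:

    def split_patch(data):
        patches, filename, diff = [], None, []
        for line in data.splitlines(True):
            if line.startswith('Index:'):
                if filename and diff:
                    patches.append((filename, ''.join(diff)))
                filename = line.split(':', 1)[1].strip()
                diff = [line]
                continue
            diff.append(line)
        if filename and diff:
            patches.append((filename, ''.join(diff)))
        return patches

    sample = ('Index: a.py\n+print(1)\n'
              'Index: b.py\n+print(2)\n')
    print(split_patch(sample))
    # [('a.py', 'Index: a.py\n+print(1)\n'), ('b.py', 'Index: b.py\n+print(2)\n')]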
Uses ClientLogin to authenticate the user, returning an auth token. Args: email: The user's email address password: The user's password Raises: ClientLoginError: If there was an error authenticating with ClientLogin. HTTPError: If there was some other form of HTTP error. Returns: The authentication token returned by ClientLogin.
def _GetAuthToken(self, email, password): account_type = "GOOGLE" if self.host.endswith(".google.com") and not force_google_account: # Needed for use inside Google. account_type = "HOSTED" req = self._CreateRequest( url="https://www.google.com/accounts/ClientLogin", data=urllib.urlencode({ "Email": email, "Passwd": password, "service": "ah", "source": "rietveld-codereview-upload", "accountType": account_type, }), ) try: response = self.opener.open(req) response_body = response.read() response_dict = dict(x.split("=") for x in response_body.split("\n") if x) return response_dict["Auth"] except urllib2.HTTPError, e: if e.code == 403: body = e.read() response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict) else: raise
701,152
Fetches authentication cookies for an authentication token. Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies.
def _GetAuthCookie(self, auth_token): # This is a dummy value to allow us to identify when we're successful. continue_location = "http://localhost/" args = {"continue": continue_location, "auth": auth_token} req = self._CreateRequest("https://%s/_ah/login?%s" % (self.host, urllib.urlencode(args))) try: response = self.opener.open(req) except urllib2.HTTPError, e: response = e if (response.code != 302 or response.info()["location"] != continue_location): raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp) self.authenticated = True
701,153
Converts string to Unicode code point ('263A' => 0x263a). Args: s: string to convert Returns: Unicode code point Raises: InputError: the string is not a valid Unicode value.
def _UInt(s): try: v = int(s, 16) except ValueError: v = -1 if len(s) < 4 or len(s) > 6 or v < 0 or v > _RUNE_MAX: raise InputError("invalid Unicode value %s" % (s,)) return v
701,170
Converts string to Unicode range. '0001..0003' => [1, 2, 3]. '0001' => [1]. Args: s: string to convert Returns: Unicode range Raises: InputError: the string is not a valid Unicode range.
def _URange(s): a = s.split("..") if len(a) == 1: return [_UInt(a[0])] if len(a) == 2: lo = _UInt(a[0]) hi = _UInt(a[1]) if lo < hi: return range(lo, hi + 1) raise InputError("invalid Unicode range %s" % (s,))
701,171
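Together, _UInt and _URange turn the textual forms in the Unicode data files into code-point lists. A self-contained miniature (a plain ValueError stands in for InputError):

    _RUNE_MAX = 0x10FFFF

    def uint(s):
        try:
            v = int(s, 16)
        except ValueError:
            v = -1
        if len(s) < 4 or len(s) > 6 or v < 0 or v > _RUNE_MAX:
            raise ValueError('invalid Unicode value %s' % (s,))
        return v

    def urange(s):
        a = s.split('..')
        if len(a) == 1:
            return [uint(a[0])]
        if len(a) == 2:
            lo, hi = uint(a[0]), uint(a[1])
            if lo < hi:
                return list(range(lo, hi + 1))
        raise ValueError('invalid Unicode range %s' % (s,))

    assert urange('0001') == [1]
    assert urange('0001..0003') == [1, 2, 3]
    assert urange('263A') == [0x263A]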
Returns Unicode code groups equivalent under case folding. Each group is a sorted list of code points, and the list of groups is sorted by first code point in the group. Args: unicode_dir: Unicode data directory Returns: a (togroup, groups) tuple: a dict mapping each lowercase code point to its fold-equivalent group, and the list of groups
def CaseGroups(unicode_dir=_UNICODE_DIR): # Dict mapping lowercase code point to fold-equivalent group. togroup = {} def DoLine(codes, fields): (_, foldtype, lower, _) = fields if foldtype not in ("C", "S"): return lower = _UInt(lower) togroup.setdefault(lower, [lower]).extend(codes) ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine) groups = togroup.values() for g in groups: g.sort() groups.sort() return togroup, groups
701,174
Returns dict mapping script names to code lists. Args: unicode_dir: Unicode data directory Returns: dict mapping script names to code lists
def Scripts(unicode_dir=_UNICODE_DIR): scripts = {} def DoLine(codes, fields): (_, name) = fields scripts.setdefault(name, []).extend(codes) ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine) return scripts
701,175
Returns dict mapping category names to code lists. Args: unicode_dir: Unicode data directory Returns: dict mapping category names to code lists
def Categories(unicode_dir=_UNICODE_DIR): categories = {} def DoLine(codes, fields): category = fields[2] categories.setdefault(category, []).extend(codes) # Add codes from Lu into L, etc. if len(category) > 1: short = category[0] categories.setdefault(short, []).extend(codes) ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine) return categories
701,176
Creates a list-table directive for a set of defined path templates Parameters: name (str): The name of the config section command (object): The sdss_access path instance templates (dict): A dictionary of the path templates Yields: A string rst-formated list-table directive
def _format_templates(name, command, templates): yield '.. list-table:: {0}'.format(name) yield _indent(':widths: 20 50 70') yield _indent(':header-rows: 1') yield '' yield _indent('* - Name') yield _indent(' - Template') yield _indent(' - Kwargs') for key, var in templates.items(): kwargs = command.lookup_keys(key) yield _indent('* - {0}'.format(key)) yield _indent(' - {0}'.format(var)) yield _indent(' - {0}'.format(', '.join(kwargs))) yield ''
701,178
Generate the relevant Sphinx nodes. Generates a section for the Tree datamodel. Formats a tree section as a list-table directive. Parameters: name (str): The name of the config to be documented, e.g. 'sdsswork' command (object): The loaded module templates (bool): If True, generate a section for the path templates Returns: A section docutil node
def _generate_nodes(self, name, command, templates=None): # the source name source_name = name # Title section = nodes.section( '', nodes.title(text=name), ids=[nodes.make_id(name)], names=[nodes.fully_normalize_name(name)]) # Summarize result = statemachine.ViewList() if templates: lines = _format_templates(name, command, command.templates) for line in lines: result.append(line, source_name) self.state.nested_parse(result, 0, section) return [section]
701,179
Given a list of segment names, return an array of vertex indices for all the vertices in those faces. Args: segments: a list of segment names, ret_face_indices: if it is `True`, returns face indices
def vertex_indices_in_segments(self, segments, ret_face_indices=False):
    import numpy as np
    import warnings
    face_indices = np.array([])
    vertex_indices = np.array([])
    if self.segm is not None:
        try:
            segments = [self.segm[name] for name in segments]
        except KeyError as e:
            raise ValueError('Unknown segments {}. Consider using Mesh.clean_segments on segments'.format(e.args[0]))
        face_indices = np.unique(np.concatenate(segments))
        vertex_indices = np.unique(np.ravel(self.f[face_indices]))
    else:
        warnings.warn('self.segm is None, will return empty array')
    if ret_face_indices:
        return vertex_indices, face_indices
    else:
        return vertex_indices
701,458
Remove the faces and vertices for given segments, keeping all others. Args: segments_to_remove: a list of segments whose vertices will be removed
def remove_segments(self, segments_to_remove): v_ind = self.vertex_indices_in_segments(segments_to_remove) self.segm = {name: faces for name, faces in self.segm.iteritems() if name not in segments_to_remove} self.remove_vertices(v_ind)
701,460
Returns a downsampled copy of this mesh. Args: step: the step size for the sampling Returns: a new, downsampled Mesh object. Raises: ValueError if this Mesh has faces.
def downsampled_mesh(self, step): from lace.mesh import Mesh if self.f is not None: raise ValueError( 'Function `downsampled_mesh` does not support faces.') low = Mesh() if self.v is not None: low.v = self.v[::step] if self.vc is not None: low.vc = self.vc[::step] return low
701,467
Returns a matrix M, which if multiplied by vertices, gives back edges (so "e = M.dot(v)"). Note that this generates one edge per edge, *not* two edges per triangle. Args: want_xyz: if true, takes and returns xyz coordinates, otherwise takes and returns x *or* y *or* z coordinates
def get_vertices_to_edges_matrix(self, want_xyz=True): import numpy as np import scipy.sparse as sp vpe = np.asarray(self.vertices_per_edge, dtype=np.int32) IS = np.repeat(np.arange(len(vpe)), 2) JS = vpe.flatten() data = np.ones_like(vpe) data[:, 1] = -1 data = data.flatten() if want_xyz: IS = np.concatenate((IS*3, IS*3+1, IS*3+2)) JS = np.concatenate((JS*3, JS*3+1, JS*3+2)) data = np.concatenate((data, data, data)) ij = np.vstack((IS.flatten(), JS.flatten())) return sp.csc_matrix((data, ij))
701,482
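For the scalar case (want_xyz=False) the construction above is a signed edge-vertex incidence matrix: row i carries +1 at the edge's first vertex and -1 at its second, so M.dot(v) yields per-edge differences. A small numpy/scipy check on a three-vertex path (the edge list is made up for illustration):

    import numpy as np
    import scipy.sparse as sp

    vpe = np.array([[0, 1], [1, 2]], dtype=np.int32)  # vertices_per_edge

    IS = np.repeat(np.arange(len(vpe)), 2)            # row index: edge id, twice
    JS = vpe.flatten()                                # column index: vertex ids
    data = np.ones_like(vpe)
    data[:, 1] = -1                                   # +1 first vertex, -1 second
    M = sp.csc_matrix((data.flatten(), (IS, JS)))

    v = np.array([10.0, 12.0, 15.0])                  # one scalar coordinate per vertex
    print(M.dot(v))                                   # [-2. -3.] == v[0]-v[1], v[1]-v[2]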
Allows subplot-style inspection of primitives in multiple subwindows. Args: shape: a tuple indicating the number of vertical and horizontal windows requested Returns: a list of lists of MeshViewer objects: one per window requested.
def MeshViewers( shape=(1, 1), titlebar="Mesh Viewers", keepalive=False, window_width=1280, window_height=960 ): if not test_for_opengl(): return Dummy() mv = MeshViewerLocal( shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive, window_width=window_width, window_height=window_height ) return mv.get_subwindows()
701,567
Returns first and last day of a quarter Args: quarter (str) quarter, in format '2015Q1' Returns: (tuple) datetime.dates for the first and last days of the quarter
def quarter_boundaries(quarter): year, quarter = quarter.split('Q') year = int(year) quarter = int(quarter) first_month_of_quarter = 3 * quarter - 2 last_month_of_quarter = 3 * quarter first_day = date(year, first_month_of_quarter, 1) last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1]) return first_day, last_day
701,968
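The arithmetic is easy to sanity-check: quarter q covers months 3q-2 through 3q, and monthrange supplies the last day of the closing month. Restating the routine above in runnable form:

    from calendar import monthrange
    from datetime import date

    def quarter_boundaries(quarter):
        year, q = quarter.split('Q')
        year, q = int(year), int(q)
        first_month, last_month = 3 * q - 2, 3 * q
        return (date(year, first_month, 1),
                date(year, last_month, monthrange(year, last_month)[1]))

    assert quarter_boundaries('2015Q1') == (date(2015, 1, 1), date(2015, 3, 31))
    assert quarter_boundaries('2015Q4') == (date(2015, 10, 1), date(2015, 12, 31))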
Returns metta metadata for a quarter's SOC code classifier matrix Args: quarter (str) quarter, in format '2015Q1' num_dimensions (int) Number of features in matrix Returns: (dict) metadata suitable for metta.archive_train_test
def metta_config(quarter, num_dimensions): first_day, last_day = quarter_boundaries(quarter) return { 'start_time': first_day, 'end_time': last_day, 'prediction_window': 3, # ??? 'label_name': 'onet_soc_code', 'label_type': 'categorical', 'matrix_id': 'job_postings_{}'.format(quarter), 'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)], }
701,969
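So, for instance, metta_config('2015Q1', 3) evaluates to:

{
    'start_time': datetime.date(2015, 1, 1),
    'end_time': datetime.date(2015, 3, 31),
    'prediction_window': 3,
    'label_name': 'onet_soc_code',
    'label_type': 'categorical',
    'matrix_id': 'job_postings_2015Q1',
    'feature_names': ['doc2vec_0', 'doc2vec_1', 'doc2vec_2'],
}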
Store train and test matrices using metta

Args:
    train_features_path (str) Path to matrix with train features
    train_labels_path (str) Path to matrix with train labels
    test_features_path (str) Path to matrix with test features
    test_labels_path (str) Path to matrix with test labels
    train_quarter (str) Quarter of train matrix
    test_quarter (str) Quarter of test matrix
    num_dimensions (int) Number of features
def upload_to_metta(train_features_path, train_labels_path,
                    test_features_path, test_labels_path,
                    train_quarter, test_quarter, num_dimensions):
    train_config = metta_config(train_quarter, num_dimensions)
    test_config = metta_config(test_quarter, num_dimensions)

    X_train = pd.read_csv(train_features_path, sep=',')
    X_train.columns = ['doc2vec_' + str(i) for i in range(X_train.shape[1])]
    Y_train = pd.read_csv(train_labels_path)
    Y_train.columns = ['onet_soc_code']
    train = pd.concat([X_train, Y_train], axis=1)

    X_test = pd.read_csv(test_features_path, sep=',')
    X_test.columns = ['doc2vec_' + str(i) for i in range(X_test.shape[1])]
    Y_test = pd.read_csv(test_labels_path)
    Y_test.columns = ['onet_soc_code']
    test = pd.concat([X_test, Y_test], axis=1)

    # Pass the label-bearing frames; the label column is identified by
    # 'label_name' in the configs built above.
    metta.archive_train_test(
        train_config,
        train,
        test_config,
        test,
        directory='wdi'
    )
701,970
Uploads the given file to s3

Args:
    s3_conn: (boto.s3.connection) an s3 connection
    filepath (str) the local filename
    s3_path (str) the destination path on s3
def upload(s3_conn, filepath, s3_path):
    bucket_name, prefix = split_s3_path(s3_path)
    bucket = s3_conn.get_bucket(bucket_name)
    filename = os.path.basename(filepath)
    key = boto.s3.key.Key(
        bucket=bucket,
        name='{}/{}'.format(prefix, filename)
    )
    logging.info('uploading from %s to %s', filepath, key)
    key.set_contents_from_filename(filepath)
702,029
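A usage sketch, assuming boto credentials are configured; the bucket and file names are made up:

import boto

s3_conn = boto.connect_s3()
upload(s3_conn, '/tmp/report.csv', 's3://my-bucket/reports')
# uploads the file to s3://my-bucket/reports/report.csv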
Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.

Args:
    s3_conn: (boto.s3.connection) an s3 connection
    s3_prefix: (str) the destination prefix
    data_to_sync: (dict)
def upload_dict(s3_conn, s3_prefix, data_to_sync):
    bucket_name, prefix = split_s3_path(s3_prefix)
    bucket = s3_conn.get_bucket(bucket_name)
    for key, value in data_to_sync.items():
        full_name = '{}/{}.json'.format(prefix, key)
        s3_key = boto.s3.key.Key(
            bucket=bucket,
            name=full_name
        )
        logging.info('uploading key %s', full_name)
        s3_key.set_contents_from_string(json.dumps(value))
702,030
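A usage sketch with a made-up prefix and payload; each dict value lands at <prefix>/<key>.json:

import boto

s3_conn = boto.connect_s3()
upload_dict(s3_conn, 's3://my-bucket/lookups', {
    'onet_titles': {'11-1011.00': 'Chief Executives'},
})
# writes s3://my-bucket/lookups/onet_titles.json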
Downloads the given s3_path

Args:
    s3_conn (boto.s3.connection) a boto s3 connection
    out_filename (str) local filename to save the file
    s3_path (str) the source path on s3
def download(s3_conn, out_filename, s3_path):
    bucket_name, prefix = split_s3_path(s3_path)
    bucket = s3_conn.get_bucket(bucket_name)
    key = boto.s3.key.Key(
        bucket=bucket,
        name=prefix
    )
    logging.info('loading from %s into %s', key, out_filename)
    key.get_contents_to_filename(out_filename, cb=log_download_progress)
702,031
Call the function I wrap.

Args:
    observed_obj: The observed object which called me.
    *arg: The arguments passed to me by the observed object.
    **kw: The keyword args passed to me by the observed object.

Returns:
    Whatever the function I wrap returns.
def __call__(self, observed_obj, *arg, **kw):
    if self.identify_observed:
        return self.func_wr()(observed_obj, *arg, **kw)
    else:
        return self.func_wr()(*arg, **kw)
702,139
Call the bound method I wrap.

Args:
    observed_obj: The observed object which called me.
    *arg: The arguments passed to me by the observed object.
    **kw: The keyword args passed to me by the observed object.

Returns:
    Whatever the bound method I wrap returns.
def __call__(self, observed_obj, *arg, **kw):
    bound_method = getattr(self.inst(), self.method_name)
    if self.identify_observed:
        return bound_method(observed_obj, *arg, **kw)
    else:
        return bound_method(*arg, **kw)
702,141
Add a function as an observer.

Args:
    func: The function to register as an observer.
    identify_observed: See docstring for add_observer.

Returns:
    True if the function is added, otherwise False.
def _add_function(self, func, identify_observed):
    key = self.make_key(func)
    if key not in self.observers:
        self.observers[key] = ObserverFunction(
            func, identify_observed, (key, self.observers))
        return True
    else:
        return False
702,143
Add a bound method as an observer.

Args:
    bound_method: The bound method to add as an observer.
    identify_observed: See the docstring for add_observer.

Returns:
    True if the bound method is added, otherwise False.
def _add_bound_method(self, bound_method, identify_observed):
    inst = bound_method.__self__
    method_name = bound_method.__name__
    key = self.make_key(bound_method)
    if key not in self.observers:
        self.observers[key] = ObserverBoundMethod(
            inst, method_name, identify_observed, (key, self.observers))
        return True
    else:
        return False
702,144
Un-register an observer.

Args:
    observer: The observer to un-register.

Returns:
    True if an observer was removed, otherwise False.
def discard_observer(self, observer):
    discarded = False
    key = self.make_key(observer)
    if key in self.observers:
        del self.observers[key]
        discarded = True
    return discarded
702,145
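A hedged sketch of how these observer pieces fit together. It assumes the library exposes an observable_function decorator (the decorator name is an assumption, not confirmed by these snippets); add_observer and discard_observer are referenced in the docstrings above:

@observable_function          # assumed decorator from this library
def ring_bell():
    print('ding')

def on_ring():                # a plain-function observer
    print('heard the bell')

ring_bell.add_observer(on_ring)
ring_bell()                   # runs ring_bell, then notifies on_ring
ring_bell.discard_observer(on_ring)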
Return an ObservableBoundMethod or ObservableUnboundMethod.

If accessed by instance, I return an ObservableBoundMethod which handles
that instance. If accessed by class I return an ObservableUnboundMethod.

Args:
    inst: The instance through which I was accessed. This will be None if
        I was accessed through the class, i.e. as an unbound method.
    cls: The class through which I was accessed.
def __get__(self, inst, cls):
    if inst is None:
        return self._unbound_method
    else:
        if not hasattr(inst, INSTANCE_OBSERVER_ATTR):
            d = {}
            setattr(inst, INSTANCE_OBSERVER_ATTR, d)
        else:
            d = getattr(inst, INSTANCE_OBSERVER_ATTR)
        observers = d.setdefault(self._func.__name__, {})
        return ObservableBoundMethod(self._func, inst, observers)
702,150
Caches the JSON-serializable output of the function to a given file

Args:
    filename (str) The filename (sans directory) to store the output

Returns:
    decorator, applicable to a function that produces JSON-serializable output
def cache_json(filename):
    def cache_decorator(cacheable_function):
        @wraps(cacheable_function)
        def cache_wrapper(*args, **kwargs):
            path = CACHE_DIRECTORY + filename
            check_create_folder(path)
            if os.path.exists(path):
                with open(path) as infile:
                    return json.load(infile)
            else:
                function_output = cacheable_function(*args, **kwargs)
                with open(path, 'w') as outfile:
                    json.dump(function_output, outfile)
                return function_output
        return cache_wrapper
    return cache_decorator
702,336
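Typical usage, with a made-up expensive function; the first call computes and writes the file under CACHE_DIRECTORY, later calls read it back:

@cache_json('soc_code_counts.json')
def count_soc_codes():
    # hypothetical expensive computation
    return {'11-1011.00': 42, '15-1132.00': 108}

counts = count_soc_codes()   # computes and caches
counts = count_soc_codes()   # served from the cached file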
Generic Notifier.

Parameters:
    - `context`: session context
    - `event_type`: the event type to report, e.g. ip.usage
    - `payload`: dict containing the payload to send
def do_notify(context, event_type, payload):
    LOG.debug('IP_BILL: notifying {}'.format(payload))
    notifier = n_rpc.get_notifier('network')
    notifier.info(context, event_type, payload)
702,402
Method to send notifications.

We must send USAGE when a public IPv4 address is deallocated or a FLIP is
associated.

Parameters:
    - `context`: the context for notifier
    - `event_type`: the event type for IP allocate, deallocate, associate,
      disassociate
    - `ipaddress`: the ipaddress object to notify about

Returns: nothing

Notes: this may live in the billing module
def notify(context, event_type, ipaddress, send_usage=False, *args, **kwargs):
    disabled = (
        (event_type == IP_ADD and not CONF.QUARK.notify_ip_add) or
        (event_type == IP_DEL and not CONF.QUARK.notify_ip_delete) or
        (event_type == IP_ASSOC and not CONF.QUARK.notify_flip_associate) or
        (event_type == IP_DISASSOC and
         not CONF.QUARK.notify_flip_disassociate) or
        (event_type == IP_EXISTS and not CONF.QUARK.notify_ip_exists)
    )
    if disabled:
        LOG.debug('IP_BILL: notification {} is disabled by config'.format(
            event_type))
        return

    # Do not send notifications when we are undoing due to an error
    if 'rollback' in kwargs and kwargs['rollback']:
        LOG.debug('IP_BILL: not sending notification because we are in undo')
        return

    # ip.add needs the allocated_at time.
    # All other events need the current time.
    ts = ipaddress.allocated_at if event_type == IP_ADD else _now()
    payload = build_payload(ipaddress, event_type, event_time=ts)
    # Send the notification with the payload
    do_notify(context, event_type, payload)

    # When we deallocate an IP or associate a FLIP we must send a usage
    # message to billing. In other words, when we supply end_time we must
    # send USAGE to billing immediately.
    # Our billing period is 24 hrs. If the address was allocated after
    # midnight, send allocated_at as the start_time. If the address was
    # allocated yesterday, then send midnight as the start_time.
    # Note: if allocated_at is empty we assume today's midnight.
    if send_usage:
        if ipaddress.allocated_at is not None and \
                ipaddress.allocated_at >= _midnight_today():
            start_time = ipaddress.allocated_at
        else:
            start_time = _midnight_today()
        payload = build_payload(ipaddress, IP_EXISTS,
                                start_time=start_time, end_time=ts)
        do_notify(context, IP_EXISTS, payload)
702,403
Yield job postings in common schema format

Args:
    quarter (str) The quarter, in format '2015Q1'
    stats_counter (object, optional) A counter that can track both input
        and output documents using a 'track' method.
def postings(self, quarter, stats_counter=None):
    logging.info('Finding postings for %s', quarter)
    for posting in self._iter_postings(quarter):
        transformed = self._transform(posting)
        transformed['id'] = '{}_{}'.format(
            self.partner_id,
            self._id(posting)
        )
        if stats_counter:
            stats_counter.track(
                input_document=posting,
                output_document=transformed
            )
        yield transformed
702,417
Returns all dates between two dates.

Inclusive of the start date but not the end date.

Args:
    start_date (datetime.date)
    end_date (datetime.date)

Returns:
    (list) of datetime.date objects
def dates_in_range(start_date, end_date):
    return [
        start_date + timedelta(n)
        for n in range(int((end_date - start_date).days))
    ]
702,430
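For example (with `date` and `timedelta` from the datetime module, which the function relies on); note the end date is excluded:

>>> dates_in_range(date(2015, 1, 1), date(2015, 1, 4))
[datetime.date(2015, 1, 1), datetime.date(2015, 1, 2), datetime.date(2015, 1, 3)]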
Stream a JSON file (in JSON-per-line format)

Args:
    local_file (file-like object) an open file-handle that contains a
        JSON string on each line

Yields:
    (dict) JSON objects
def stream_json_file(local_file):
    for i, line in enumerate(local_file):
        try:
            data = json.loads(line.decode('utf-8'))
            yield data
        except ValueError as e:
            logging.warning("Skipping line %d due to error: %s", i, e)
            continue
702,451
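A small usage sketch; the file must be opened in binary mode since each line is decoded from UTF-8 (the filename is made up):

with open('postings.jsonl', 'rb') as f:
    for record in stream_json_file(f):
        print(record.get('id'))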
Creates an index with a given configuration

Args:
    index_name (str): Name of the index you want to create
    index_config (dict) configuration for the index
    client (Elasticsearch.IndicesClient) the Elasticsearch client
def create_index(index_name, index_config, client):
    client.create(index=index_name, body=index_config)
702,555
Retrieve the base index name from an alias

Args:
    alias_name (str) Name of the alias
    index_client (Elasticsearch.IndicesClient) an Elasticsearch index
        client. Optional, will create one if not given

Returns:
    (str) Name of index
def get_index_from_alias(alias_name, index_client=None):
    index_client = index_client or indices_client()
    if not index_client.exists_alias(name=alias_name):
        return None
    return list(index_client.get_alias(name=alias_name).keys())[0]
702,556
Points an alias to a new index, then delete the old index if needed

Uses client.update_aliases to perform this with zero downtime

Args:
    alias_name (str) Name of the alias
    new_index_name (str) The new index that the alias should point to
    index_client (Elasticsearch.IndicesClient) Elasticsearch index client
def atomic_swap(alias_name, new_index_name, index_client):
    logging.info('Performing atomic index alias swap')
    if index_client.exists_alias(name=alias_name):
        old_index_name = get_index_from_alias(alias_name, index_client)
        logging.info('Removing old as well as adding new')
        actions = {'actions': [
            {'remove': {'index': old_index_name, 'alias': alias_name}},
            {'add': {'index': new_index_name, 'alias': alias_name}}
        ]}
        index_client.update_aliases(body=actions)
        index_client.delete(index=old_index_name)
    else:
        logging.info('Old alias not found, only adding new')
        actions = {'actions': [
            {'add': {'index': new_index_name, 'alias': alias_name}}
        ]}
        index_client.update_aliases(body=actions)
702,557
Context manager to create a new index based on a given alias, allow the
caller to index it, and then point the alias to the new index

Args:
    index_name (str) Name of an alias that should point to the new index
    index_config (dict) Configuration for the new index

Yields:
    (name) The full name of the new index
def zero_downtime_index(index_name, index_config):
    client = indices_client()
    temporary_name = index_name + '_' + str(uuid.uuid4())
    logging.info('creating index with config %s', index_config)
    create_index(temporary_name, index_config, client)
    try:
        yield temporary_name
        atomic_swap(index_name, temporary_name, client)
    except Exception:
        logging.error(
            'deleting temporary index %s due to error:',
            temporary_name,
            exc_info=True
        )
        client.delete(index=temporary_name)
702,558
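Putting the indexing helpers together, a hedged sketch; the alias name, config, document, and the `es` client variable (an assumed elasticsearch.Elasticsearch instance) are hypothetical:

index_config = {'settings': {'number_of_shards': 1}}

with zero_downtime_index('job_postings', index_config) as temp_name:
    # index documents into the temporary index here; on success the
    # 'job_postings' alias is atomically swapped to point at it
    es.index(index=temp_name, body={'title': 'Data Engineer'})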
Base class for Elasticsearch indexers

Subclasses implement the index setting definition and transformation of
data. The base class handles index management and bulk indexing with ES.

Args:
    s3_conn - a boto s3 connection
    es_client - an Elasticsearch indices client
def __init__(self, s3_conn, es_client):
    self.s3_conn = s3_conn
    self.es_client = es_client
702,559
Index all available documents, using streaming_bulk for speed

Args:
    index_name (string): The index
def index_all(self, index_name):
    oks = 0
    notoks = 0
    for ok, item in streaming_bulk(
        self.es_client,
        self._iter_documents(index_name)
    ):
        if ok:
            oks += 1
        else:
            notoks += 1
    logging.info(
        "Import results: %d ok, %d not ok",
        oks,
        notoks
    )
702,562
Format the info field for SNV or SV variants

Args:
    variant(dict)
    variant_type(str): snv or sv

Returns:
    vcf_info(str): A VCF formatted info field
def format_info(variant, variant_type='snv'):
    observations = variant.get('observations', 0)
    homozygotes = variant.get('homozygote')
    hemizygotes = variant.get('hemizygote')
    vcf_info = f"Obs={observations}"
    if homozygotes:
        vcf_info += f";Hom={homozygotes}"
    if hemizygotes:
        vcf_info += f";Hem={hemizygotes}"
    # This is SV specific
    if variant_type == 'sv':
        end = int((variant['end_left'] + variant['end_right']) / 2)
        vcf_info += f";SVTYPE={variant['sv_type']};END={end};SVLEN={variant['length']}"
    return vcf_info
703,312
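For example, with a hypothetical SV record (END is the midpoint of end_left and end_right; the Hem field is omitted because 'hemizygote' is absent):

variant = {
    'observations': 12,
    'homozygote': 3,
    'end_left': 1000,
    'end_right': 1010,
    'sv_type': 'DEL',
    'length': 500,
}
print(format_info(variant, variant_type='sv'))
# Obs=12;Hom=3;SVTYPE=DEL;END=1005;SVLEN=500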