text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def load_modules_alignak_configuration(self):  # pragma: no cover, not yet with unit tests.
    """Load Alignak configuration from the arbiter modules.

    Each configured module that implements ``get_alignak_configuration`` is
    queried; the returned dictionaries are merged and then loaded into the
    arbiter configuration as legacy Shinken-style ``key=value`` parameters.

    :return: None
    """
    alignak_cfg = {}
    # Ask configured modules if they got configuration for us
    for instance in self.modules_manager.instances:
        if not hasattr(instance, 'get_alignak_configuration'):
            # Skip this module but keep asking the remaining ones.
            # (The previous code returned here, silently discarding the
            # configuration of every module after the first non-provider.)
            continue
        try:
            logger.info("Getting Alignak global configuration from module '%s'", instance.name)
            cfg = instance.get_alignak_configuration()
            alignak_cfg.update(cfg)
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("Module %s get_alignak_configuration raised an exception %s. "
                         "Log and continue to run", instance.name, str(exp))
            output = io.StringIO()
            traceback.print_exc(file=output)
            # typo fix: was "Back trace of this remove"
            logger.error("Back trace of this error: %s", output.getvalue())
            output.close()
            continue

    params = []
    if alignak_cfg:
        logger.info("Got Alignak global configuration:")
        for key, value in sorted(alignak_cfg.items()):
            logger.info("- %s = %s", key, value)
            # properties starting with an _ character are "transformed" to macro variables
            if key.startswith('_'):
                key = '$' + key[1:].upper() + '$'
            # properties valued as None are filtered
            if value is None:
                continue
            # properties valued as None string are filtered
            if value == 'None':
                continue
            # properties valued as empty strings are filtered
            if value == '':
                continue
            # set properties as legacy Shinken configuration files
            params.append("%s=%s" % (key, value))
    self.conf.load_params(params)
[ "def", "load_modules_alignak_configuration", "(", "self", ")", ":", "# pragma: no cover, not yet with unit tests.", "alignak_cfg", "=", "{", "}", "# Ask configured modules if they got configuration for us", "for", "instance", "in", "self", ".", "modules_manager", ".", "instance...
46.680851
20.914894
def license_loader(lic_dir=LIC_DIR):
    """Read every file in *lic_dir* and wrap its text in a License object."""
    loaded = []
    for entry in os.listdir(lic_dir):
        full_path = os.path.join(lic_dir, entry)
        with open(full_path) as handle:
            contents = handle.read()
        loaded.append(License(contents))
    return loaded
[ "def", "license_loader", "(", "lic_dir", "=", "LIC_DIR", ")", ":", "lics", "=", "[", "]", "for", "ln", "in", "os", ".", "listdir", "(", "lic_dir", ")", ":", "lp", "=", "os", ".", "path", ".", "join", "(", "lic_dir", ",", "ln", ")", "with", "open"...
29.9
11.1
def __get_league_object():
    """Return the XML element describing the league.

    Only designed for internal use.
    """
    # parse the raw properties feed and drill down to the <league> element
    properties = mlbgame.data.get_properties()
    root = etree.parse(properties).getroot()
    return root.find('leagues').find('league')
[ "def", "__get_league_object", "(", ")", ":", "# get data", "data", "=", "mlbgame", ".", "data", ".", "get_properties", "(", ")", "# return league object", "return", "etree", ".", "parse", "(", "data", ")", ".", "getroot", "(", ")", ".", "find", "(", "'leag...
33.625
15.375
def register_renderer(app, id, renderer, force=True):
    """Registers a renderer on the application.

    :param app: The :class:`~flask.Flask` application to register the
                renderer on
    :param id: Internal id-string for the renderer
    :param renderer: Renderer to register
    :param force: Whether or not to overwrite the renderer if a different one
                  is already registered for ``id``
    """
    registry = app.extensions.setdefault('nav_renderers', {})
    if not force:
        # keep an existing registration, only fill the gap
        registry.setdefault(id, renderer)
    else:
        registry[id] = renderer
[ "def", "register_renderer", "(", "app", ",", "id", ",", "renderer", ",", "force", "=", "True", ")", ":", "renderers", "=", "app", ".", "extensions", ".", "setdefault", "(", "'nav_renderers'", ",", "{", "}", ")", "if", "force", ":", "renderers", "[", "i...
36.1875
18.8125
def ContainsAddressStr(self, address):
    """
    Determine if the wallet contains the address.

    Args:
        address (str): a string representing the public key.

    Returns:
        bool: True, if the address is present in the wallet. False otherwise.
    """
    # compare against the Address of every tracked contract
    return any(contract.Address == address
               for contract in self._contracts.values())
[ "def", "ContainsAddressStr", "(", "self", ",", "address", ")", ":", "for", "key", ",", "contract", "in", "self", ".", "_contracts", ".", "items", "(", ")", ":", "if", "contract", ".", "Address", "==", "address", ":", "return", "True", "return", "False" ]
30.714286
18.571429
def perm(A, p):
    """
    Symmetric permutation of a symmetric sparse matrix.

    :param A: :py:class:`spmatrix`
    :param p: :py:class:`matrix` or :class:`list` of length `A.size[0]`
    """
    assert isinstance(A, spmatrix), "argument must be a sparse matrix"
    order = A.size[0]
    assert order == A.size[1], "A must be a square matrix"
    assert order == len(p), "length of p must be equal to the order of A"
    # permute rows and columns simultaneously
    return A[p, p]
[ "def", "perm", "(", "A", ",", "p", ")", ":", "assert", "isinstance", "(", "A", ",", "spmatrix", ")", ",", "\"argument must be a sparse matrix\"", "assert", "A", ".", "size", "[", "0", "]", "==", "A", ".", "size", "[", "1", "]", ",", "\"A must be a squa...
35.083333
22.416667
def _build_xpath_expr(attrs): """Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. """ # give class attribute as class_ because class is a python keyword if 'class_' in attrs: attrs['class'] = attrs.pop('class_') s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s))
[ "def", "_build_xpath_expr", "(", "attrs", ")", ":", "# give class attribute as class_ because class is a python keyword", "if", "'class_'", "in", "attrs", ":", "attrs", "[", "'class'", "]", "=", "attrs", ".", "pop", "(", "'class_'", ")", "s", "=", "[", "\"@{key}={...
32.7
22.6
def _aix_memdata():
    '''
    Return the memory information for AIX systems
    '''
    # grains default to 0 so callers always see both keys
    grains = {'mem_total': 0, 'swap_total': 0}
    prtconf = salt.utils.path.which('prtconf')
    if prtconf:
        # scan prtconf output for the "Memory Size: <n> MB" line
        for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
            comps = [x for x in line.strip().split(' ') if x]
            if len(comps) > 2 and 'Memory' in comps[0] and 'Size' in comps[1]:
                grains['mem_total'] = int(comps[2])
                break
    else:
        log.error('The \'prtconf\' binary was not found in $PATH.')
    swap_cmd = salt.utils.path.which('swap')
    if swap_cmd:
        # 'swap -s' reports sizes in 4K blocks; the two fields picked out
        # here are summed and scaled by 4 — presumably to yield KB.
        # NOTE(review): the -2/-6 indices assume a fixed output layout;
        # confirm against an actual AIX 'swap -s' sample.
        swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
        try:
            swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
        except ValueError:
            # unexpected output format: report swap as unknown
            swap_total = None
        grains['swap_total'] = swap_total
    else:
        log.error('The \'swap\' binary was not found in $PATH.')
    return grains
[ "def", "_aix_memdata", "(", ")", ":", "grains", "=", "{", "'mem_total'", ":", "0", ",", "'swap_total'", ":", "0", "}", "prtconf", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "'prtconf'", ")", "if", "prtconf", ":", "for", "line", "in", ...
36.884615
22.5
def _validate_iso8601_string(self, value): """Return the value or raise a ValueError if it is not a string in ISO8601 format.""" ISO8601_REGEX = r'(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})([+-](\d{2})\:(\d{2})|Z)' if re.match(ISO8601_REGEX, value): return value else: raise ValueError('{} must be in ISO8601 format.'.format(value))
[ "def", "_validate_iso8601_string", "(", "self", ",", "value", ")", ":", "ISO8601_REGEX", "=", "r'(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2})\\:(\\d{2})\\:(\\d{2})([+-](\\d{2})\\:(\\d{2})|Z)'", "if", "re", ".", "match", "(", "ISO8601_REGEX", ",", "value", ")", ":", "return", "valu...
56.428571
20.428571
def admin_content_status(request):
    """Return a dictionary with the states and info of baking books,
    plus the filters from the GET request, to pre-populate the form.
    """
    statement, sql_args = get_baking_statuses_sql(request.GET)
    states = []
    # optional whitelist of states the user wants to see
    status_filters = request.params.getall('status_filter') or []
    state_icons = dict(STATE_ICONS)
    with db_connect(cursor_factory=DictCursor) as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(statement, vars=sql_args)
            for row in cursor.fetchall():
                message = ''
                # a NULL state means the bake has not started yet
                state = row['state'] or 'PENDING'
                if status_filters and state not in status_filters:
                    continue
                if state == 'FAILURE':  # pragma: no cover
                    # surface the last meaningful traceback line as the message
                    if row['traceback'] is not None:
                        message = row['traceback'].split("\n")[-2]
                # flag books baked with a recipe older than the latest one
                latest_recipe = row['latest_recipe_id']
                current_recipe = row['recipe_id']
                if (current_recipe is not None and
                        current_recipe != latest_recipe):
                    state += ' stale_recipe'
                # any decorated SUCCESS state (e.g. 'SUCCESS stale_recipe')
                # has no dedicated icon; fall back to 'unknown'
                state_icon = state
                if state[:7] == "SUCCESS" and len(state) > 7:
                    state_icon = 'unknown'
                states.append({
                    'title': row['name'].decode('utf-8'),
                    'authors': format_authors(row['authors']),
                    'uuid': row['uuid'],
                    'print_style': row['print_style'],
                    'print_style_link': request.route_path(
                        'admin-print-style-single',
                        style=row['print_style']),
                    'recipe': row['recipe'],
                    'recipe_name': row['recipe_name'],
                    'recipe_tag': row['recipe_tag'],
                    'recipe_link': request.route_path(
                        'get-resource', hash=row['recipe']),
                    'created': row['created'],
                    'state': state,
                    'state_message': message,
                    'state_icon': state_icons.get(
                        state_icon, DEFAULT_ICON),
                    'status_link': request.route_path(
                        'admin-content-status-single', uuid=row['uuid']),
                    'content_link': request.route_path(
                        'get-content', ident_hash=row['ident_hash'])
                    })
    # sorting: SQL already sorts most columns; STATE is computed here,
    # so it must be sorted in Python
    sort = request.params.get('sort', 'bpsa.created DESC')
    sort_match = SORTS_DICT[sort.split(' ')[0]]
    sort_arrow = ARROW_MATCH[sort.split(' ')[1]]
    if sort == "STATE ASC":
        states.sort(key=lambda x: x['state'])
    if sort == "STATE DESC":
        states.sort(key=lambda x: x['state'], reverse=True)
    # pagination parameters (fall back to defaults on empty values)
    num_entries = request.params.get('number', 100) or 100
    page = request.params.get('page', 1) or 1
    try:
        page = int(page)
        num_entries = int(num_entries)
        start_entry = (page - 1) * num_entries
    except ValueError:
        raise httpexceptions.HTTPBadRequest(
            'invalid page({}) or entries per page({})'.
            format(page, num_entries))
    total_entries = len(states)
    states = states[start_entry: start_entry + num_entries]
    # NOTE(review): sql_args is mutated and returned as the template
    # context — confirm no caller relies on it staying unchanged.
    returns = sql_args
    returns.update({'start_entry': start_entry,
                    'num_entries': num_entries,
                    'page': page,
                    'total_entries': total_entries,
                    'states': states,
                    'sort_' + sort_match: sort_arrow,
                    'sort': sort,
                    'domain': request.host,
                    'latest_only': request.GET.get('latest', False),
                    'STATE_ICONS': STATE_ICONS,
                    'status_filters': status_filters or [
                        i[0] for i in STATE_ICONS]})
    return returns
[ "def", "admin_content_status", "(", "request", ")", ":", "statement", ",", "sql_args", "=", "get_baking_statuses_sql", "(", "request", ".", "GET", ")", "states", "=", "[", "]", "status_filters", "=", "request", ".", "params", ".", "getall", "(", "'status_filte...
44.348837
14.023256
def _get_group_dn(self, group_lookup_attribute_value):
    """
    Searches for a group and retrieves its distinguished name.

    :param group_lookup_attribute_value: The value for the LDAP_GROUPS_GROUP_LOOKUP_ATTRIBUTE
    :type group_lookup_attribute_value: str

    :returns: The distinguished name (str) of the first matching group.

    :raises: **GroupDoesNotExist** if the group doesn't exist in the active directory.
    """
    self.ldap_connection.search(search_base=self.GROUP_SEARCH['base_dn'],
                                search_filter=self.GROUP_SEARCH['filter_string'].format(
                                    lookup_value=escape_query(group_lookup_attribute_value)),
                                search_scope=self.GROUP_SEARCH['scope'],
                                attributes=self.GROUP_SEARCH['attribute_list'])
    # keep only actual entries (the response also carries referral records)
    results = [result["dn"] for result in self.ldap_connection.response
               if result["type"] == "searchResEntry"]
    if not results:
        raise GroupDoesNotExist("The {group_lookup_attribute} provided does not exist in the Active "
                                "Directory.".format(group_lookup_attribute=self.group_lookup_attr))
    if len(results) > 1:
        logger.debug("Search returned more than one result: {results}".format(results=results))
    # The original ended with "if results: return results[0] else: return
    # results"; the else branch was unreachable after the raise above.
    return results[0]
[ "def", "_get_group_dn", "(", "self", ",", "group_lookup_attribute_value", ")", ":", "self", ".", "ldap_connection", ".", "search", "(", "search_base", "=", "self", ".", "GROUP_SEARCH", "[", "'base_dn'", "]", ",", "search_filter", "=", "self", ".", "GROUP_SEARCH"...
51.074074
35.481481
def get_minions():
    '''
    Return a list of minions
    '''
    conn, mdb = _get_conn(ret=None)
    # distinct() already yields the list of minion ids.  The previous code
    # appended that list into another list, returning [[...]] instead of
    # the flat list promised by the docstring.
    return mdb.saltReturns.distinct('minion')
[ "def", "get_minions", "(", ")", ":", "conn", ",", "mdb", "=", "_get_conn", "(", "ret", "=", "None", ")", "ret", "=", "[", "]", "name", "=", "mdb", ".", "saltReturns", ".", "distinct", "(", "'minion'", ")", "ret", ".", "append", "(", "name", ")", ...
20.666667
20.444444
def create_parser():
    """Create the top-level heron-explorer argument parser with all
    sub-command parsers attached.
    """
    help_epilog = '''Getting more help:
  heron-explorer help <command> Disply help and options for <command>\n
  For detailed documentation, go to http://heronstreaming.io'''

    parser = argparse.ArgumentParser(
        prog='heron-explorer',
        epilog=help_epilog,
        formatter_class=SubcommandHelpFormatter,
        add_help=False)

    # sub-commands
    subparsers = parser.add_subparsers(
        title="Available commands",
        metavar='<command> <options>')

    # subparser for subcommands related to clusters
    clusters.create_parser(subparsers)

    # subparser for subcommands related to logical plan
    logicalplan.create_parser(subparsers)

    # subparser for subcommands related to physical plan
    physicalplan.create_parser(subparsers)

    # subparser for subcommands related to displaying info
    topologies.create_parser(subparsers)

    # subparser for help subcommand
    help.create_parser(subparsers)

    # subparser for version subcommand
    version.create_parser(subparsers)

    return parser
[ "def", "create_parser", "(", ")", ":", "help_epilog", "=", "'''Getting more help:\n heron-explorer help <command> Disply help and options for <command>\\n\n For detailed documentation, go to http://heronstreaming.io'''", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog"...
28.083333
17.888889
def need_record_permission(factory_name):
    """Decorator checking that the user has the required permissions on record.

    :param factory_name: name of the permission factory.
    """
    def need_record_permission_builder(f):
        @wraps(f)
        def need_record_permission_decorator(self, record=None, *args,
                                             **kwargs):
            # prefer a factory configured on the view itself, fall back to
            # the application-wide one registered on current_records_rest
            permission_factory = (
                getattr(self, factory_name) or
                getattr(current_records_rest, factory_name)
            )
            # FIXME use context instead
            request._methodview = self
            # no factory configured at all means the check is skipped
            if permission_factory:
                verify_record_permission(permission_factory, record)
            return f(self, record=record, *args, **kwargs)
        return need_record_permission_decorator
    return need_record_permission_builder
[ "def", "need_record_permission", "(", "factory_name", ")", ":", "def", "need_record_permission_builder", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "need_record_permission_decorator", "(", "self", ",", "record", "=", "None", ",", "*", "args", ",",...
38.363636
14.909091
def visit_Call(self, node):
    """Propagate 'debug' wrapper into inner function calls if needed.

    Args:
        node (ast.AST): node statement to surround.
    """
    # depth 0 means no further propagation is wanted
    if self.depth == 0:
        return node

    if self.ignore_exceptions is None:
        ignored = ast.Name("None", ast.Load())
    else:
        ignored = ast.List(self.ignore_exceptions, ast.Load())

    caught_name = "None"
    if self.catch_exception:
        caught_name = self.catch_exception
    caught = ast.Name(caught_name, ast.Load())

    remaining_depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)
    wrapper = ast.Name("debug", ast.Load())
    # Python 2 ast.Call required explicit starargs/kwargs slots
    extra = [] if IS_PYTHON_3 else [None, None]

    node.func = ast.Call(wrapper,
                         [node.func, ignored, caught, remaining_depth],
                         [], *extra)
    return node
[ "def", "visit_Call", "(", "self", ",", "node", ")", ":", "if", "self", ".", "depth", "==", "0", ":", "return", "node", "if", "self", ".", "ignore_exceptions", "is", "None", ":", "ignore_exceptions", "=", "ast", ".", "Name", "(", "\"None\"", ",", "ast",...
34.758621
22.068966
def check_elastic(self):
    '''
    Checks if we need to break moderation in order to maintain our
    desired throttle limit

    @return: True if we need to break moderation
    '''
    # elastic mode only applies once the kick-in threshold equals the limit
    if not (self.elastic and self.elastic_kick_in == self.limit):
        return False
    current = self.redis_conn.zcard(self.window_key)
    # break moderation when the remaining headroom exceeds the buffer
    return self.limit - current > self.elastic_buffer
[ "def", "check_elastic", "(", "self", ")", ":", "if", "self", ".", "elastic", "and", "self", ".", "elastic_kick_in", "==", "self", ".", "limit", ":", "value", "=", "self", ".", "redis_conn", ".", "zcard", "(", "self", ".", "window_key", ")", "if", "self...
35.166667
22.666667
def unstructure_attrs_asdict(self, obj):
    # type: (Any) -> Dict[str, Any]
    """Our version of `attrs.asdict`, so we can call back to us."""
    dispatch = self._unstructure_func.dispatch
    result = self._dict_factory()
    # walk the attrs-generated attribute list and unstructure each value
    # with the handler registered for its runtime class
    for attribute in obj.__class__.__attrs_attrs__:
        value = getattr(obj, attribute.name)
        result[attribute.name] = dispatch(value.__class__)(value)
    return result
[ "def", "unstructure_attrs_asdict", "(", "self", ",", "obj", ")", ":", "# type: (Any) -> Dict[str, Any]", "attrs", "=", "obj", ".", "__class__", ".", "__attrs_attrs__", "dispatch", "=", "self", ".", "_unstructure_func", ".", "dispatch", "rv", "=", "self", ".", "_...
38.545455
8.272727
def add_sibling(self, sibling):
    """
    Designate this a multi-feature representative and add a co-feature.

    Discontinuous features (commonly CDS) are encoded in GFF3 as several
    entries sharing the same feature type and ID attribute; one object
    acts as the "representative" and every other entry is attached to it
    as a "sibling".  Calling this method marks `self` as the
    representative and records `sibling` as one of its co-features.
    """
    assert self.is_pseudo is False
    if self.siblings is None:
        # first sibling: start the collection lazily
        self.siblings = list()
    # both objects point at the representative
    self.multi_rep = self
    sibling.multi_rep = self
    self.siblings.append(sibling)
[ "def", "add_sibling", "(", "self", ",", "sibling", ")", ":", "assert", "self", ".", "is_pseudo", "is", "False", "if", "self", ".", "siblings", "is", "None", ":", "self", ".", "siblings", "=", "list", "(", ")", "self", ".", "multi_rep", "=", "self", "...
45.92
23.04
def _fix_lsm_bitspersample(self, parent):
    """Correct LSM bitspersample tag.

    Old LSM writers may use a separate region for two 16-bit values,
    although they fit into the tag value element of the tag.
    """
    # only applies to the BitsPerSample tag (code 258) with two values
    if self.code != 258 or self.count != 2:
        return
    # TODO: test this case; need example file
    log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code)
    # the two 16-bit values actually encode a little-endian 32-bit
    # file offset; repack and reinterpret them
    value = struct.pack('<HH', *self.value)
    self.valueoffset = struct.unpack('<I', value)[0]
    parent.filehandle.seek(self.valueoffset)
    # re-read the real two 16-bit values from that offset
    self.value = struct.unpack('<HH', parent.filehandle.read(4))
[ "def", "_fix_lsm_bitspersample", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "code", "!=", "258", "or", "self", ".", "count", "!=", "2", ":", "return", "# TODO: test this case; need example file", "log", ".", "warning", "(", "'TiffTag %i: correcting...
42.666667
18.133333
def list_zones(self, max_results=None, page_token=None):
    """List zones for the project associated with this client.

    See
    https://cloud.google.com/dns/api/v1/managedZones/list

    :type max_results: int
    :param max_results: maximum number of zones to return, If not
                        passed, defaults to a value set by the API.

    :type page_token: str
    :param page_token: Optional. If present, return the next batch of
        zones, using the value, which must correspond to the
        ``nextPageToken`` value returned in the previous response.
        Deprecated: use the ``pages`` property of the returned iterator
        instead of manually passing the token.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.dns.zone.ManagedZone`
              belonging to this project.
    """
    path = "/projects/%s/managedZones" % (self.project,)
    # lazily fetches pages via the connection; no request happens here
    return page_iterator.HTTPIterator(
        client=self,
        api_request=self._connection.api_request,
        path=path,
        item_to_value=_item_to_zone,
        items_key="managedZones",
        page_token=page_token,
        max_results=max_results,
    )
[ "def", "list_zones", "(", "self", ",", "max_results", "=", "None", ",", "page_token", "=", "None", ")", ":", "path", "=", "\"/projects/%s/managedZones\"", "%", "(", "self", ".", "project", ",", ")", "return", "page_iterator", ".", "HTTPIterator", "(", "clien...
41.096774
19.774194
def simulate(radius=5e-6, sphere_index=1.339, medium_index=1.333,
             wavelength=550e-9, grid_size=(80, 80), model="projection",
             pixel_size=None, center=None):
    """Simulate scattering at a sphere

    Parameters
    ----------
    radius: float
        Radius of the sphere [m]
    sphere_index: float
        Refractive index of the object
    medium_index: float
        Refractive index of the surrounding medium
    wavelength: float
        Vacuum wavelength of the imaging light [m]
    grid_size: tuple of ints or int
        Resulting image size in x and y [px]
    model: str
        Sphere model to use (see :const:`available`)
    pixel_size: float or None
        Pixel size [m]; if set to `None` the pixel size is chosen such
        that the radius fits at least three to four times into the grid.
    center: tuple of floats or None
        Center position in image coordinates [px]; if set to None,
        the center of the image (grid_size - 1)/2 is used.

    Returns
    -------
    qpi: qpimage.QPImage
        Quantitative phase data set
    """
    if isinstance(grid_size, numbers.Integral):
        # a scalar means a square grid
        grid_size = (grid_size, grid_size)

    if pixel_size is None:
        # choose the pixel size from the radius-to-wavelength ratio
        ratio = radius / wavelength
        if ratio < 5:
            # strong diffraction artifacts expected; use 4x radius to
            # capture the full field
            factor = 4
        elif ratio <= 10:
            # linearly decrease towards 3x radius
            factor = 4 - (ratio - 5) / 5
        else:
            # few artifacts; 3x radius suffices and allows larger radii
            # to be simulated with BHFIELD
            factor = 3
        pixel_size = factor * radius / np.min(grid_size)

    if center is None:
        center = (np.array(grid_size) - 1) / 2

    model_func = model_dict[model]
    return model_func(radius=radius,
                      sphere_index=sphere_index,
                      medium_index=medium_index,
                      wavelength=wavelength,
                      pixel_size=pixel_size,
                      grid_size=grid_size,
                      center=center)
[ "def", "simulate", "(", "radius", "=", "5e-6", ",", "sphere_index", "=", "1.339", ",", "medium_index", "=", "1.333", ",", "wavelength", "=", "550e-9", ",", "grid_size", "=", "(", "80", ",", "80", ")", ",", "model", "=", "\"projection\"", ",", "pixel_size...
32.818182
14.712121
def FromBinary(cls, record_data, record_count=1):
    """Create an UpdateRecord subclass from binary record data.

    This should be called with a binary record blob (NOT including the
    record type header) and it will decode it into a AddNodeRecord.

    Args:
        record_data (bytearray): The raw record data that we wish to parse
            into an UpdateRecord subclass NOT including its 8 byte record header.
        record_count (int): The number of records included in record_data.

    Raises:
        ArgumentError: If the record_data is malformed and cannot be parsed.

    Returns:
        AddNodeRecord: The decoded reflash tile record.
    """
    # NOTE(review): record_count is accepted but never used here —
    # presumably each blob holds exactly one record; confirm with callers.
    _cmd, address, _resp_length, payload = cls._parse_rpc_info(record_data)
    # the RPC payload carries the serialized node descriptor
    descriptor = parse_binary_descriptor(payload)
    return AddNodeRecord(descriptor, address=address)
[ "def", "FromBinary", "(", "cls", ",", "record_data", ",", "record_count", "=", "1", ")", ":", "_cmd", ",", "address", ",", "_resp_length", ",", "payload", "=", "cls", ".", "_parse_rpc_info", "(", "record_data", ")", "descriptor", "=", "parse_binary_descriptor"...
40.227273
29.045455
def persistent_menu(menu):
    """
    Validate a persistent menu definition.

    more:
    https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu

    :param menu: list of top-level menu items
    :return:
    """
    if len(menu) > 3:
        raise Invalid('menu should not exceed 3 call to actions')

    # nested sub-menus may hold at most 5 call-to-actions each
    nested_items = (entry for entry in menu if entry['type'] == 'nested')
    if any(len(entry['call_to_actions']) > 5 for entry in nested_items):
        raise Invalid('call_to_actions is limited to 5 for sub-levels')

    for entry in menu:
        if len(entry['title']) > 30:
            raise Invalid('menu item title should not exceed 30 characters')
        if entry['type'] == 'postback' and len(entry['payload']) > 1000:
            raise Invalid('menu item payload should not exceed 1000 characters')
[ "def", "persistent_menu", "(", "menu", ")", ":", "if", "len", "(", "menu", ")", ">", "3", ":", "raise", "Invalid", "(", "'menu should not exceed 3 call to actions'", ")", "if", "any", "(", "len", "(", "item", "[", "'call_to_actions'", "]", ")", ">", "5", ...
36.526316
28.315789
def generate(self):
    """ Generates the report """
    self._setup()
    # one resource sub-directory per test configuration
    for config_name in self.report_info.config_to_test_names_map.keys():
        config_dir = os.path.join(self.report_info.resource_dir, config_name)
        utils.makedirs(config_dir)
        testsuite = self._generate_junit_xml(config_name)
        # NOTE(review): the junit file is reopened with mode 'w' on every
        # iteration with the same name, so only the LAST config's suite
        # survives — confirm this is intended.
        with open(os.path.join(self.report_info.junit_xml_path, 'zopkio_junit_reports.xml'), 'w') as file:
            TestSuite.to_file(file, [testsuite], prettyprint=False)
[ "def", "generate", "(", "self", ")", ":", "self", ".", "_setup", "(", ")", "for", "config_name", "in", "self", ".", "report_info", ".", "config_to_test_names_map", ".", "keys", "(", ")", ":", "config_dir", "=", "os", ".", "path", ".", "join", "(", "sel...
43.363636
21.727273
def cloud_train(train_dataset, eval_dataset, analysis_dir, output_dir, features,
                model_type, max_steps, num_epochs, train_batch_size,
                eval_batch_size, min_eval_frequency, top_n, layer_sizes,
                learning_rate, epsilon, job_name, job_name_prefix, config):
    """Train model using CloudML.

    See local_train() for a description of the args.

    Args:
        config: A CloudTrainingConfig object.
        job_name: Training job name. A default will be picked if None.
    """
    import google.datalab.ml as ml

    # both datasets must be single file patterns, not explicit file lists
    if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
        raise ValueError('CsvDataSets must be built with a file pattern, not list '
                         'of files.')

    if file_io.file_exists(output_dir):
        raise ValueError('output_dir already exist. Use a new output path.')

    if isinstance(features, dict):
        # Make a features file.
        # NOTE(review): the existence check below is always False after the
        # guard above raised for an existing output_dir — confirm it can
        # simply be the recursive_create_dir call.
        if not file_io.file_exists(output_dir):
            file_io.recursive_create_dir(output_dir)
        features_file = os.path.join(output_dir, 'features_file.json')
        file_io.write_string_to_file(
            features_file,
            json.dumps(features))
    else:
        # features is already a path to a features file
        features_file = features

    if not isinstance(config, ml.CloudTrainingConfig):
        raise ValueError('cloud should be an instance of '
                         'google.datalab.ml.CloudTrainingConfig for cloud training.')

    # everything handed to the cloud job must live on GCS
    _assert_gcs_files([output_dir, train_dataset.input_files[0],
                       eval_dataset.input_files[0], features_file,
                       analysis_dir])

    # build the trainer's command-line arguments
    args = ['--train-data-paths=%s' % train_dataset.input_files[0],
            '--eval-data-paths=%s' % eval_dataset.input_files[0],
            '--preprocess-output-dir=%s' % analysis_dir,
            '--transforms-file=%s' % features_file,
            '--model-type=%s' % model_type,
            '--max-steps=%s' % str(max_steps),
            '--train-batch-size=%s' % str(train_batch_size),
            '--eval-batch-size=%s' % str(eval_batch_size),
            '--min-eval-frequency=%s' % str(min_eval_frequency),
            '--learning-rate=%s' % str(learning_rate),
            '--epsilon=%s' % str(epsilon)]
    if num_epochs:
        args.append('--num-epochs=%s' % str(num_epochs))
    if top_n:
        args.append('--top-n=%s' % str(top_n))
    if layer_sizes:
        # one numbered flag per hidden layer: --layer-size1, --layer-size2, ...
        for i in range(len(layer_sizes)):
            args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))

    job_request = {
        'package_uris': [_package_to_staging(output_dir), _TF_GS_URL,
                         _PROTOBUF_GS_URL],
        'python_module': 'mltoolbox._structured_data.trainer.task',
        'job_dir': output_dir,
        'args': args
    }
    # merge in the CloudTrainingConfig fields (scale tier, region, ...)
    job_request.update(dict(config._asdict()))

    if not job_name:
        # derive a unique job name from the prefix and a timestamp
        job_name = job_name_prefix or 'structured_data_train'
        job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')

    job = ml.Job.submit_training(job_request, job_name)
    print('Job request send. View status of job at')
    print('https://console.developers.google.com/ml/jobs?project=%s' %
          _default_project())
    return job
[ "def", "cloud_train", "(", "train_dataset", ",", "eval_dataset", ",", "analysis_dir", ",", "output_dir", ",", "features", ",", "model_type", ",", "max_steps", ",", "num_epochs", ",", "train_batch_size", ",", "eval_batch_size", ",", "min_eval_frequency", ",", "top_n"...
35.625
19.034091
def parse(self, commands):
    """ Parse a list of commands. """
    # Drop placeholder objects that marked deletions during the previous
    # parsing round before handling the new batch.
    stale_ids = [obj_id for obj_id, obj in self._objects.items()
                 if obj == JUST_DELETED]
    for obj_id in stale_ids:
        self._objects.pop(obj_id)

    for command in commands:
        self._parse(command)
[ "def", "parse", "(", "self", ",", "commands", ")", ":", "# Get rid of dummy objects that represented deleted objects in", "# the last parsing round.", "to_delete", "=", "[", "]", "for", "id_", ",", "val", "in", "self", ".", "_objects", ".", "items", "(", ")", ":",...
29.6
13.066667
def recover_cfg(self, start=None, end=None, symbols=None, callback=None, arch_mode=None):
    """Recover CFG.

    Args:
        start (int): Start address.
        end (int): End address.
        symbols (dict): Symbol table.
        callback (function): A callback function which is called after
            each successfully recovered CFG.
        arch_mode (int): Architecture mode.

    Returns:
        ControlFlowGraph: A CFG.
    """
    # Fall back to the binary's own architecture mode, then reload the
    # analysis modules for that mode.
    if arch_mode is None:
        arch_mode = self.binary.architecture_mode
    self._load(arch_mode=arch_mode)

    # Default to the binary entry point when no start address is given.
    entry = start if start else self.binary.entry_point

    cfg, _ = self._recover_cfg(start=entry, end=end, symbols=symbols,
                               callback=callback)
    return cfg
[ "def", "recover_cfg", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "symbols", "=", "None", ",", "callback", "=", "None", ",", "arch_mode", "=", "None", ")", ":", "# Set architecture in case it wasn't already set.", "if", "arch_mode", ...
33.038462
22.5
def CBO_Gamma(self, **kwargs):
    '''
    Returns the strain-shifted Gamma-valley conduction band offset (CBO),
    assuming the strain affects all conduction band valleys equally.
    '''
    # unstrained offset plus the uniform strain shift
    unshifted = self.unstrained.CBO_Gamma(**kwargs)
    return unshifted + self.CBO_strain_shift(**kwargs)
[ "def", "CBO_Gamma", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "(", "self", ".", "unstrained", ".", "CBO_Gamma", "(", "*", "*", "kwargs", ")", "+", "self", ".", "CBO_strain_shift", "(", "*", "*", "kwargs", ")", ")" ]
43.142857
22.571429
def user(self, **params):
    """Stream user

    Accepted params found at:
    https://dev.twitter.com/docs/api/1.1/get/user
    """
    endpoint = ('https://userstream.twitter.com/%s/user.json'
                % self.streamer.api_version)
    self.streamer._request(endpoint, params=params)
[ "def", "user", "(", "self", ",", "*", "*", "params", ")", ":", "url", "=", "'https://userstream.twitter.com/%s/user.json'", "%", "self", ".", "streamer", ".", "api_version", "self", ".", "streamer", ".", "_request", "(", "url", ",", "params", "=", "params", ...
32.888889
11.888889
def image(self,path_img):
    """ Open an image file and print it.

    :param path_img: filesystem path of the image to print.
    """
    im_open = Image.open(path_img)
    # normalize to RGB so the conversion routine gets a known mode
    im = im_open.convert("RGB")
    # Convert the RGB image in printable image
    pix_line, img_size = self._convert_image(im)
    self._print_image(pix_line, img_size)
[ "def", "image", "(", "self", ",", "path_img", ")", ":", "im_open", "=", "Image", ".", "open", "(", "path_img", ")", "im", "=", "im_open", ".", "convert", "(", "\"RGB\"", ")", "# Convert the RGB image in printable image", "pix_line", ",", "img_size", "=", "se...
39.428571
7
def attach_keypress(fig, scaling=1.1): """ Attach a key press event handler that configures keys for closing a figure and changing the figure size. Keys 'e' and 'c' respectively expand and contract the figure, and key 'q' closes it. **Note:** Resizing may not function correctly with all matplotlib backends (a `bug <https://github.com/matplotlib/matplotlib/issues/10083>`__ has been reported). Parameters ---------- fig : :class:`matplotlib.figure.Figure` object Figure to which event handling is to be attached scaling : float, optional (default 1.1) Scaling factor for figure size changes Returns ------- press : function Key press event handler function """ def press(event): if event.key == 'q': plt.close(fig) elif event.key == 'e': fig.set_size_inches(scaling * fig.get_size_inches(), forward=True) elif event.key == 'c': fig.set_size_inches(fig.get_size_inches() / scaling, forward=True) # Avoid multiple event handlers attached to the same figure if not hasattr(fig, '_sporco_keypress_cid'): cid = fig.canvas.mpl_connect('key_press_event', press) fig._sporco_keypress_cid = cid return press
[ "def", "attach_keypress", "(", "fig", ",", "scaling", "=", "1.1", ")", ":", "def", "press", "(", "event", ")", ":", "if", "event", ".", "key", "==", "'q'", ":", "plt", ".", "close", "(", "fig", ")", "elif", "event", ".", "key", "==", "'e'", ":", ...
32.710526
21.131579
def state_province_region(self, value=None): """Corresponds to IDD Field `state_province_region` Args: value (str): value for IDD Field `state_province_region` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `state_province_region`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `state_province_region`') self._state_province_region = value
[ "def", "state_province_region", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of...
36.416667
20.333333
def t_TITLE(self, token): ur'\#\s+<wca-title>(?P<title>.+)\n' token.value = token.lexer.lexmatch.group("title").decode("utf8") token.lexer.lineno += 1 return token
[ "def", "t_TITLE", "(", "self", ",", "token", ")", ":", "token", ".", "value", "=", "token", ".", "lexer", ".", "lexmatch", ".", "group", "(", "\"title\"", ")", ".", "decode", "(", "\"utf8\"", ")", "token", ".", "lexer", ".", "lineno", "+=", "1", "r...
38.2
15.8
def RegisterPartitionResolver(self, database_link, partition_resolver): """Registers the partition resolver associated with the database link :param str database_link: Database Self Link or ID based link. :param object partition_resolver: An instance of PartitionResolver. """ if not database_link: raise ValueError("database_link is None or empty.") if partition_resolver is None: raise ValueError("partition_resolver is None.") self.partition_resolvers = {base.TrimBeginningAndEndingSlashes(database_link): partition_resolver}
[ "def", "RegisterPartitionResolver", "(", "self", ",", "database_link", ",", "partition_resolver", ")", ":", "if", "not", "database_link", ":", "raise", "ValueError", "(", "\"database_link is None or empty.\"", ")", "if", "partition_resolver", "is", "None", ":", "raise...
39.3125
20.3125
def _init_pressure(self): """ Internal. Initialises the pressure sensor via RTIMU """ if not self._pressure_init: self._pressure_init = self._pressure.pressureInit() if not self._pressure_init: raise OSError('Pressure Init Failed')
[ "def", "_init_pressure", "(", "self", ")", ":", "if", "not", "self", ".", "_pressure_init", ":", "self", ".", "_pressure_init", "=", "self", ".", "_pressure", ".", "pressureInit", "(", ")", "if", "not", "self", ".", "_pressure_init", ":", "raise", "OSError...
32.888889
12.888889
def random_data(line_count=1, chars_per_line=80): """ Function to creates lines of random string data Args: line_count: An integer that says how many lines to return chars_per_line: An integer that says how many characters per line to return Returns: A String """ divide_lines = chars_per_line * line_count return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
[ "def", "random_data", "(", "line_count", "=", "1", ",", "chars_per_line", "=", "80", ")", ":", "divide_lines", "=", "chars_per_line", "*", "line_count", "return", "'\\n'", ".", "join", "(", "random_line_data", "(", "chars_per_line", ")", "for", "x", "in", "r...
34.615385
24.769231
def set_error_callback(self, callback): """Assign a method to invoke when a request has encountered an unrecoverable error in an action execution. :param method callback: The method to invoke """ self.logger.debug('Setting error callback: %r', callback) self._on_error = callback
[ "def", "set_error_callback", "(", "self", ",", "callback", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Setting error callback: %r'", ",", "callback", ")", "self", ".", "_on_error", "=", "callback" ]
35.666667
15.111111
def network_protocol(self, layer: Optional[Layer] = None) -> str: """Get a random network protocol form OSI model. :param layer: Enum object Layer. :return: Protocol name. :Example: AMQP """ key = self._validate_enum(item=layer, enum=Layer) protocols = NETWORK_PROTOCOLS[key] return self.random.choice(protocols)
[ "def", "network_protocol", "(", "self", ",", "layer", ":", "Optional", "[", "Layer", "]", "=", "None", ")", "->", "str", ":", "key", "=", "self", ".", "_validate_enum", "(", "item", "=", "layer", ",", "enum", "=", "Layer", ")", "protocols", "=", "NET...
31.583333
15.333333
def _prepare_version(self): """Setup the application version""" if config.VERSION not in self._config: self._config[config.VERSION] = __version__
[ "def", "_prepare_version", "(", "self", ")", ":", "if", "config", ".", "VERSION", "not", "in", "self", ".", "_config", ":", "self", ".", "_config", "[", "config", ".", "VERSION", "]", "=", "__version__" ]
42.5
8.25
def download(self, url, dir_path, filename=None): """ Download the resources specified by url into dir_path. The resulting file path is returned. DownloadError is raised the resources cannot be downloaded. """ if not filename: filename = url.rsplit('/', 1)[1] path = os.path.join(dir_path, filename) content = self.fetch(url) with open(path, "wb") as f: f.write(content) return path
[ "def", "download", "(", "self", ",", "url", ",", "dir_path", ",", "filename", "=", "None", ")", ":", "if", "not", "filename", ":", "filename", "=", "url", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "1", "]", "path", "=", "os", ".", "path", ...
31.666667
15.866667
def parse_date(string, formation=None): """ string to date stamp :param string: date string :param formation: format string :return: datetime.date """ if formation: _stamp = datetime.datetime.strptime(string, formation).date() return _stamp _string = string.replace('.', '-').replace('/', '-') if '-' in _string: if len(_string.split('-')[0]) > 3 or len(_string.split('-')[2]) > 3: try: _stamp = datetime.datetime.strptime(_string, '%Y-%m-%d').date() except ValueError: try: _stamp = datetime.datetime.strptime(_string, '%m-%d-%Y').date() except ValueError: _stamp = datetime.datetime.strptime(_string, '%d-%m-%Y').date() else: try: _stamp = datetime.datetime.strptime(_string, '%y-%m-%d').date() except ValueError: try: _stamp = datetime.datetime.strptime(_string, '%m-%d-%y').date() except ValueError: _stamp = datetime.datetime.strptime(_string, '%d-%m-%y').date() else: if len(_string) > 6: try: _stamp = datetime.datetime.strptime(_string, '%Y%m%d').date() except ValueError: _stamp = datetime.datetime.strptime(_string, '%m%d%Y').date() elif len(_string) <= 6: try: _stamp = datetime.datetime.strptime(_string, '%y%m%d').date() except ValueError: _stamp = datetime.datetime.strptime(_string, '%m%d%y').date() else: raise CanNotFormatError return _stamp
[ "def", "parse_date", "(", "string", ",", "formation", "=", "None", ")", ":", "if", "formation", ":", "_stamp", "=", "datetime", ".", "datetime", ".", "strptime", "(", "string", ",", "formation", ")", ".", "date", "(", ")", "return", "_stamp", "_string", ...
42.534884
20.813953
def eigtransform(self, sequences, right=True, mode='clip'): r"""Transform a list of sequences by projecting the sequences onto the first `n_timescales` dynamical eigenvectors. Parameters ---------- sequences : list of array-like List of sequences, or a single sequence. Each sequence should be a 1D iterable of state labels. Labels can be integers, strings, or other orderable objects. right : bool Which eigenvectors to map onto. Both the left (:math:`\Phi`) and the right (:math`\Psi`) eigenvectors of the transition matrix are commonly used, and differ in their normalization. The two sets of eigenvectors are related by the stationary distribution :: \Phi_i(x) = \Psi_i(x) * \mu(x) In the MSM literature, the right vectors (default here) are approximations to the transfer operator eigenfunctions, whereas the left eigenfunction are approximations to the propagator eigenfunctions. For more details, refer to reference [1]. mode : {'clip', 'fill'} Method by which to treat labels in `sequences` which do not have a corresponding index. This can be due, for example, to the ergodic trimming step. ``clip`` Unmapped labels are removed during transform. If they occur at the beginning or end of a sequence, the resulting transformed sequence will be shorted. If they occur in the middle of a sequence, that sequence will be broken into two (or more) sequences. (Default) ``fill`` Unmapped labels will be replaced with NaN, to signal missing data. [The use of NaN to signal missing data is not fantastic, but it's consistent with current behavior of the ``pandas`` library.] Returns ------- transformed : list of 2d arrays Each element of transformed is an array of shape ``(n_samples, n_timescales)`` containing the transformed data. References ---------- .. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics: Generation and validation." J. Chem. Phys. 134.17 (2011): 174105. 
""" result = [] for y in self.transform(sequences, mode=mode): if right: op = self.right_eigenvectors_[:, 1:] else: op = self.left_eigenvectors_[:, 1:] is_finite = np.isfinite(y) if not np.all(is_finite): value = np.empty((y.shape[0], op.shape[1])) value[is_finite, :] = np.take(op, y[is_finite].astype(np.int), axis=0) value[~is_finite, :] = np.nan else: value = np.take(op, y, axis=0) result.append(value) return result
[ "def", "eigtransform", "(", "self", ",", "sequences", ",", "right", "=", "True", ",", "mode", "=", "'clip'", ")", ":", "result", "=", "[", "]", "for", "y", "in", "self", ".", "transform", "(", "sequences", ",", "mode", "=", "mode", ")", ":", "if", ...
41.985714
25
def p_params(self, p): 'params : params_begin param_end' p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1))
[ "def", "p_params", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "(", "p", "[", "2", "]", ",", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
32
8.5
def SMA(Series, N, M=1): """ 威廉SMA算法 本次修正主要是对于返回值的优化,现在的返回值会带上原先输入的索引index 2018/5/3 @yutiansut """ ret = [] i = 1 length = len(Series) # 跳过X中前面几个 nan 值 while i < length: if np.isnan(Series.iloc[i]): i += 1 else: break preY = Series.iloc[i] # Y' ret.append(preY) while i < length: Y = (M * Series.iloc[i] + (N - M) * preY) / float(N) ret.append(Y) preY = Y i += 1 return pd.Series(ret, index=Series.tail(len(ret)).index)
[ "def", "SMA", "(", "Series", ",", "N", ",", "M", "=", "1", ")", ":", "ret", "=", "[", "]", "i", "=", "1", "length", "=", "len", "(", "Series", ")", "# 跳过X中前面几个 nan 值", "while", "i", "<", "length", ":", "if", "np", ".", "isnan", "(", "Series", ...
21.16
19.48
def setDecel(self, vehID, decel): """setDecel(string, double) -> None Sets the preferred maximal deceleration in m/s^2 for this vehicle. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_DECEL, vehID, decel)
[ "def", "setDecel", "(", "self", ",", "vehID", ",", "decel", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_DECEL", ",", "vehID", ",", "decel", ")" ]
38.428571
15.571429
def Queue(self, name, initial=None, maxsize=None): """The queue datatype. :param name: The name of the queue. :keyword initial: Initial items in the queue. See :class:`redish.types.Queue`. """ return types.Queue(name, self.api, initial=initial, maxsize=maxsize)
[ "def", "Queue", "(", "self", ",", "name", ",", "initial", "=", "None", ",", "maxsize", "=", "None", ")", ":", "return", "types", ".", "Queue", "(", "name", ",", "self", ".", "api", ",", "initial", "=", "initial", ",", "maxsize", "=", "maxsize", ")"...
30.3
18.2
def add_color_scheme_stack(self, scheme_name, custom=False): """Add a stack for a given scheme and connects the CONF values.""" color_scheme_groups = [ (_('Text'), ["normal", "comment", "string", "number", "keyword", "builtin", "definition", "instance", ]), (_('Highlight'), ["currentcell", "currentline", "occurrence", "matched_p", "unmatched_p", "ctrlclick"]), (_('Background'), ["background", "sideareas"]) ] parent = self.parent line_edit = parent.create_lineedit(_("Scheme name:"), '{0}/name'.format(scheme_name)) self.widgets[scheme_name] = {} # Widget setup line_edit.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter) self.setWindowTitle(_('Color scheme editor')) # Layout name_layout = QHBoxLayout() name_layout.addWidget(line_edit.label) name_layout.addWidget(line_edit.textbox) self.scheme_name_textbox[scheme_name] = line_edit.textbox if not custom: line_edit.textbox.setDisabled(True) if not self.isVisible(): line_edit.setVisible(False) cs_layout = QVBoxLayout() cs_layout.addLayout(name_layout) h_layout = QHBoxLayout() v_layout = QVBoxLayout() for index, item in enumerate(color_scheme_groups): group_name, keys = item group_layout = QGridLayout() for row, key in enumerate(keys): option = "{0}/{1}".format(scheme_name, key) value = self.parent.get_option(option) name = syntaxhighlighters.COLOR_SCHEME_KEYS[key] if is_text_string(value): label, clayout = parent.create_coloredit( name, option, without_layout=True, ) label.setAlignment(Qt.AlignRight | Qt.AlignVCenter) group_layout.addWidget(label, row+1, 0) group_layout.addLayout(clayout, row+1, 1) # Needed to update temp scheme to obtain instant preview self.widgets[scheme_name][key] = [clayout] else: label, clayout, cb_bold, cb_italic = parent.create_scedit( name, option, without_layout=True, ) label.setAlignment(Qt.AlignRight | Qt.AlignVCenter) group_layout.addWidget(label, row+1, 0) group_layout.addLayout(clayout, row+1, 1) group_layout.addWidget(cb_bold, row+1, 2) group_layout.addWidget(cb_italic, row+1, 3) # Needed 
to update temp scheme to obtain instant preview self.widgets[scheme_name][key] = [clayout, cb_bold, cb_italic] group_box = QGroupBox(group_name) group_box.setLayout(group_layout) if index == 0: h_layout.addWidget(group_box) else: v_layout.addWidget(group_box) h_layout.addLayout(v_layout) cs_layout.addLayout(h_layout) stackitem = QWidget() stackitem.setLayout(cs_layout) self.stack.addWidget(stackitem) self.order.append(scheme_name)
[ "def", "add_color_scheme_stack", "(", "self", ",", "scheme_name", ",", "custom", "=", "False", ")", ":", "color_scheme_groups", "=", "[", "(", "_", "(", "'Text'", ")", ",", "[", "\"normal\"", ",", "\"comment\"", ",", "\"string\"", ",", "\"number\"", ",", "...
38.898876
19.662921
def create_primary_zone_by_upload(self, account_name, zone_name, bind_file): """Creates a new primary zone by uploading a bind file Arguments: account_name -- The name of the account that will contain this zone. zone_name -- The name of the zone. It must be unique. bind_file -- The file to upload. """ zone_properties = {"name": zone_name, "accountName": account_name, "type": "PRIMARY"} primary_zone_info = {"forceImport": True, "createType": "UPLOAD"} zone_data = {"properties": zone_properties, "primaryCreateInfo": primary_zone_info} files = {'zone': ('', json.dumps(zone_data), 'application/json'), 'file': ('file', open(bind_file, 'rb'), 'application/octet-stream')} return self.rest_api_connection.post_multi_part("/v1/zones", files)
[ "def", "create_primary_zone_by_upload", "(", "self", ",", "account_name", ",", "zone_name", ",", "bind_file", ")", ":", "zone_properties", "=", "{", "\"name\"", ":", "zone_name", ",", "\"accountName\"", ":", "account_name", ",", "\"type\"", ":", "\"PRIMARY\"", "}"...
55.666667
29.733333
def getParam(self, name=None): """ Function getParam Return a dict of parameters or a parameter value @param key: The parameter name @return RETURN: dict of parameters or a parameter value """ if 'parameters' in self.keys(): l = {x['name']: x['value'] for x in self['parameters'].values()} if name: if name in l.keys(): return l[name] else: return False else: return l
[ "def", "getParam", "(", "self", ",", "name", "=", "None", ")", ":", "if", "'parameters'", "in", "self", ".", "keys", "(", ")", ":", "l", "=", "{", "x", "[", "'name'", "]", ":", "x", "[", "'value'", "]", "for", "x", "in", "self", "[", "'paramete...
32.875
14
def filter(self, *, type_=None, lang=None, attrs={}): """ Return an iterable which produces a sequence of the elements inside this :class:`XSOList`, filtered by the criteria given as arguments. The function starts with a working sequence consisting of the whole list. If `type_` is not :data:`None`, elements which are not an instance of the given type are excluded from the working sequence. If `lang` is not :data:`None`, it must be either a :class:`~.structs.LanguageRange` or an iterable of language ranges. The set of languages present among the working sequence is determined and used for a call to :class:`~.structs.lookup_language`. If the lookup returns a language, all elements whose :attr:`lang` is different from that value are excluded from the working sequence. .. note:: If an iterable of language ranges is given, it is evaluated into a list. This may be of concern if a huge iterable is about to be used for language ranges, but it is an requirement of the :class:`~.structs.lookup_language` function which is used under the hood. .. note:: Filtering by language assumes that the elements have a :class:`~aioxmpp.xso.LangAttr` descriptor named ``lang``. If `attrs` is not empty, the filter iterates over each `key`-`value` pair. For each iteration, all elements which do not have an attribute of the name in `key` or where that attribute has a value not equal to `value` are excluded from the working sequence. In general, the iterable returned from :meth:`filter` can only be used once. It is dynamic in the sense that changes to elements which are in the list *behind* the last element returned from the iterator will still be picked up when the iterator is resumed. """ result = self if type_ is not None: result = self._filter_type(result, type_) if lang is not None: result = self._filter_lang(result, lang) if attrs: result = self._filter_attrs(result, attrs) return result
[ "def", "filter", "(", "self", ",", "*", ",", "type_", "=", "None", ",", "lang", "=", "None", ",", "attrs", "=", "{", "}", ")", ":", "result", "=", "self", "if", "type_", "is", "not", "None", ":", "result", "=", "self", ".", "_filter_type", "(", ...
45.875
27.416667
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12): ''' Dip computed as distance between empirical distribution function (EDF) and cumulative distribution function for the unimodal distribution with smallest such distance. The optimal unimodal distribution is found by the algorithm presented in Hartigan (1985): Computation of the dip statistic to test for unimodaliy. Applied Statistics, vol. 34, no. 3 If the plotting option is enabled the optimal unimodal distribution function is plotted along with (xF, yF-dip) and (xF, yF+dip) xF - x-coordinates for EDF yF - y-coordinates for EDF ''' ## TODO! Preprocess xF and yF so that yF increasing and xF does ## not have more than two copies of each x-value. if (xF[1:]-xF[:-1] < -eps).any(): raise ValueError('Need sorted x-values to compute dip') if (yF[1:]-yF[:-1] < -eps).any(): raise ValueError('Need sorted y-values to compute dip') # if plotting: # Nplot = 5 # bfig = plt.figure(figsize=(12, 3)) # i = 1 # plot index D = 0 # lower bound for dip*2 # [L, U] is interval where we still need to find unimodal function, # the modal interval L = 0 U = len(xF) - 1 # iGfin are the indices of xF where the optimal unimodal distribution is greatest # convex minorant to (xF, yF+dip) # iHfin are the indices of xF where the optimal unimodal distribution is least # concave majorant to (xF, yF-dip) iGfin = L iHfin = U while 1: iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iG = np.arange(L, U+1)[iGG] iH = np.arange(L, U+1)[iHH] # Interpolate. First and last point are in both and does not need # interpolation. Might cause trouble if included due to possiblity # of infinity slope at beginning or end of interval. 
if iG[0] != iH[0] or iG[-1] != iH[-1]: raise ValueError('Convex minorant and concave majorant should start and end at same points.') hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH]) gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG]) hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]]) gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]]) #hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH]) #gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG]) # Find largest difference between GCM and LCM. gdiff = hipl - yF[iG] hdiff = yF[iH] - gipl imaxdiffg = np.argmax(gdiff) imaxdiffh = np.argmax(hdiff) d = max(gdiff[imaxdiffg], hdiff[imaxdiffh]) # # Plot current GCM and LCM. # if plotting: # if i > Nplot: # bfig = plt.figure(figsize=(12, 3)) # i = 1 # bax = bfig.add_subplot(1, Nplot, i) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-d/2, color='black') # bax.plot(xF, yF+d/2, color='black') # bax.plot(xF[iG], yF[iG]+d/2, color='blue') # bax.plot(xF[iH], yF[iH]-d/2, color='blue') # if d <= D: # if verbose: # print("Difference in modal interval smaller than current dip") # break # Find new modal interval so that largest difference is at endpoint # and set d to largest distance between current GCM and LCM. if gdiff[imaxdiffg] > hdiff[imaxdiffh]: L0 = iG[imaxdiffg] U0 = iH[iH >= L0][0] else: U0 = iH[imaxdiffh] L0 = iG[iG <= U0][-1] # Add points outside the modal interval to the final GCM and LCM. iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]]) iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin]) # # Plot new modal interval # if plotting: # ymin, ymax = bax.get_ylim() # bax.axvline(xF[L0], ymin, ymax, color='orange') # bax.axvline(xF[U0], ymin, ymax, color='red') # bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L])) # Compute new lower bound for dip*2 # i.e. 
largest difference outside modal interval gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG]) D = max(D, np.amax(yF[L:(L0+1)] - gipl)) hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH]) D = max(D, np.amax(hipl - yF[U0:(U+1)])) if xF[U0]-xF[L0] < eps: if verbose: print("Modal interval zero length") break # if plotting: # mxpt = np.argmax(yF[L:(L0+1)] - gipl) # bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2, # gipl[mxpt]+d/2], '+', color='red') # mxpt = np.argmax(hipl - yF[U0:(U+1)]) # bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2, # hipl[mxpt]-d/2], '+', color='red') # i += 1 # Change modal interval L = L0 U = U0 if d <= D: if verbose: print("Difference in modal interval smaller than new dip") break # if plotting: # # Add modal interval to figure # bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed') # bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed') # ## Plot unimodal function (not distribution function) # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # if plotting: # bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])], # np.hstack([yF[iGfin] + D/2, 
yF[iM_convex] + D/2, # yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # ## Plot unimodal distribution function # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # Closest unimodal curve xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])] yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2, yM_lower[iMM_concave], yF[iHfin[1:]] - D/2]) # Add points so unimodal curve goes from 0 to 1 k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5) xU_start = xU[0] - yU[0]/(k_start+1e-5) k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5) xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5) xU = np.hstack([xU_start, xU, xU_end]) yU = np.hstack([0, yU, 1]) # if plotting: # bax.plot(xU, yU, color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # plt.show() return D/2, (xU, yU)
[ "def", "dip_and_closest_unimodal_from_cdf", "(", "xF", ",", "yF", ",", "plotting", "=", "False", ",", "verbose", "=", "False", ",", "eps", "=", "1e-12", ")", ":", "## TODO! Preprocess xF and yF so that yF increasing and xF does", "## not have more than two copies of each x-...
38.133929
20.5
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95, random_state=None): """ Bootstrapped mean with confidence limits """ return bootstrap_statistics(series, np.mean, n_samples=n_samples, confidence_interval=confidence_interval, random_state=random_state)
[ "def", "mean_cl_boot", "(", "series", ",", "n_samples", "=", "1000", ",", "confidence_interval", "=", "0.95", ",", "random_state", "=", "None", ")", ":", "return", "bootstrap_statistics", "(", "series", ",", "np", ".", "mean", ",", "n_samples", "=", "n_sampl...
43.333333
11.555556
def _process_coref_span_annotations_for_word(label: str, word_index: int, clusters: DefaultDict[int, List[Tuple[int, int]]], coref_stacks: DefaultDict[int, List[int]]) -> None: """ For a given coref label, add it to a currently open span(s), complete a span(s) or ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks dictionaries. Parameters ---------- label : ``str`` The coref label for this word. word_index : ``int`` The word index into the sentence. clusters : ``DefaultDict[int, List[Tuple[int, int]]]`` A dictionary mapping cluster ids to lists of inclusive spans into the sentence. coref_stacks: ``DefaultDict[int, List[int]]`` Stacks for each cluster id to hold the start indices of active spans (spans which we are inside of when processing a given word). Spans with the same id can be nested, which is why we collect these opening spans on a stack, e.g: [Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1 """ if label != "-": for segment in label.split("|"): # The conll representation of coref spans allows spans to # overlap. If spans end or begin at the same word, they are # separated by a "|". if segment[0] == "(": # The span begins at this word. if segment[-1] == ")": # The span begins and ends at this word (single word span). cluster_id = int(segment[1:-1]) clusters[cluster_id].append((word_index, word_index)) else: # The span is starting, so we record the index of the word. cluster_id = int(segment[1:]) coref_stacks[cluster_id].append(word_index) else: # The span for this id is ending, but didn't start at this word. # Retrieve the start index from the document state and # add the span to the clusters for this id. cluster_id = int(segment[:-1]) start = coref_stacks[cluster_id].pop() clusters[cluster_id].append((start, word_index))
[ "def", "_process_coref_span_annotations_for_word", "(", "label", ":", "str", ",", "word_index", ":", "int", ",", "clusters", ":", "DefaultDict", "[", "int", ",", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", "]", ",", "coref_stacks", ":", "Defaul...
53.851064
25.595745
def get_log_entry_form_for_create(self, log_entry_record_types): """Gets the log entry form for creating new log entries. A new form should be requested for each create transaction. arg: log_entry_record_types (osid.type.Type[]): array of log entry record types return: (osid.logging.LogEntryForm) - the log entry form raise: NullArgument - ``log_entry_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in log_entry_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if log_entry_record_types == []: obj_form = objects.LogEntryForm( log_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.LogEntryForm( log_id=self._catalog_id, record_types=log_entry_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_log_entry_form_for_create", "(", "self", ",", "log_entry_record_types", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.get_resource_form_for_create_template", "for", "arg", "in", "log_entry_record_types", ":", "if", "not", "isinstance...
46.416667
18
async def sqsStats(self, *args, **kwargs): """ Statistics on the sqs queues This method is only for debugging the ec2-manager This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs)
[ "async", "def", "sqsStats", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"sqsStats\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
27.8
18.6
def _loc(self, pos, idx):
    """Translate the pair ``(pos, idx)`` into a single sorted-list index.

    ``pos`` selects a sublist and ``idx`` is the offset within that
    sublist; the result is the element's position in the overall sorted
    order.  Uses the cumulative-count index tree built by
    ``self._build_index`` (see that method for the tree layout): walking
    from the leaf for ``pos`` up to the root, every time we stand on a
    right child we add the left sibling's subtree total.

    Example with ``_index = [14, 5, 9, 3, 2, 4, 5]`` and ``_offset = 3``:
    the pair ``(2, 3)`` maps to ``5 + 3 = 8``.
    """
    if not pos:
        # First sublist: the offset already is the global index.
        return idx

    _index = self._index

    if not len(_index):
        # Index tree is stale/empty; rebuild it before walking.
        self._build_index()

    # Accumulate starting from the in-sublist offset.
    accum = idx

    # Leaf position of this sublist inside the index tree.
    node = pos + self._offset

    while node:
        # Even positions are right children: count the left sibling.
        if not (node & 1):
            accum += _index[node - 1]
        # Climb to the parent node.
        node = (node - 1) >> 1

    return accum
[ "def", "_loc", "(", "self", ",", "pos", ",", "idx", ")", ":", "if", "not", "pos", ":", "return", "idx", "_index", "=", "self", ".", "_index", "if", "not", "len", "(", "_index", ")", ":", "self", ".", "_build_index", "(", ")", "total", "=", "0", ...
28.986301
27.273973
def _ParseFileEntryWithParser(
    self, parser_mediator, parser, file_entry, file_object=None):
  """Parses a file entry with a specific parser.

  Args:
    parser_mediator (ParserMediator): parser mediator.
    parser (BaseParser): parser.
    file_entry (dfvfs.FileEntry): file entry.
    file_object (Optional[file]): file-like object to parse.
        If not set the parser will use the parser mediator to open
        the file entry's default data stream as a file-like object.

  Returns:
    int: parse result which is _PARSE_RESULT_FAILURE if the file entry
        could not be parsed, _PARSE_RESULT_SUCCESS if the file entry
        successfully was parsed or _PARSE_RESULT_UNSUPPORTED when
        UnableToParseFile was raised.

  Raises:
    TypeError: if parser object is not a supported parser type.
  """
  if not isinstance(parser, (
      parsers_interface.FileEntryParser, parsers_interface.FileObjectParser)):
    raise TypeError('Unsupported parser object type.')

  parser_mediator.ClearParserChain()

  # Snapshot the file-object reference count so we can detect parsers
  # that forget to close file objects (checked in the finally clause).
  reference_count = (
      parser_mediator.resolver_context.GetFileObjectReferenceCount(
          file_entry.path_spec))

  parser_mediator.SampleStartTiming(parser.NAME)

  try:
    # Dispatch on the parser flavor: file-entry parsers open their own
    # data stream via the mediator; file-object parsers receive one.
    if isinstance(parser, parsers_interface.FileEntryParser):
      parser.Parse(parser_mediator)
    elif isinstance(parser, parsers_interface.FileObjectParser):
      parser.Parse(parser_mediator, file_object)
    result = self._PARSE_RESULT_SUCCESS

  # We catch IOError so we can determine the parser that generated the error.
  except (IOError, dfvfs_errors.BackEndError) as exception:
    display_name = parser_mediator.GetDisplayName(file_entry)
    logger.warning(
        '{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
            parser.NAME, display_name, exception))
    result = self._PARSE_RESULT_FAILURE

  except errors.UnableToParseFile as exception:
    # UnableToParseFile signals "wrong parser for this file", not a
    # failure, hence debug-level logging and a distinct result code.
    display_name = parser_mediator.GetDisplayName(file_entry)
    logger.debug(
        '{0:s} unable to parse file: {1:s} with error: {2!s}'.format(
            parser.NAME, display_name, exception))
    result = self._PARSE_RESULT_UNSUPPORTED

  finally:
    parser_mediator.SampleStopTiming(parser.NAME)
    parser_mediator.SampleMemoryUsage(parser.NAME)

    new_reference_count = (
        parser_mediator.resolver_context.GetFileObjectReferenceCount(
            file_entry.path_spec))
    if reference_count != new_reference_count:
      # A changed count means the parser leaked a file object.
      display_name = parser_mediator.GetDisplayName(file_entry)
      logger.warning((
          '[{0:s}] did not explicitly close file-object for file: '
          '{1:s}.').format(parser.NAME, display_name))

  return result
[ "def", "_ParseFileEntryWithParser", "(", "self", ",", "parser_mediator", ",", "parser", ",", "file_entry", ",", "file_object", "=", "None", ")", ":", "if", "not", "isinstance", "(", "parser", ",", "(", "parsers_interface", ".", "FileEntryParser", ",", "parsers_i...
39.101449
21.927536
def send_chat_action(self, chat_id, action):
    """Tell the user that something is happening on the bot's side.

    The status is shown for 5 seconds or less (Telegram clients clear the
    typing status as soon as a message arrives from the bot).

    :param chat_id:
    :param action:  One of the following strings: 'typing', 'upload_photo',
        'record_video', 'upload_video', 'record_audio', 'upload_audio',
        'upload_document', 'find_location', 'record_video_note',
        'upload_video_note'.
    :return: API reply. :type: boolean
    """
    token = self.token
    return apihelper.send_chat_action(token, chat_id, action)
[ "def", "send_chat_action", "(", "self", ",", "chat_id", ",", "action", ")", ":", "return", "apihelper", ".", "send_chat_action", "(", "self", ".", "token", ",", "chat_id", ",", "action", ")" ]
62
32.727273
def parse_radix(self, radix, chars, stream):
    """Parse a radix-notation numeric literal from *stream*.

    Grammar::

        BinaryNum      ::= [+-]? '2'  RadixSymbol [0-1]+       RadixSymbol
        OctalChar      ::= [+-]? '8'  RadixSymbol [0-7]+       RadixSymbol
        HexadecimalNum ::= [+-]? '16' RadixSymbol [0-9a-zA-Z]+ RadixSymbol

    :param radix: numeric base (2, 8 or 16)
    :param chars: bytes accepted as digits for this base
    :param stream: binary stream positioned at the literal
    :return: the parsed integer with the combined sign applied
    """
    value = b''
    sign = self.parse_sign(stream)
    self.expect(stream, b(str(radix)) + self.radix_symbole)
    # A second sign may appear after the radix prefix; both multiply.
    sign *= self.parse_sign(stream)
    while not self.has_next(self.radix_symbole, stream):
        # Renamed from `next` to avoid shadowing the builtin.
        ch = stream.read(1)
        if not ch:
            self.raise_unexpected_eof(stream)
        if ch not in chars:
            self.raise_unexpected(stream, ch)
        value += ch
    if not value:
        # At least one digit is required between the radix symbols.
        self.raise_unexpected(stream, self.radix_symbole)
    self.expect(stream, self.radix_symbole)
    return sign * int(value, radix)
[ "def", "parse_radix", "(", "self", ",", "radix", ",", "chars", ",", "stream", ")", ":", "value", "=", "b''", "sign", "=", "self", ".", "parse_sign", "(", "stream", ")", "self", ".", "expect", "(", "stream", ",", "b", "(", "str", "(", "radix", ")", ...
33.307692
17.769231
async def install_sandboxed_update(filename, loop):
    """Install an update candidate inside a fresh virtual environment.

    Creates and activates a virtual environment, then installs the update
    candidate into it (the virtual environment is left activated).

    :return: a result dict, the path to python in the virtual environment,
        the venv site-packages path, and the venv directory
    """
    log.debug("Creating virtual environment")
    venv_dir, python, venv_site_pkgs = \
        await create_virtual_environment(loop=loop)

    log.debug("Installing update server into virtual environment")
    out, err, returncode = await _install(python, filename, loop)

    # Either stderr output or a non-zero exit code counts as failure.
    if not err and returncode == 0:
        log.debug("Install successful")
        res = {'status': 'success'}
    else:
        log.error("Install failed: {}".format(err))
        res = {'status': 'failure', 'message': err}
    return res, python, venv_site_pkgs, venv_dir
[ "async", "def", "install_sandboxed_update", "(", "filename", ",", "loop", ")", ":", "log", ".", "debug", "(", "\"Creating virtual environment\"", ")", "venv_dir", ",", "python", ",", "venv_site_pkgs", "=", "await", "create_virtual_environment", "(", "loop", "=", "...
42
14.947368
def base(self, *paths, **query_kwargs):
    """Create a new url object rooted at the current path.

    If the current request was /foo/bar, this appends *paths and
    **query_kwargs onto /foo/bar.

    :example:
        # current path: /foo/bar
        print url # http://host.com/foo/bar
        print url.base() # http://host.com/foo/bar
        print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam

    :param *paths: list, paths appended to the current path (no query params)
    :param **query_kwargs: dict, query string params to add
    """
    kwargs = self._normalize_params(*paths, **query_kwargs)
    current = self.path
    if current:
        if "path" in kwargs:
            # Merge the existing path with the extra path segments.
            joined = self.normalize_paths(current, kwargs["path"])
            kwargs["path"] = "/".join(joined)
        else:
            kwargs["path"] = current
    return self.create(self.root, **kwargs)
[ "def", "base", "(", "self", ",", "*", "paths", ",", "*", "*", "query_kwargs", ")", ":", "kwargs", "=", "self", ".", "_normalize_params", "(", "*", "paths", ",", "*", "*", "query_kwargs", ")", "if", "self", ".", "path", ":", "if", "\"path\"", "in", ...
38.36
22.16
def findBinomialNsWithLowerBoundSampleMinimum(confidence, desiredValuesSorted, p, numSamples, nMax):
    """
    For each desired value, find an approximate n for which the sample minimum
    has a probabilistic lower bound equal to this value. For each value, find
    an adjacent pair of n values whose lower bound sample minima are below and
    above the desired value, respectively, and return a linearly-interpolated
    n between these two values.

    @param confidence (float)
    For the probabilistic lower bound, this specifies the probability. If this
    is 0.8, that means that there's an 80% chance that the sample minimum is >=
    the desired value, and 20% chance that it's < the desired value.

    @param p (float)
    The p if the binomial distribution.

    @param numSamples (int)
    The number of samples in the sample minimum distribution.

    @return
    A list of results. Each result contains
      (interpolated_n, lower_value, upper_value).
    where each lower_value and upper_value are the probabilistic lower bound
    sample minimum for floor(interpolated_n) and ceil(interpolated_n)
    respectively.
    ...]
    """
    def P(n, numOccurrences):
        """
        Given n, return probability than the sample minimum is >= numOccurrences
        """
        return 1 - SampleMinimumDistribution(numSamples, BinomialDistribution(n, p)).cdf(numOccurrences - 1)
    results = []
    n = 0
    for desiredValue in desiredValuesSorted:
        # n never resets between desired values -- assumes
        # desiredValuesSorted is ascending (per its name), so each search
        # can resume where the previous one stopped. TODO confirm at call sites.
        while n + 1 <= nMax and P(n + 1, desiredValue) < confidence:
            n += 1
        if n + 1 > nMax:
            # Ran past the allowed range; remaining values are unreachable.
            break
        # Bracket the desired confidence between n and n + 1, then
        # linearly interpolate within the bracket.
        left = P(n, desiredValue)
        right = P(n + 1, desiredValue)
        interpolated = n + ((confidence - left) / (right - left))
        result = (interpolated, left, right)
        results.append(result)
    return results
[ "def", "findBinomialNsWithLowerBoundSampleMinimum", "(", "confidence", ",", "desiredValuesSorted", ",", "p", ",", "numSamples", ",", "nMax", ")", ":", "def", "P", "(", "n", ",", "numOccurrences", ")", ":", "\"\"\"\n Given n, return probability than the sample minimum i...
30.824561
24.157895
def __register(self, client_id, client_secret, email, scope, first_name, last_name, original_ip, original_device, **kwargs):
    """Call documentation: `/user/register
    <https://www.wepay.com/developer/reference/user#register>`_, plus
    extra keyword parameter:

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`
    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`
    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`

    .. note ::

        This call is NOT supported by API versions older then '2014-01-08'.

    """
    # Required API parameters; batch/version options travel in **kwargs
    # and are interpreted by make_call.
    params = {
        'client_id': client_id,
        'client_secret': client_secret,
        'email': email,
        'scope': scope,
        'first_name': first_name,
        'last_name': last_name,
        'original_ip': original_ip,
        'original_device': original_device
    }
    # Passing the bound method itself lets make_call derive the endpoint
    # name from it.
    return self.make_call(self.__register, params, kwargs)
[ "def", "__register", "(", "self", ",", "client_id", ",", "client_secret", ",", "email", ",", "scope", ",", "first_name", ",", "last_name", ",", "original_ip", ",", "original_device", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'client_id'", ":"...
35.516129
19.548387
def neighbours(self, healpix_index):
    """
    Find all the HEALPix pixels that are the neighbours of a HEALPix pixel

    Parameters
    ----------
    healpix_index : `~numpy.ndarray`
        Array of HEALPix pixels

    Returns
    -------
    neigh : `~numpy.ndarray`
        Array giving the neighbours starting SW and rotating clockwise.
        This has one extra dimension compared to ``healpix_index`` -
        the first dimension - which is set to 8. For example if healpix_index
        has shape (2, 3), ``neigh`` has shape (8, 2, 3).
    """
    # Delegates to the module-level `neighbours` function (same name as
    # this method), filling in this object's nside and ordering scheme.
    return neighbours(healpix_index, self.nside, order=self.order)
[ "def", "neighbours", "(", "self", ",", "healpix_index", ")", ":", "return", "neighbours", "(", "healpix_index", ",", "self", ".", "nside", ",", "order", "=", "self", ".", "order", ")" ]
37.055556
21.5
def from_schemafile(cls, schemafile):
    """Build a Flatson instance from a JSON schema stored in a file.

    :param schemafile: path to a file containing the JSON schema
    """
    with open(schemafile) as schema_fp:
        schema = json.load(schema_fp)
    return cls(schema)
[ "def", "from_schemafile", "(", "cls", ",", "schemafile", ")", ":", "with", "open", "(", "schemafile", ")", "as", "f", ":", "return", "cls", "(", "json", ".", "load", "(", "f", ")", ")" ]
34.6
2.4
def get_object(cls, api_token, id):
    """Return a fully-loaded LoadBalancer looked up by its ID.

    Args:
        api_token (str): DigitalOcean API token
        id (str): Load Balancer ID
    """
    balancer = cls(token=api_token, id=id)
    balancer.load()
    return balancer
[ "def", "get_object", "(", "cls", ",", "api_token", ",", "id", ")", ":", "load_balancer", "=", "cls", "(", "token", "=", "api_token", ",", "id", "=", "id", ")", "load_balancer", ".", "load", "(", ")", "return", "load_balancer" ]
30.545455
13.636364
def _dens(self, R, z, phi=0., t=0.):
    """
    NAME:
       _dens
    PURPOSE:
       evaluate the density for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the surface density
    HISTORY:
       2018-08-19 - Written - Bovy (UofT)
    """
    # Infinitesimally thin spherical shell: the density is zero
    # everywhere except exactly on the shell radius, where it diverges.
    r2 = R**2 + z**2
    if r2 != self.a2:
        return 0.
    else:  # pragma: no cover
        # nu.inf replaces nu.infty, which was removed in NumPy 2.0.
        return nu.inf
[ "def", "_dens", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r2", "=", "R", "**", "2", "+", "z", "**", "2", "if", "r2", "!=", "self", ".", "a2", ":", "return", "0.", "else", ":", "# pragma: no cove...
24.571429
14.857143
def is_disabled(self, name):
    """Return True when service *name* is known and its config is 'disabled'."""
    services = self.services
    if not services or name not in services:
        # Unknown service (or no service table at all) is not "disabled".
        return False
    return services[name]['config'] == 'disabled'
[ "def", "is_disabled", "(", "self", ",", "name", ")", ":", "if", "self", ".", "services", "and", "name", "in", "self", ".", "services", ":", "return", "self", ".", "services", "[", "name", "]", "[", "'config'", "]", "==", "'disabled'", "return", "False"...
43.4
13
def _function_handler(function, args, kwargs, pipe):
    """Run *function* in the worker process and ship its result back.

    SIGINT is ignored in the worker so keyboard interrupts are handled
    solely by the parent process.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    outcome = process_execute(function, *args, **kwargs)
    send_result(pipe, outcome)
[ "def", "_function_handler", "(", "function", ",", "args", ",", "kwargs", ",", "pipe", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "result", "=", "process_execute", "(", "function", ",", "*", "args"...
37.428571
18
def get_shard_stats(self):
    """Return the stats document for this MongoDB shard."""
    response = requests.get(
        self._stats_url,
        params={'include_stats': True},
        headers={'X-Auth-Token': self._client.auth._token},
    )
    return response.json()['data']['stats']
[ "def", "get_shard_stats", "(", "self", ")", ":", "return", "requests", ".", "get", "(", "self", ".", "_stats_url", ",", "params", "=", "{", "'include_stats'", ":", "True", "}", ",", "headers", "=", "{", "'X-Auth-Token'", ":", "self", ".", "_client", ".",...
43.428571
15.714286
def __fetch_issue_attachments(self, issue_id):
    """Yield every attachment of the given issue, page by page."""
    pages = self.client.issue_collection(issue_id, "attachments")
    for raw_page in pages:
        page = json.loads(raw_page)
        for entry in page['entries']:
            yield entry
[ "def", "__fetch_issue_attachments", "(", "self", ",", "issue_id", ")", ":", "for", "attachments_raw", "in", "self", ".", "client", ".", "issue_collection", "(", "issue_id", ",", "\"attachments\"", ")", ":", "attachments", "=", "json", ".", "loads", "(", "attac...
38.75
20.625
def callback_save_indices():
    '''Save calibration indices entered in the bokeh text inputs.

    Reads the selected parameter/region and the start/end indices from the
    UI widgets, writes them into the data directory's `cal.yml`, and echoes
    a status message to the output window.
    '''
    import datetime
    import os

    import pylleo
    import yamlord

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        cal_yaml_path = os.path.join(path_dir, 'cal.yml')

        # UI labels use dashes (e.g. 'Acc-X'); keys use underscores.
        param = (param_select.value).lower().replace('-','_')
        region = region_select.value
        start = int(start_input.value)
        end = int(end_input.value)

        # Fixed typo: was "star index".
        msg = '''
              Updated calibration times for:<br>
              <b>{}/{}</b>
              <br>
              <br>
              start index: {}<br>
              end index: {}<br>
              '''.format(param, region, start, end)
        output_window.text = output_template.format(msg)

        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)

        # Generalize for Class-ifying
        cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)

        yamlord.write_yaml(cal_dict, cal_yaml_path)
    else:
        msg = '''
              You must first load data and select indices for calibration
              regions before you can save the indices to `cal.yml`
              '''
        output_window.text = output_template.format(msg)

    return None
[ "def", "callback_save_indices", "(", ")", ":", "import", "datetime", "import", "os", "import", "pylleo", "import", "yamlord", "if", "datadirs_select", ".", "value", "!=", "'None'", ":", "path_dir", "=", "os", ".", "path", ".", "join", "(", "parent_input", "....
33.052632
20.315789
def show_md5_view(md5):
    '''Renders template with `stream_sample` of the md5.'''
    # Without a workbench connection there is nothing to show; bounce home.
    if not WORKBENCH:
        return flask.redirect('/')
    # Materialize the stream so the template can iterate it freely.
    md5_view = WORKBENCH.stream_sample(md5)
    return flask.render_template('templates/md5_view.html', md5_view=list(md5_view), md5=md5)
[ "def", "show_md5_view", "(", "md5", ")", ":", "if", "not", "WORKBENCH", ":", "return", "flask", ".", "redirect", "(", "'/'", ")", "md5_view", "=", "WORKBENCH", ".", "stream_sample", "(", "md5", ")", "return", "flask", ".", "render_template", "(", "'templat...
34.125
24.625
def is_naive_prime(self):
    """Checks if prime in very naive way

    :return: True iff prime
    """
    if self.to_int < 2:
        return False
    if self.to_int == 2:
        # 2 is the only even prime; without this case the parity
        # shortcut below would misclassify it as composite.
        return True
    if self.to_int % 2 == 0:
        return False
    # Remaining odd candidates are checked against the precomputed list.
    return self.to_int in LOW_PRIMES
[ "def", "is_naive_prime", "(", "self", ")", ":", "if", "self", ".", "to_int", "<", "2", ":", "return", "False", "elif", "self", ".", "to_int", "%", "2", "==", "0", ":", "return", "False", "return", "self", ".", "to_int", "in", "LOW_PRIMES" ]
26
11.5
async def _download_photo(self, photo, file, date, thumb, progress_callback): """Specialized version of .download_media() for photos""" # Determine the photo and its largest size if isinstance(photo, types.MessageMediaPhoto): photo = photo.photo if not isinstance(photo, types.Photo): return size = self._get_thumb(photo.sizes, thumb) if not size or isinstance(size, types.PhotoSizeEmpty): return file = self._get_proper_filename(file, 'photo', '.jpg', date=date) if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)): return self._download_cached_photo_size(size, file) result = await self.download_file( types.InputPhotoFileLocation( id=photo.id, access_hash=photo.access_hash, file_reference=photo.file_reference, thumb_size=size.type ), file, file_size=size.size, progress_callback=progress_callback ) return result if file is bytes else file
[ "async", "def", "_download_photo", "(", "self", ",", "photo", ",", "file", ",", "date", ",", "thumb", ",", "progress_callback", ")", ":", "# Determine the photo and its largest size", "if", "isinstance", "(", "photo", ",", "types", ".", "MessageMediaPhoto", ")", ...
39.214286
18.107143
def _register_routes(self, methods):
    """Register decorator-declared routes in declaration order.

    Only callables named ``wrapper`` (i.e. produced by the route
    decorator) are kept; they are ordered by their ``_order`` attribute
    and then invoked, which performs the actual route setup.
    """
    decorated = [pair for pair in methods if pair[1].__name__ == "wrapper"]
    decorated.sort(key=lambda pair: pair[1]._order)
    for _name, register in decorated:
        register()  # execute setting route
    return decorated
[ "def", "_register_routes", "(", "self", ",", "methods", ")", ":", "# setup routes by decorator", "methods", "=", "[", "(", "n", ",", "v", ")", "for", "(", "n", ",", "v", ")", "in", "methods", "if", "v", ".", "__name__", "==", "\"wrapper\"", "]", "metho...
32.090909
13.363636
def funcGauss1D(x, mu, sig):
    """Evaluate a normalised 1D Gaussian at the positions in `x`.

    Source: http://mathworld.wolfram.com/GaussianFunction.html
    """
    z = (x - mu) / sig
    gauss = np.exp(-np.power(z, 2.) / 2)
    # Scale so the curve integrates to one.
    return gauss / (np.sqrt(2. * np.pi) * sig)
[ "def", "funcGauss1D", "(", "x", ",", "mu", ",", "sig", ")", ":", "arrOut", "=", "np", ".", "exp", "(", "-", "np", ".", "power", "(", "(", "x", "-", "mu", ")", "/", "sig", ",", "2.", ")", "/", "2", ")", "# normalize", "arrOut", "=", "arrOut", ...
27.666667
14.111111
def newNsPropEatName(self, ns, name, value):
    """Create a new property tagged with a namespace and carried by a
       node. """
    # Auto-generated libxml2 binding: unwrap the namespace's underlying
    # C object (a missing namespace maps to NULL).
    if ns is None: ns__o = None
    else: ns__o = ns._o
    ret = libxml2mod.xmlNewNsPropEatName(self._o, ns__o, name, value)
    if ret is None:raise treeError('xmlNewNsPropEatName() failed')
    # Wrap the returned C attribute pointer in the Python xmlAttr class.
    __tmp = xmlAttr(_obj=ret)
    return __tmp
[ "def", "newNsPropEatName", "(", "self", ",", "ns", ",", "name", ",", "value", ")", ":", "if", "ns", "is", "None", ":", "ns__o", "=", "None", "else", ":", "ns__o", "=", "ns", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlNewNsPropEatName", "(", "self"...
43.888889
12.444444
def read_bonedata(self, fid):
    """Read bone data from an acclaim skeleton file stream."""
    bone_count = 0
    lin = self.read_line(fid)
    # A line starting with ':' marks the next top-level section of the
    # ASF file and terminates the bonedata section.
    while lin[0]!=':':
        parts = lin.split()
        if parts[0] == 'begin':
            # Start of a new bone: append a vertex with empty/default meta.
            bone_count += 1
            self.vertices.append(vertex(name = '', id=np.NaN,
                meta={'name': [], 'id': [], 'offset': [], 'orientation': [],
                      'axis': [0., 0., 0.], 'axis_order': [], 'C': np.eye(3),
                      'Cinv': np.eye(3), 'channels': [], 'bodymass': [],
                      'confmass': [], 'order': [], 'rot_ind': [], 'pos_ind': [],
                      'limits': [], 'xyz': np.array([0., 0., 0.]),
                      'rot': np.eye(3)}))
            lin = self.read_line(fid)
        elif parts[0]=='id':
            self.vertices[bone_count].id = int(parts[1])
            lin = self.read_line(fid)
            self.vertices[bone_count].children = []
        elif parts[0]=='name':
            self.vertices[bone_count].name = parts[1]
            lin = self.read_line(fid)
        elif parts[0]=='direction':
            direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
            lin = self.read_line(fid)
        elif parts[0]=='length':
            lgth = float(parts[1])
            lin = self.read_line(fid)
        elif parts[0]=='axis':
            self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
                                                               float(parts[2]),
                                                               float(parts[3])])
            # order is reversed compared to bvh
            self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
            lin = self.read_line(fid)
        elif parts[0]=='dof':
            # Translate ASF degree-of-freedom tokens to channel names.
            order = []
            for i in range(1, len(parts)):
                if parts[i]== 'rx':
                    chan = 'Xrotation'
                    order.append('x')
                elif parts[i] =='ry':
                    chan = 'Yrotation'
                    order.append('y')
                elif parts[i] == 'rz':
                    chan = 'Zrotation'
                    order.append('z')
                elif parts[i] == 'tx':
                    chan = 'Xposition'
                elif parts[i] == 'ty':
                    chan = 'Yposition'
                elif parts[i] == 'tz':
                    chan = 'Zposition'
                elif parts[i] == 'l':
                    chan = 'length'
                self.vertices[bone_count].meta['channels'].append(chan)
                # order is reversed compared to bvh
            self.vertices[bone_count].meta['order'] = order[::-1]
            lin = self.read_line(fid)
        elif parts[0]=='limits':
            # First limit pair is on the 'limits' line itself; subsequent
            # pairs follow on their own lines until 'end'.
            self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
            lin = self.read_line(fid)
            while lin !='end':
                parts = lin.split()
                self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
                lin = self.read_line(fid)
            self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
        elif parts[0]=='end':
            # Bone finished: offset is the unit direction scaled by length.
            self.vertices[bone_count].meta['offset'] = direction*lgth
            lin = self.read_line(fid)
    return lin
[ "def", "read_bonedata", "(", "self", ",", "fid", ")", ":", "bone_count", "=", "0", "lin", "=", "self", ".", "read_line", "(", "fid", ")", "while", "lin", "[", "0", "]", "!=", "':'", ":", "parts", "=", "lin", ".", "split", "(", ")", "if", "parts",...
41.663366
18.871287
def decorate(func, caller):
    """
    decorate(func, caller) decorates a function using a caller.
    """
    # Build a new function with the same signature as `func` whose body
    # forwards every call to caller(func, <original arguments>).
    evaldict = dict(_call_=caller, _func_=func)
    fun = FunctionMaker.create(
        func, "return _call_(_func_, %(shortsignature)s)",
        evaldict, __wrapped__=func)
    if hasattr(func, '__qualname__'):
        # Python 3: preserve the qualified name for introspection.
        fun.__qualname__ = func.__qualname__
    return fun
[ "def", "decorate", "(", "func", ",", "caller", ")", ":", "evaldict", "=", "dict", "(", "_call_", "=", "caller", ",", "_func_", "=", "func", ")", "fun", "=", "FunctionMaker", ".", "create", "(", "func", ",", "\"return _call_(_func_, %(shortsignature)s)\"", ",...
33.636364
9.818182
def buffer_write(library, session, data):
    """Writes data to a formatted I/O write buffer synchronously.

    Corresponds to viBufWrite function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param data: data to be written.
    :type data: bytes
    :return: number of written bytes, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    # Out-parameter receiving the number of bytes actually written.
    return_count = ViUInt32()
    # [ViSession, ViBuf, ViUInt32, ViPUInt32]
    ret = library.viBufWrite(session, data, len(data), byref(return_count))
    return return_count.value, ret
[ "def", "buffer_write", "(", "library", ",", "session", ",", "data", ")", ":", "return_count", "=", "ViUInt32", "(", ")", "# [ViSession, ViBuf, ViUInt32, ViPUInt32]", "ret", "=", "library", ".", "viBufWrite", "(", "session", ",", "data", ",", "len", "(", "data"...
38.235294
17.529412
def count_function(func): """ Decorator for functions that return a collection (technically a dict of collections) that should be counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if not specified. """ # Fall back to Cohort-level defaults. @use_defaults @wraps(func) def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs): per_patient_data = func(row=row, cohort=cohort, filter_fn=filter_fn, normalized_per_mb=normalized_per_mb, **kwargs) patient_id = row["patient_id"] if patient_id in per_patient_data: count = len(per_patient_data[patient_id]) if normalized_per_mb: count /= float(get_patient_to_mb(cohort)[patient_id]) return count return np.nan return wrapper
[ "def", "count_function", "(", "func", ")", ":", "# Fall back to Cohort-level defaults.", "@", "use_defaults", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "row", ",", "cohort", ",", "filter_fn", "=", "None", ",", "normalized_per_mb", "=", "None", ","...
41.73913
17.652174
def pick(self, starting_node=None):
    """Pick a node on the graph based on the links in a starting node,
    and set ``self.current_node`` to it.

    * if ``starting_node`` is specified, start from there
    * if ``starting_node`` is ``None``, start from ``self.current_node``
    * if both are ``None``, pick a uniformly random node from
      ``self.node_list``

    Args:
        starting_node (Node): ``Node`` to pick from.

    Returns: Node

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_1, 5)
        >>> node_1.add_link(node_2, 2)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> [graph.pick().get_value() for i in range(5)] # doctest: +SKIP
        ['One', 'One', 'Two', 'One', 'One']
    """
    if starting_node is not None:
        source = starting_node
    elif self.current_node is not None:
        source = self.current_node
    else:
        # No position yet: jump to a uniformly random node.
        chosen = random.choice(self.node_list)
        self.current_node = chosen
        return chosen
    # Weighted pick among the source node's outgoing links.
    candidates = [(link.target, link.weight) for link in source.link_list]
    self.current_node = weighted_choice(candidates)
    return self.current_node
[ "def", "pick", "(", "self", ",", "starting_node", "=", "None", ")", ":", "if", "starting_node", "is", "None", ":", "if", "self", ".", "current_node", "is", "None", ":", "random_node", "=", "random", ".", "choice", "(", "self", ".", "node_list", ")", "s...
39.289474
18.026316
def walletpassphrase(self, passphrase, timeout=99999999, mint_only=True):
    """Unlock the wallet, by default for minting only.

    :param passphrase: wallet passphrase
    :param timeout: unlock duration in seconds
    :param mint_only: when True the unlocked wallet may only mint
    """
    rpc_args = [passphrase, timeout, mint_only]
    return self.req("walletpassphrase", rpc_args)
[ "def", "walletpassphrase", "(", "self", ",", "passphrase", ",", "timeout", "=", "99999999", ",", "mint_only", "=", "True", ")", ":", "return", "self", ".", "req", "(", "\"walletpassphrase\"", ",", "[", "passphrase", ",", "timeout", ",", "mint_only", "]", "...
65.666667
23.333333
def try_mongodb_opts(self, host="localhost", database_name='INGInious'):
    """ Try MongoDB configuration

    Each step (server connection, database access, GridFS access) is
    verified independently so the warning pinpoints exactly which one
    failed. Returns the database object, or None on any failure.
    """
    try:
        mongo_client = MongoClient(host=host)
    except Exception as e:
        self._display_warning("Cannot connect to MongoDB on host %s: %s" % (host, str(e)))
        return None

    try:
        database = mongo_client[database_name]
    except Exception as e:
        self._display_warning("Cannot access database %s: %s" % (database_name, str(e)))
        return None

    try:
        # Instantiating GridFS validates that the database supports it.
        GridFS(database)
    except Exception as e:
        self._display_warning("Cannot access gridfs %s: %s" % (database_name, str(e)))
        return None

    return database
[ "def", "try_mongodb_opts", "(", "self", ",", "host", "=", "\"localhost\"", ",", "database_name", "=", "'INGInious'", ")", ":", "try", ":", "mongo_client", "=", "MongoClient", "(", "host", "=", "host", ")", "except", "Exception", "as", "e", ":", "self", "."...
34.952381
24.809524
def dirs(self, *args, **kwargs):
    """D.dirs() -> List of this directory's subdirectories.

    The elements of the list are Path objects.

    This does not walk recursively into subdirectories
    (but see :meth:`walkdirs`).

    Accepts parameters to :meth:`listdir`.
    """
    entries = self.listdir(*args, **kwargs)
    return [entry for entry in entries if entry.isdir()]
[ "def", "dirs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "p", "for", "p", "in", "self", ".", "listdir", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "p", ".", "isdir", "(", ")", "]" ]
36.6
15.7
def get_resource_retriever(url):
    """
    Get the appropriate retriever object for the specified url based on url scheme.

    Makes assumption that HTTP urls do not require any special authorization.

    For HTTP/HTTPS urls: returns HttpResourceRetriever.
    Any other scheme (including s3://) is currently unsupported and
    raises ValueError.

    :param url: url of the resource to be retrieved
    :return: ResourceRetriever object
    :raises ValueError: if the url scheme is not http or https
    """
    if url.startswith('http://') or url.startswith('https://'):
        return HttpResourceRetriever(url)
    else:
        raise ValueError('Unsupported scheme in url: %s' % url)
[ "def", "get_resource_retriever", "(", "url", ")", ":", "if", "url", ".", "startswith", "(", "'http://'", ")", "or", "url", ".", "startswith", "(", "'https://'", ")", ":", "return", "HttpResourceRetriever", "(", "url", ")", "else", ":", "raise", "ValueError",...
35.25
19.625
def is_comment_deleted(comid):
    """Tell whether the comment with the given ID has been deleted.

    :param comid: ID of comment to check
    :return: True when the stored status is anything other than 'ok'
    """
    rows = run_sql("""SELECT status from "cmtRECORDCOMMENT" WHERE id=%s""",
                   (comid,))
    # Missing row -> not deleted; status != 'ok' -> deleted.
    return bool(rows) and rows[0][0] != 'ok'
[ "def", "is_comment_deleted", "(", "comid", ")", ":", "query", "=", "\"\"\"SELECT status from \"cmtRECORDCOMMENT\" WHERE id=%s\"\"\"", "params", "=", "(", "comid", ",", ")", "res", "=", "run_sql", "(", "query", ",", "params", ")", "if", "res", "and", "res", "[", ...
25
14
def change_openid(self, from_appid, openid_list):
    """Migrate follower openids after an official-account ownership change.

    See http://kf.qq.com/faq/170221aUnmmU170221eUZJNf.html for details.

    :param from_appid: appid of the original official account
    :param openid_list: openids to convert -- at most 100 per call, and
        they must currently follow the old account or the call errors
    :return: list of converted openid results
    """
    payload = {'from_appid': from_appid, 'openid_list': openid_list}
    return self._post(
        'changeopenid',
        data=payload,
        result_processor=lambda resp: resp['result_list'],
    )
[ "def", "change_openid", "(", "self", ",", "from_appid", ",", "openid_list", ")", ":", "return", "self", ".", "_post", "(", "'changeopenid'", ",", "data", "=", "{", "'from_appid'", ":", "from_appid", ",", "'openid_list'", ":", "openid_list", "}", ",", "result...
32.8
21.066667
def setup_standalone_signals(instance):
    """Wire the prefs dialog for standalone mode.

    Makes the dialog's delete event and a click on the close button
    terminate the application instead of just destroying the widget.

    :param instance: object exposing get_widget() for the dialog widgets
    :return: the same instance, for chaining
    """
    win = instance.get_widget('config-window')
    win.connect('delete-event', Gtk.main_quit)

    # The close button already has a destroy callback attached; block it
    # before connecting Gtk.main_quit so only the quit handler fires.
    close_btn = instance.get_widget('button1')
    close_btn.handler_block_by_func(instance.gtk_widget_destroy)
    close_btn.connect('clicked', Gtk.main_quit)
    return instance
[ "def", "setup_standalone_signals", "(", "instance", ")", ":", "window", "=", "instance", ".", "get_widget", "(", "'config-window'", ")", "window", ".", "connect", "(", "'delete-event'", ",", "Gtk", ".", "main_quit", ")", "# We need to block the execution of the alread...
38.4
15.2
def _apply_diff(environ, diff):
    """Apply a frozen environment.

    :param environ: environment mapping to mutate (e.g. ``os.environ``).
    :param dict diff: key-value pairs to apply to the environment; a value
        of ``None`` means "unset this variable".
    :returns: A dict of the key-value pairs that are being changed, mapping
        each key to its previous value (``None`` if it was not set) so the
        change can later be reverted.
    """
    original = {}
    if diff:
        # Use .items() instead of the Python-2-only .iteritems() so this
        # works on both Python 2 and Python 3.
        for k, v in diff.items():
            if v is None:
                log.log(5, 'unset %s', k)
            else:
                log.log(5, '%s="%s"', k, v)
            # Record the previous value before overwriting/removing it.
            original[k] = environ.get(k)
            if original[k] is None:
                log.log(1, '%s was not set', k)
            else:
                log.log(1, '%s was "%s"', k, original[k])
            if v is None:
                # Tolerate the key being absent already.
                environ.pop(k, None)
            else:
                environ[k] = v
    else:
        log.log(5, 'nothing to apply')
    return original
[ "def", "_apply_diff", "(", "environ", ",", "diff", ")", ":", "original", "=", "{", "}", "if", "diff", ":", "for", "k", ",", "v", "in", "diff", ".", "iteritems", "(", ")", ":", "if", "v", "is", "None", ":", "log", ".", "log", "(", "5", ",", "'...
23.96875
19.875
def get_cell_content(self):
    """Return the cell's content, or None for button cells / missing cells."""
    try:
        is_button = self.code_array.cell_attributes[self.key]["button_cell"]
    except IndexError:
        return
    if is_button:
        # Button cells carry no directly retrievable content.
        return
    try:
        return self.code_array[self.key]
    except IndexError:
        pass
[ "def", "get_cell_content", "(", "self", ")", ":", "try", ":", "if", "self", ".", "code_array", ".", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"button_cell\"", "]", ":", "return", "except", "IndexError", ":", "return", "try", ":", "return", ...
20.6
23.8
def removeProfile(self, profile, silent=False):
    """
    Removes the given profile from the toolbar.

    Unless ``silent`` is True, asks the user for confirmation first. If the
    removed profile is the currently active one, the view widget is reset.
    Matching toolbar actions are removed and, unless signals are blocked,
    ``profileRemoved`` is emitted per removed profile followed by a single
    ``profilesChanged``.

    :param      profile | <projexui.widgets.xviewwidget.XViewProfile> or
                          profile name string
    :param      silent  | <bool> skip the confirmation dialog when True
    """
    if not profile:
        return

    if not silent:
        title = 'Remove {0}'.format(self.profileText())
        opts = QMessageBox.Yes | QMessageBox.No
        quest = 'Are you sure you want to remove "%s" from the toolbar?'
        # profile may be an XViewProfile instance or a plain name string.
        quest %= profile.name() if isinstance(profile, XViewProfile) else profile
        answer = QMessageBox.question(self.window(), title, quest, opts)
    else:
        answer = QMessageBox.Yes

    if answer == QMessageBox.Yes:
        # Determine whether the profile being removed is the active one --
        # compare against the profile object first, then fall back to its
        # name (AttributeError covers currentProfile() returning None).
        reset = profile == self.currentProfile()
        if not reset:
            try:
                reset = profile == self.currentProfile().name()
            except AttributeError:
                reset = False

        if reset and self.viewWidget():
            self.viewWidget().reset(True)

        # remove the actions from this toolbar
        removed = []
        for act in self.actions():
            if not isinstance(act, XViewProfileAction):
                continue
            # Match by either the action's profile object or its display text.
            if not profile in (act.profile(), act.text()):
                continue

            removed.append(act.profile())
            self.removeAction(act)
            self._profileGroup.removeAction(act)
            # Defer deletion to the Qt event loop.
            act.deleteLater()

        if not self.signalsBlocked() and removed:
            for prof in removed:
                self.profileRemoved.emit(prof)
            self.profilesChanged.emit()
[ "def", "removeProfile", "(", "self", ",", "profile", ",", "silent", "=", "False", ")", ":", "if", "not", "profile", ":", "return", "if", "not", "silent", ":", "title", "=", "'Remove {0}'", ".", "format", "(", "self", ".", "profileText", "(", ")", ")", ...
38.543478
15.847826
def unpublish(scm, published_branch, verbose, fake):
    """Removes a published branch from the remote repository."""
    scm.fake = fake
    # A fake (dry) run implies verbose output.
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)

    branch = scm.fuzzy_match_branch(published_branch)
    if not branch:
        scm.display_available_branches()
        raise click.BadArgumentUsage('Please specify a branch to unpublish')

    if branch not in scm.get_branch_names(local=False):
        raise click.BadArgumentUsage(
            "Branch {0} is not published. Use a branch that is published."
            .format(crayons.yellow(branch)))

    status_log(scm.unpublish_branch,
               'Unpublishing {0}.'.format(crayons.yellow(branch)),
               branch)
[ "def", "unpublish", "(", "scm", ",", "published_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", "require_remote", "=", "True", ")", "br...
35.190476
18.809524
def _drop_oldest_chunk(self): ''' To handle the case when the items comming in the chunk is more than the maximum capacity of the chunk. Our intent behind is to remove the oldest chunk. So that the items come flowing in. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 0 >>> s.chunked_counts {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}} >>> data_stream = ['a','b','c','d','a','e','f'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 2 >>> s.chunked_counts {2: {'f': 1}} ''' chunk_id = min(self.chunked_counts.keys()) chunk = self.chunked_counts.pop(chunk_id) self.n_counts -= len(chunk) for k, v in list(chunk.items()): self.counts[k] -= v self.counts_total -= v
[ "def", "_drop_oldest_chunk", "(", "self", ")", ":", "chunk_id", "=", "min", "(", "self", ".", "chunked_counts", ".", "keys", "(", ")", ")", "chunk", "=", "self", ".", "chunked_counts", ".", "pop", "(", "chunk_id", ")", "self", ".", "n_counts", "-=", "l...
33.896552
14.241379
def _cluster_hits(hits, clusters, assigned_hit_array, cluster_hit_indices, column_cluster_distance, row_cluster_distance, frame_cluster_distance, min_hit_charge, max_hit_charge, ignore_same_hits, noisy_pixels, disabled_pixels):
    ''' Main precompiled function that loopes over the hits and clusters them.

    Hits of the same event that lie within the given column/row/frame
    distances of each other are grouped into clusters. Results are written
    in place into ``clusters`` (via the ``_finish_cluster``/``_finish_event``
    helpers) and into ``assigned_hit_array``.

    :param hits: structured array; fields 'event_number', 'column', 'row'
        and 'frame' are read here (charge is presumably used by the helpers
        -- confirm against _hit_ok/_finish_cluster).
    :param clusters: output array, must have the same length as ``hits``.
    :param assigned_hit_array: per-hit flag array (same length as ``hits``);
        set to 1 once a hit has been assigned or invalidated.
    :param cluster_hit_indices: scratch buffer for the hit indices of the
        cluster currently being grown; its length caps the cluster size.
    :param min_hit_charge, max_hit_charge: valid charge window for a hit.
    :param ignore_same_hits: if truthy, a duplicate hit (same column/row as a
        hit already in the cluster) is marked invalid instead of clustered.
    :param noisy_pixels, disabled_pixels: pixel mask arrays; empty arrays
        disable the respective masking.
    :return: total number of clusters found.
    '''
    total_hits = hits.shape[0]
    if total_hits == 0:
        return 0  # total clusters
    max_cluster_hits = cluster_hit_indices.shape[0]
    if total_hits != clusters.shape[0]:
        raise ValueError("hits and clusters must be the same size")
    if total_hits != assigned_hit_array.shape[0]:
        raise ValueError("hits and assigned_hit_array must be the same size")
    # Correction for charge weighting
    # Some chips have non-zero charge for a charge value of zero, charge needs to be corrected to calculate cluster center correctly
    if min_hit_charge == 0:
        charge_correction = 1
    else:
        charge_correction = 0
    # Temporary variables that are reset for each cluster or event
    start_event_hit_index = 0
    start_event_cluster_index = 0
    cluster_size = 0
    event_number = hits[0]['event_number']
    event_cluster_index = 0
    # Outer loop over all hits in the array (referred to as actual hit)
    for i in range(total_hits):
        # Check for new event and reset event variables
        if _new_event(hits[i]['event_number'], event_number):
            # Finalize the event that just ended before starting the new one.
            _finish_event(
                hits=hits,
                clusters=clusters,
                start_event_hit_index=start_event_hit_index,
                stop_event_hit_index=i,
                start_event_cluster_index=start_event_cluster_index,
                stop_event_cluster_index=start_event_cluster_index + event_cluster_index)
            start_event_hit_index = i
            start_event_cluster_index = start_event_cluster_index + event_cluster_index
            event_number = hits[i]['event_number']
            event_cluster_index = 0
        if assigned_hit_array[i] > 0:  # Hit was already assigned to a cluster in the inner loop, thus skip actual hit
            continue
        # Invalid hits (bad charge or disabled pixel) are flagged and skipped.
        if not _hit_ok(
                hit=hits[i],
                min_hit_charge=min_hit_charge,
                max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[i], disabled_pixels)):
            _set_hit_invalid(hit=hits[i], cluster_id=-1)
            assigned_hit_array[i] = 1
            continue
        # Set/reset cluster variables for new cluster
        # Reset temp array with hit indices of actual cluster for the next cluster
        _set_1d_array(cluster_hit_indices, -1, cluster_size)
        cluster_hit_indices[0] = i
        assigned_hit_array[i] = 1
        cluster_size = 1  # actual cluster has one hit so far
        for j in cluster_hit_indices:  # Loop over all hits of the actual cluster; cluster_hit_indices is updated within the loop if new hit are found
            if j < 0:  # There are no more cluster hits found
                break
            for k in range(cluster_hit_indices[0] + 1, total_hits):
                # Stop event hits loop if new event is reached
                if _new_event(hits[k]['event_number'], event_number):
                    break
                # Hit is already assigned to a cluster, thus skip actual hit
                if assigned_hit_array[k] > 0:
                    continue
                if not _hit_ok(
                        hit=hits[k],
                        min_hit_charge=min_hit_charge,
                        max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[k], disabled_pixels)):
                    _set_hit_invalid(hit=hits[k], cluster_id=-1)
                    assigned_hit_array[k] = 1
                    continue
                # Check if event hit belongs to actual hit and thus to the actual cluster
                if _is_in_max_difference(hits[j]['column'], hits[k]['column'], column_cluster_distance) and _is_in_max_difference(hits[j]['row'], hits[k]['row'], row_cluster_distance) and _is_in_max_difference(hits[j]['frame'], hits[k]['frame'], frame_cluster_distance):
                    if not ignore_same_hits or hits[j]['column'] != hits[k]['column'] or hits[j]['row'] != hits[k]['row']:
                        cluster_size += 1
                        if cluster_size > max_cluster_hits:
                            raise IndexError('cluster_hit_indices is too small to contain all cluster hits')
                        # Append the new hit; the enclosing for-j loop will
                        # visit it and grow the cluster transitively.
                        cluster_hit_indices[cluster_size - 1] = k
                        assigned_hit_array[k] = 1
                    else:
                        # Duplicate column/row hit: invalidate instead of clustering.
                        _set_hit_invalid(hit=hits[k], cluster_id=-2)
                        assigned_hit_array[k] = 1
        # check for valid cluster and add it to the array
        # A single-hit cluster on a known noisy pixel is discarded as noise.
        if cluster_size == 1 and noisy_pixels.shape[0] != 0 and _pixel_masked(hits[cluster_hit_indices[0]], noisy_pixels):
            _set_hit_invalid(hit=hits[cluster_hit_indices[0]], cluster_id=-1)
        else:
            _finish_cluster(
                hits=hits,
                clusters=clusters,
                cluster_size=cluster_size,
                cluster_hit_indices=cluster_hit_indices,
                cluster_index=start_event_cluster_index + event_cluster_index,
                cluster_id=event_cluster_index,
                charge_correction=charge_correction,
                noisy_pixels=noisy_pixels,
                disabled_pixels=disabled_pixels)
            event_cluster_index += 1
    # Last event is assumed to be finished at the end of the hit array, thus add info
    _finish_event(
        hits=hits,
        clusters=clusters,
        start_event_hit_index=start_event_hit_index,
        stop_event_hit_index=total_hits,
        start_event_cluster_index=start_event_cluster_index,
        stop_event_cluster_index=start_event_cluster_index + event_cluster_index)
    total_clusters = start_event_cluster_index + event_cluster_index
    return total_clusters
[ "def", "_cluster_hits", "(", "hits", ",", "clusters", ",", "assigned_hit_array", ",", "cluster_hit_indices", ",", "column_cluster_distance", ",", "row_cluster_distance", ",", "frame_cluster_distance", ",", "min_hit_charge", ",", "max_hit_charge", ",", "ignore_same_hits", ...
47.192
28.088
def setnonce(self, text=None):
    """ Set I{nonce} which is arbitraty set of bytes to prevent reply attacks.

    @param text: The nonce text value. Generated when I{None}.
    @type text: str
    """
    if text is not None:
        self.nonce = text
        return
    # Derive a nonce from username, password and the current system date.
    material = ':'.join((self.username, self.password, Token.sysdate()))
    self.nonce = md5(material.encode("utf-8")).hexdigest()
[ "def", "setnonce", "(", "self", ",", "text", "=", "None", ")", ":", "if", "text", "is", "None", ":", "s", "=", "[", "]", "s", ".", "append", "(", "self", ".", "username", ")", "s", ".", "append", "(", "self", ".", "password", ")", "s", ".", "...
29.777778
10.777778
def path(self, *paths, **kwargs):
    """Create new Path based on self.root and provided paths.

    :param paths: List of sub paths
    :param kwargs: required=False
    :rtype: Path
    """
    cls = self.__class__
    return cls(self.__root__, *paths, **kwargs)
[ "def", "path", "(", "self", ",", "*", "paths", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "__root__", ",", "*", "paths", ",", "*", "*", "kwargs", ")" ]
33.375
11.625
def authenticate_token(self, token):
    '''
    Look up the passed token and return the user_id it is associated with.

    Assumed to be called by framework/wrapper code rather than directly by
    the oauth client, so the framework can obtain the user_id without
    additional DB calls.

    :raises Proauth2Error: 'access_denied' when the token is unknown or
        has been revoked.
    '''
    record = self.data_store.fetch('tokens', token=token)
    if record:
        return record['user_id']
    raise Proauth2Error('access_denied',
                        'token does not exist or has been revoked')
[ "def", "authenticate_token", "(", "self", ",", "token", ")", ":", "token_data", "=", "self", ".", "data_store", ".", "fetch", "(", "'tokens'", ",", "token", "=", "token", ")", "if", "not", "token_data", ":", "raise", "Proauth2Error", "(", "'access_denied'", ...
52.583333
25.583333
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'matching_results') and self.matching_results is not None: _dict['matching_results'] = self.matching_results if hasattr(self, 'hits') and self.hits is not None: _dict['hits'] = [x._to_dict() for x in self.hits] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'matching_results'", ")", "and", "self", ".", "matching_results", "is", "not", "None", ":", "_dict", "[", "'matching_results'", "]", "=", "self", ".", ...
44.666667
19.666667