text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def json_to_initkwargs(record_type, json_struct, kwargs=None):
    """This function converts a JSON dict (json_struct) to a set of init keyword arguments for the passed Record (or JsonRecord). It is called by the JsonRecord constructor. This function takes a JSON data structure and returns a keyword argument list to be passed to the class constructor. Any keys in the input dictionary which are not known are passed as a single ``unknown_json_keys`` value as a dict. This function should generally not be called directly, except as a part of a ``__init__`` or specialized visitor application. """
    # Normalize missing arguments to empty containers.
    if kwargs is None:
        kwargs = {}
    if json_struct is None:
        json_struct = {}
    # Anything other than a dict cannot be mapped to init kwargs.
    if not isinstance(json_struct, dict):
        raise exc.JsonRecordCoerceError(
            passed=json_struct,
            recordtype=record_type,
        )
    # Track every incoming key; known keys are removed as they are consumed.
    unknown_keys = set(json_struct.keys())
    # NOTE(review): .iteritems() implies this module targets Python 2 —
    # confirm before porting.
    for propname, prop in record_type.properties.iteritems():
        # think "does" here rather than "is"; the slot does JSON
        if isinstance(prop, JsonProperty):
            json_name = prop.json_name
            if json_name is not None:
                if json_name in json_struct:
                    # Explicit kwargs passed by the caller win over JSON.
                    if propname not in kwargs:
                        try:
                            kwargs[propname] = _json_to_value_initializer(
                                prop.from_json(
                                    json_struct[json_name]
                                ),
                                prop.valuetype,
                            )
                        except Exception as e:
                            # Wrap so the failing JSON key is reported.
                            raise _box_ingress_error(json_name, e)
                    unknown_keys.remove(json_name)
        elif prop.name in json_struct:
            # Plain (non-JSON) property matched by its own name.
            json_val = json_struct[prop.name]
            unknown_keys.remove(prop.name)
            if prop.name not in kwargs:
                proptype = prop.valuetype
                try:
                    kwargs[propname] = _json_to_value_initializer(
                        json_val, proptype,
                    )
                except Exception as e:
                    raise _box_ingress_error(prop.name, e)
    # Leftover keys are preserved (deep-copied) for round-tripping.
    if unknown_keys:
        kwargs["unknown_json_keys"] = dict(
            (k, deepcopy(json_struct[k])) for k in unknown_keys
        )
    return kwargs
[ "def", "json_to_initkwargs", "(", "record_type", ",", "json_struct", ",", "kwargs", "=", "None", ")", ":", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "json_struct", "is", "None", ":", "json_struct", "=", "{", "}", "if", "not", "i...
42.309091
14.872727
def watch_transient_file(self, filename, mask, proc_class):
    """
    Watch a transient file, which will be created and deleted frequently
    over time (e.g. pid file).

    @attention: Currently under the call to this function it is not
    possible to correctly watch the events triggered into the same
    base directory than the directory where is located this watched
    transient file. For instance it would be wrong to make these
    two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...)
    and wm.add_watch('/var/run/', ...)

    @param filename: Filename.
    @type filename: string
    @param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE.
    @type mask: int
    @param proc_class: ProcessEvent (or of one of its subclass), beware of
                       accepting a ProcessEvent's instance as argument into
                       __init__, see transient_file.py example for more
                       details.
    @type proc_class: ProcessEvent's instance or of one of its subclasses.
    @return: Same as add_watch().
    @rtype: Same as add_watch().
    """
    dirname = os.path.dirname(filename)
    if dirname == '':
        return {}  # Maintains coherence with add_watch()
    basename = os.path.basename(filename)
    # Assuming we are watching at least for IN_CREATE and IN_DELETE
    mask |= IN_CREATE | IN_DELETE

    def cmp_name(event):
        # Only fire the chained handler for events about this exact file.
        # BUG FIX: use getattr with a default — some events carry no
        # 'name' attribute at all, and the bare getattr(event, 'name')
        # would raise AttributeError instead of skipping the event.
        if getattr(event, 'name', None) is None:
            return False
        return basename == event.name

    # Watch the parent directory (non-recursive, no glob, no auto-add)
    # and filter down to events matching the transient file's basename.
    return self.add_watch(dirname, mask,
                          proc_fun=proc_class(ChainIfTrue(func=cmp_name)),
                          rec=False,
                          auto_add=False,
                          do_glob=False,
                          exclude_filter=lambda path: False)
[ "def", "watch_transient_file", "(", "self", ",", "filename", ",", "mask", ",", "proc_class", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "dirname", "==", "''", ":", "return", "{", "}", "# Maintains coherence wit...
47.15
19.25
def _pluck_pull_request_info(pull_request_url: str) -> PullRequestInfo:
    """
    # Plucks a PullRequestInfo from a valid
    >>> _pluck_pull_request_info('https://github.com/zhammer/morning-cd/pull/17')
    PullRequestInfo(owner='zhammer', repo='morning-cd', number=17)

    # Raises a GithubPrError on bad urls
    >>> _pluck_pull_request_info('bad url')
    Traceback (most recent call last):
        ...
    faaspact_verifier.delivery.github_prs.GithubPrError: ...
    """
    pattern = r'github\.com/(?P<owner>[\w-]+)/(?P<repo>[\w-]+)/pull/(?P<number>\d+)'
    found = re.search(pattern, pull_request_url)
    if found is None:
        raise GithubPrError(f'Couldnt parse url: {pull_request_url}')
    return PullRequestInfo(
        owner=found.group('owner'),
        repo=found.group('repo'),
        number=int(found.group('number')),
    )
[ "def", "_pluck_pull_request_info", "(", "pull_request_url", ":", "str", ")", "->", "PullRequestInfo", ":", "match", "=", "re", ".", "search", "(", "r'github\\.com/(?P<owner>[\\w-]+)/(?P<repo>[\\w-]+)/pull/(?P<number>\\d+)'", ",", "pull_request_url", ")", "if", "not", "mat...
33
19.88
def wrap_socket(socket, certfile, keyfile, password=None):
    """
    Wraps an existing TCP socket and returns an SSLSocket object

    :param socket: The socket to wrap
    :param certfile: The server certificate file
    :param keyfile: The server private key file
    :param password: Password for the private key file (Python >= 3.3)
    :return: The wrapped socket
    :raise SSLError: Error wrapping the socket / loading the certificate
    :raise OSError: A password has been given, but ciphered key files are not
                    supported by the current version of Python
    """
    # Dedicated logger so fallback warnings are attributable.
    logger = logging.getLogger("ssl_wrap")

    def _password_support_error():
        """
        Logs a warning and raises an OSError if a password has been given but
        Python doesn't support ciphered key files.

        :raise OSError: If a password has been given
        """
        if password:
            logger.error(
                "The ssl.wrap_socket() fallback method doesn't "
                "support key files with a password."
            )
            raise OSError(
                "Can't decode the SSL key file: "
                "this version of Python doesn't support it"
            )

    try:
        # Prefer the default context factory, as it will be updated to reflect
        # security issues (Python >= 2.7.9 and >= 3.4)
        default_context = ssl.create_default_context()
    except AttributeError:
        # create_default_context() not available on this Python version.
        default_context = None

    try:
        # Try to equivalent to create_default_context() in Python 3.5
        # Create an SSL context and set its options
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        if default_context is not None:
            # Copy options
            context.options = default_context.options
        else:
            # Set up the context as create_default_context() does in Python 3.5
            # SSLv2 considered harmful
            # SSLv3 has problematic security
            context.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
            # disallow ciphers with known vulnerabilities
            context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
        try:
            # Load the certificate, with a password
            context.load_cert_chain(certfile, keyfile, password)
        except TypeError:
            # The "password" argument isn't supported
            # Check support for key file password
            _password_support_error()
            # Load the certificate, without the password argument
            context.load_cert_chain(certfile, keyfile)

        # Return the wrapped socket
        return context.wrap_socket(socket, server_side=True)
    except AttributeError as ex:
        # SSLContext itself is missing: very old Python.
        # Log a warning to advise the user of possible security holes
        logger.warning(
            "Can't create a custom SSLContext. "
            "The server should be considered insecure."
        )
        logger.debug("Missing attribute: %s", ex)

        # Check support for key file password
        _password_support_error()

        # Fall back to the "old" wrap_socket method
        return ssl.wrap_socket(
            socket, server_side=True, certfile=certfile, keyfile=keyfile
        )
[ "def", "wrap_socket", "(", "socket", ",", "certfile", ",", "keyfile", ",", "password", "=", "None", ")", ":", "# Log warnings when some", "logger", "=", "logging", ".", "getLogger", "(", "\"ssl_wrap\"", ")", "def", "_password_support_error", "(", ")", ":", "\"...
36.139535
19.697674
def cell_strings(term):
    """Return the strings that represent each possible living cell state.

    Picks the most colorful representation the terminal supports.
    """
    colors = term.number_of_colors
    if colors >= 16:
        painters = (term.on_bright_red,
                    term.on_bright_green,
                    term.on_bright_cyan)
    elif colors >= 8:
        painters = (term.on_red, term.on_green, term.on_blue)
    else:
        # For black and white, use the checkerboard cursor from the vt100
        # alternate charset:
        return (term.reverse(' '),
                term.smacs + term.reverse('a') + term.rmacs,
                term.smacs + 'a' + term.rmacs)
    # Wrap spaces in whatever pretty colors we chose:
    return [paint(' ') for paint in painters]
[ "def", "cell_strings", "(", "term", ")", ":", "num_colors", "=", "term", ".", "number_of_colors", "if", "num_colors", ">=", "16", ":", "funcs", "=", "term", ".", "on_bright_red", ",", "term", ".", "on_bright_green", ",", "term", ".", "on_bright_cyan", "elif"...
37.684211
17.157895
def file_name(self, category=None, extension=None):
    """Generate a random file name with the given or a generated extension.

    :param category: audio|image|office|text|video
    :param extension: file extension
    """
    if not extension:
        # No explicit extension: pick one appropriate for the category.
        extension = self.file_extension(category)
    base = self.generator.word()
    return '{0}.{1}'.format(base, extension)
[ "def", "file_name", "(", "self", ",", "category", "=", "None", ",", "extension", "=", "None", ")", ":", "extension", "=", "extension", "if", "extension", "else", "self", ".", "file_extension", "(", "category", ")", "filename", "=", "self", ".", "generator"...
42
9.25
def policy_map_clss_span_session(self, **kwargs):
    """Auto Generated Code
    """
    # Build the NETCONF config tree:
    # config/policy-map/{po-name,class/{cl-name,span/session}}
    config = ET.Element("config")
    policy_map = ET.SubElement(
        config, "policy-map",
        xmlns="urn:brocade.com:mgmt:brocade-policer")
    ET.SubElement(policy_map, "po-name").text = kwargs.pop('po_name')
    clss = ET.SubElement(policy_map, "class")
    ET.SubElement(clss, "cl-name").text = kwargs.pop('cl_name')
    span = ET.SubElement(clss, "span")
    ET.SubElement(span, "session").text = kwargs.pop('session')

    # Ship the document through the caller-supplied or default callback.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "policy_map_clss_span_session", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "policy_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"policy-map\"", ",", "xmlns", "=", "\"urn:...
44.125
13.0625
def process_url(url, key):
    """
    Yields DOE CODE records from a DOE CODE .json URL response

    Converts a DOE CODE API .json URL response into DOE CODE projects
    """
    logger.debug('Fetching DOE CODE JSON: %s', url)
    if key is None:
        raise ValueError('DOE CODE API Key value is missing!')
    # Basic-auth header carries the API key.
    headers = {"Authorization": "Basic " + key}
    doecode_json = requests.get(url, headers=headers).json()
    for record in doecode_json['records']:
        yield record
[ "def", "process_url", "(", "url", ",", "key", ")", ":", "logger", ".", "debug", "(", "'Fetching DOE CODE JSON: %s'", ",", "url", ")", "if", "key", "is", "None", ":", "raise", "ValueError", "(", "'DOE CODE API Key value is missing!'", ")", "response", "=", "req...
29.625
21.375
def create_slug(title, plain_len=None):
    '''
    Tries to create a slug from a title, trading off collision risk with
    readability and minimized cruft

    title - a unicode object with a title to use as basis of the slug
    plain_len - the maximum character length preserved (from the beginning) of the title

    >>> from versa.contrib.datachefids import create_slug
    >>> create_slug(u"The quick brown fox jumps over the lazy dog")
    'the_quick_brown_fox_jumps_over_the_lazy_dog'
    >>> create_slug(u"The quick brown fox jumps over the lazy dog", 20)
    'the_quick_brown_fox'
    '''
    truncated = title[:plain_len] if plain_len else title
    # Replace disallowed characters, lowercase, then collapse underscores.
    lowered = OMIT_FROM_SLUG_PAT.sub('_', truncated).lower()
    return NORMALIZE_UNDERSCORES_PAT.sub('_', lowered)
[ "def", "create_slug", "(", "title", ",", "plain_len", "=", "None", ")", ":", "if", "plain_len", ":", "title", "=", "title", "[", ":", "plain_len", "]", "pass1", "=", "OMIT_FROM_SLUG_PAT", ".", "sub", "(", "'_'", ",", "title", ")", ".", "lower", "(", ...
45.875
26.125
def update_data_frames(network, cluster_weights, dates, hours):
    """
    Updates the snapshots, snapshots weights and the dataframes based on
    the original data in the network and the medoids created by clustering
    these original data.

    Parameters
    -----------
    network : pyPSA network object
    cluster_weights : dict
        Mapping of cluster medoid -> weight (number of represented snapshots).
    dates : Datetimeindex
        Medoid timestamps kept as the new snapshots.
    hours : int
        Number of consecutive hourly records per medoid.

    Returns
    -------
    network
    """
    network.snapshot_weightings = network.snapshot_weightings.loc[dates]
    network.snapshots = network.snapshot_weightings.index

    # Repeat each cluster weight `hours` times so every hourly record of a
    # medoid carries its cluster's weight.
    snapshot_weightings = []
    for weight in cluster_weights.values():
        snapshot_weightings.extend([weight] * hours)

    for i in range(len(network.snapshot_weightings)):
        # Use .iloc for positional assignment: integer keys on a
        # datetime-indexed Series are ambiguous (label vs position).
        network.snapshot_weightings.iloc[i] = snapshot_weightings[i]

    # BUG FIX: sort_values()/sort_index() return new objects — the original
    # discarded their results, so the snapshots were never actually
    # reordered. Assign the sorted results back.
    network.snapshots = network.snapshots.sort_values()
    network.snapshot_weightings = network.snapshot_weightings.sort_index()

    return network
[ "def", "update_data_frames", "(", "network", ",", "cluster_weights", ",", "dates", ",", "hours", ")", ":", "network", ".", "snapshot_weightings", "=", "network", ".", "snapshot_weightings", ".", "loc", "[", "dates", "]", "network", ".", "snapshots", "=", "netw...
28.314286
20.857143
def find_path(name, path=None, exact=False):
    """
    Search for a file or directory on your local filesystem by name
    (file must be in a directory specified in a PATH environment variable)

    Args:
        fname (PathLike or str): file name to match.
            If exact is False this may be a glob pattern

        path (str or Iterable[PathLike]): list of directories to search either
            specified as an os.pathsep separated string or a list of
            directories.  Defaults to environment PATH.

        exact (bool): if True, only returns exact matches. Default False.

    Notes:
        For recursive behavior set `path=(d for d, _, _ in os.walk('.'))`,
        where '.' might be replaced by the root directory of interest.

    Example:
        >>> list(find_path('ping', exact=True))
        >>> list(find_path('bin'))
        >>> list(find_path('bin'))
        >>> list(find_path('*cc*'))
        >>> list(find_path('cmake*'))

    Example:
        >>> import ubelt as ub
        >>> from os.path import dirname
        >>> path = dirname(dirname(ub.util_platform.__file__))
        >>> res = sorted(find_path('ubelt/util_*.py', path=path))
        >>> assert len(res) >= 10
        >>> res = sorted(find_path('ubelt/util_platform.py', path=path, exact=True))
        >>> print(res)
        >>> assert len(res) == 1
    """
    if path is None:
        path = os.environ.get('PATH', os.defpath)
    if isinstance(path, six.string_types):
        dpaths = path.split(os.pathsep)
    else:
        dpaths = path
    candidates = (join(dpath, name) for dpath in dpaths)
    if exact:
        if WIN32:  # nocover
            # Windows resolves executables via PATHEXT suffixes too.
            pathext = [''] + os.environ.get('PATHEXT', '').split(os.pathsep)
            candidates = (cand + ext for cand in candidates for ext in pathext)
        return filter(exists, candidates)
    else:
        import glob
        return it.chain.from_iterable(
            glob.glob(pattern) for pattern in candidates)
[ "def", "find_path", "(", "name", ",", "path", "=", "None", ",", "exact", "=", "False", ")", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "os", ".", "defpath", ")", "if", "path", "is", "None", "else", "path", "dpaths", "...
39.163265
22.183673
def setup_logging(self):
    """Setup logging module based on known modules in the config file
    """
    # Apply the configured level to each known noisy third-party logger.
    for module in ('amqp', 'rdflib'):
        level = str_to_logging(self.get('logging', module))
        logging.getLogger(module).setLevel(level)
[ "def", "setup_logging", "(", "self", ")", ":", "logging", ".", "getLogger", "(", "'amqp'", ")", ".", "setLevel", "(", "str_to_logging", "(", "self", ".", "get", "(", "'logging'", ",", "'amqp'", ")", ")", ")", "logging", ".", "getLogger", "(", "'rdflib'",...
57.2
22.8
def _should_allocate_port(pid):
    """Determine if we should allocate a port for use by the given process id."""
    if pid <= 0:
        log.info('Not allocating a port to invalid pid')
        return False
    if pid == 1:
        # The client probably meant to send us its parent pid but
        # had been reparented to init.
        log.info('Not allocating a port to init.')
        return False
    try:
        # Signal 0 probes for existence without affecting the process.
        os.kill(pid, 0)
    except ProcessLookupError:
        log.info('Not allocating a port to a non-existent process')
        return False
    else:
        return True
[ "def", "_should_allocate_port", "(", "pid", ")", ":", "if", "pid", "<=", "0", ":", "log", ".", "info", "(", "'Not allocating a port to invalid pid'", ")", "return", "False", "if", "pid", "==", "1", ":", "# The client probably meant to send us its parent pid but", "#...
34.75
17.5625
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: MessageInteractionContext for this MessageInteractionInstance
    :rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionContext
    """
    # Lazily build and memoize the context on first access.
    if self._context is not None:
        return self._context
    solution = self._solution
    self._context = MessageInteractionContext(
        self._version,
        service_sid=solution['service_sid'],
        session_sid=solution['session_sid'],
        participant_sid=solution['participant_sid'],
        sid=solution['sid'],
    )
    return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "MessageInteractionContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]",...
45.529412
22.941176
def field_get_subfields(field):
    """
    Given a field, will place all subfields into a dictionary

    Parameters:
    * field - tuple: The field to get subfields for; ``field[0]`` is an
      iterable of ``(code, value)`` pairs.

    Returns: a dictionary, codes as keys and a list of values as the value
    """
    pairs = {}
    for code, value in field[0]:
        # BUG FIX: the original guarded the append with
        # ``pairs[key] != value``, which compares a *list* against a
        # scalar (always True), so the intended check never fired — and a
        # pathological match would have reset the accumulated list.
        # Simply accumulate every value under its code.
        pairs.setdefault(code, []).append(value)
    return pairs
[ "def", "field_get_subfields", "(", "field", ")", ":", "pairs", "=", "{", "}", "for", "key", ",", "value", "in", "field", "[", "0", "]", ":", "if", "key", "in", "pairs", "and", "pairs", "[", "key", "]", "!=", "value", ":", "pairs", "[", "key", "]"...
35.916667
12.666667
def _consume_flags(self): """Read flags until we encounter the first token that isn't a flag.""" flags = [] while self._at_flag(): flag = self._unconsumed_args.pop() if not self._check_for_help_request(flag): flags.append(flag) return flags
[ "def", "_consume_flags", "(", "self", ")", ":", "flags", "=", "[", "]", "while", "self", ".", "_at_flag", "(", ")", ":", "flag", "=", "self", ".", "_unconsumed_args", ".", "pop", "(", ")", "if", "not", "self", ".", "_check_for_help_request", "(", "flag...
33.625
12.625
def enrich_pubmed_citations(graph: BELGraph, manager: Manager) -> Set[str]:
    """Overwrite all PubMed citations with values from NCBI's eUtils lookup service.

    :param graph: the BEL graph whose citation entries are updated in place
    :param manager: cache manager used by the eUtils lookup
    :return: A set of PMIDs for which the eUtils service crashed
    """
    pmids = get_pubmed_identifiers(graph)
    pmid_data, errors = get_citations_by_pmids(manager=manager, pmids=pmids)
    # Walk every edge that carries a PubMed citation and merge in the
    # looked-up metadata.
    for u, v, k in filter_edges(graph, has_pubmed):
        pmid = graph[u][v][k][CITATION][CITATION_REFERENCE].strip()
        if pmid not in pmid_data:
            # Lookup failed for this PMID; record and skip rather than crash.
            log.warning('Missing data for PubMed identifier: %s', pmid)
            errors.add(pmid)
            continue
        graph[u][v][k][CITATION].update(pmid_data[pmid])
    return errors
[ "def", "enrich_pubmed_citations", "(", "graph", ":", "BELGraph", ",", "manager", ":", "Manager", ")", "->", "Set", "[", "str", "]", ":", "pmids", "=", "get_pubmed_identifiers", "(", "graph", ")", "pmid_data", ",", "errors", "=", "get_citations_by_pmids", "(", ...
36.315789
23.315789
def _linkFeature(self, feature): """ Link a feature with its parents. """ parentNames = feature.attributes.get("Parent") if parentNames is None: self.roots.add(feature) else: for parentName in parentNames: self._linkToParent(feature, parentName)
[ "def", "_linkFeature", "(", "self", ",", "feature", ")", ":", "parentNames", "=", "feature", ".", "attributes", ".", "get", "(", "\"Parent\"", ")", "if", "parentNames", "is", "None", ":", "self", ".", "roots", ".", "add", "(", "feature", ")", "else", "...
32.4
8
def _check_required_group(self): """ Returns True if the group requirement (AUTH_LDAP_REQUIRE_GROUP) is met. Always returns True if AUTH_LDAP_REQUIRE_GROUP is None. """ required_group_dn = self.settings.REQUIRE_GROUP if required_group_dn is not None: is_member = self._get_groups().is_member_of(required_group_dn) if not is_member: raise self.AuthenticationFailed("user is not a member of AUTH_LDAP_REQUIRE_GROUP") return True
[ "def", "_check_required_group", "(", "self", ")", ":", "required_group_dn", "=", "self", ".", "settings", ".", "REQUIRE_GROUP", "if", "required_group_dn", "is", "not", "None", ":", "is_member", "=", "self", ".", "_get_groups", "(", ")", ".", "is_member_of", "(...
39.384615
22.307692
def git_list_tags(repo_dir, with_messages=False):
    """Return a list of git tags for the git repo in `repo_dir`."""
    command = ['git', 'tag', '-l']
    if with_messages:
        # -n1 prints one annotation line next to each tag.
        command.append('-n1')
    lines = execute_git_command(command, repo_dir=repo_dir).splitlines()
    tags = [line.strip() for line in lines if line.strip()]
    if not with_messages:
        return tags
    # Split each "tag<ws>message" line into a (tag, message) tuple.
    return [tuple(part.strip() for part in tag_line.split(None, 1))
            for tag_line in tags]
[ "def", "git_list_tags", "(", "repo_dir", ",", "with_messages", "=", "False", ")", ":", "command", "=", "[", "'git'", ",", "'tag'", ",", "'-l'", "]", "if", "with_messages", ":", "command", ".", "append", "(", "'-n1'", ")", "raw", "=", "execute_git_command",...
41.636364
13.909091
def set_tag(self, name, tag_class):
    """
    Define a new tag parser method

    :param name: The name of the tag
    :type name: str

    :param tag_class: The Tag class, this must be a subclass of base parser.tags.Tag
    :type tag_class: Tag
    """
    # Warn when redefining a tag that already exists.
    if name in self._tags:
        self._log.warn('Overwriting an existing Tag class: {tag}'.format(tag=name))

    # Refuse classes that do not implement the base Tag interface.
    if not issubclass(tag_class, Tag):
        self._log.error('Tag class must implement the base Tag interface, please review the documentation on '
                        'defining custom tags. (Refusing to set the tag "{tag}")'.format(tag=name))
        return

    self._tags[name] = tag_class
[ "def", "set_tag", "(", "self", ",", "name", ",", "tag_class", ")", ":", "# Has this tag already been defined?", "if", "name", "in", "self", ".", "_tags", ":", "self", ".", "_log", ".", "warn", "(", "'Overwriting an existing Tag class: {tag}'", ".", "format", "("...
40.85
22.75
def _torque_queue_nodes(queue):
    """Retrieve the nodes available for a queue.

    Parses out nodes from `acl_hosts` in qstat -Qf and extracts the
    initial names of nodes used in pbsnodes.
    """
    qstat_out = subprocess.check_output(["qstat", "-Qf", queue]).decode()
    hosts = []
    # True while we are inside a (possibly line-wrapped) acl_hosts value.
    in_hosts = False
    for line in qstat_out.split("\n"):
        if line.strip().startswith("acl_hosts = "):
            hosts.extend(line.replace("acl_hosts = ", "").strip().split(","))
            in_hosts = True
        elif in_hosts:
            # A new "attr = value" line terminates the wrapped acl_hosts
            # block; anything else is a continuation with more hosts.
            if line.find(" = ") > 0:
                break
            else:
                hosts.extend(line.strip().split(","))
    # Keep only the short hostname (strip the domain), drop empties.
    return tuple([h.split(".")[0].strip() for h in hosts if h.strip()])
[ "def", "_torque_queue_nodes", "(", "queue", ")", ":", "qstat_out", "=", "subprocess", ".", "check_output", "(", "[", "\"qstat\"", ",", "\"-Qf\"", ",", "queue", "]", ")", ".", "decode", "(", ")", "hosts", "=", "[", "]", "in_hosts", "=", "False", "for", ...
37.736842
17.368421
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
    '''
    Returns the proxy module for this salt-proxy-minion
    '''
    # NOTE(review): `whitelist` is accepted but never used here — confirm
    # whether it should be forwarded to LazyLoader.
    ret = LazyLoader(
        _module_dirs(opts, 'proxy'),
        opts,
        tag='proxy',
        pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
    )
    # Let proxy modules reference each other via the __proxy__ dunder.
    ret.pack['__proxy__'] = ret
    return ret
[ "def", "proxy", "(", "opts", ",", "functions", "=", "None", ",", "returners", "=", "None", ",", "whitelist", "=", "None", ",", "utils", "=", "None", ")", ":", "ret", "=", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'proxy'", ")", ",", "opts...
26
26.857143
def namespace(self, key, glob=False):
    """Return a namespace for keyring"""
    if not self.name:
        # Fall back to the Django settings module as the namespace base.
        self.name = os.environ['DJANGO_SETTINGS_MODULE']
    prefix = key if glob else self.name
    return '.'.join([prefix, self._glob])
[ "def", "namespace", "(", "self", ",", "key", ",", "glob", "=", "False", ")", ":", "if", "not", "self", ".", "name", ":", "self", ".", "name", "=", "os", ".", "environ", "[", "'DJANGO_SETTINGS_MODULE'", "]", "ns", "=", "'.'", ".", "join", "(", "[", ...
45.166667
18.166667
async def edit(self, **fields):
    """|coro|

    Edits the current profile of the client.

    If a bot account is used then a password field is optional,
    otherwise it is required.

    Note
    -----
    To upload an avatar, a :term:`py:bytes-like object` must be passed in that
    represents the image being uploaded. If this is done through a file
    then the file must be opened via ``open('some_filename', 'rb')`` and
    the :term:`py:bytes-like object` is given through the use of ``fp.read()``.

    The only image formats supported for uploading is JPEG and PNG.

    Parameters
    -----------
    password: :class:`str`
        The current password for the client's account.
        Only applicable to user accounts.
    new_password: :class:`str`
        The new password you wish to change to.
        Only applicable to user accounts.
    email: :class:`str`
        The new email you wish to change to.
        Only applicable to user accounts.
    house: Optional[:class:`HypeSquadHouse`]
        The hypesquad house you wish to change to.
        Could be ``None`` to leave the current house. Only applicable
        to user accounts.
    username: :class:`str`
        The new username you wish to change to.
    avatar: :class:`bytes`
        A :term:`py:bytes-like object` representing the image to upload.
        Could be ``None`` to denote no avatar.

    Raises
    ------
    HTTPException
        Editing your profile failed.
    InvalidArgument
        Wrong image format passed for ``avatar``.
    ClientException
        Password is required for non-bot accounts.
        House field was not a HypeSquadHouse.
    """
    # Distinguish "avatar not provided" (keep current) from
    # "avatar=None" (explicitly remove it).
    try:
        avatar_bytes = fields['avatar']
    except KeyError:
        avatar = self.avatar
    else:
        if avatar_bytes is not None:
            avatar = _bytes_to_base64_data(avatar_bytes)
        else:
            avatar = None

    not_bot_account = not self.bot
    password = fields.get('password')
    if not_bot_account and password is None:
        raise ClientException('Password is required for non-bot accounts.')

    args = {
        'password': password,
        'username': fields.get('username', self.name),
        'avatar': avatar
    }

    if not_bot_account:
        # Email and password changes only exist for user accounts.
        args['email'] = fields.get('email', self.email)

        if 'new_password' in fields:
            args['new_password'] = fields['new_password']

    http = self._state.http

    if 'house' in fields:
        house = fields['house']
        if house is None:
            await http.leave_hypesquad_house()
        elif not isinstance(house, HypeSquadHouse):
            raise ClientException('`house` parameter was not a HypeSquadHouse')
        else:
            value = house.value
            await http.change_hypesquad_house(value)

    data = await http.edit_profile(**args)

    if not_bot_account:
        self.email = data['email']
        try:
            # Changing the password rotates the token; adopt the new one.
            http._token(data['token'], bot=False)
        except KeyError:
            pass

    self._update(data)
[ "async", "def", "edit", "(", "self", ",", "*", "*", "fields", ")", ":", "try", ":", "avatar_bytes", "=", "fields", "[", "'avatar'", "]", "except", "KeyError", ":", "avatar", "=", "self", ".", "avatar", "else", ":", "if", "avatar_bytes", "is", "not", ...
33.142857
19.367347
def sync_auth(self, vault_client, resources):
    """Synchronizes auth mount wrappers.

    These happen early in the cycle, to ensure that user backends are
    proper. They may also be used to set mount tuning."""
    # Sync the auth mounts themselves first.
    for backend in self.auths():
        backend.sync(vault_client)
    # Then sync user-backend resources that live on those mounts.
    for resource in (r for r in resources if isinstance(r, (LDAP, UserPass))):
        resource.sync(vault_client)
    # Hand back everything this pass did not consume.
    return [r for r in resources
            if not isinstance(r, (LDAP, UserPass, AuditLog))]
[ "def", "sync_auth", "(", "self", ",", "vault_client", ",", "resources", ")", ":", "for", "auth", "in", "self", ".", "auths", "(", ")", ":", "auth", ".", "sync", "(", "vault_client", ")", "auth_resources", "=", "[", "x", "for", "x", "in", "resources", ...
38.866667
12.333333
def send_packet(self, packet, protocol='json', time_precision=None):
    """Send an UDP packet.

    :param packet: the packet to be sent
    :type packet: (if protocol is 'json') dict
                  (if protocol is 'line') list of line protocol strings
    :param protocol: protocol of input data, either 'json' or 'line'
    :type protocol: str
    :param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
    :type time_precision: str
    :raises ValueError: if `protocol` is neither 'json' nor 'line'
    """
    if protocol == 'json':
        data = make_lines(packet, time_precision).encode('utf-8')
    elif protocol == 'line':
        data = ('\n'.join(packet) + '\n').encode('utf-8')
    else:
        # BUG FIX: an unknown protocol previously fell through to the
        # sendto call and raised an opaque NameError on the unbound
        # `data`; fail with a clear, documented error instead.
        raise ValueError("'protocol' must be 'json' or 'line'")
    self.udp_socket.sendto(data, (self._host, self._udp_port))
[ "def", "send_packet", "(", "self", ",", "packet", ",", "protocol", "=", "'json'", ",", "time_precision", "=", "None", ")", ":", "if", "protocol", "==", "'json'", ":", "data", "=", "make_lines", "(", "packet", ",", "time_precision", ")", ".", "encode", "(...
46.5625
18.75
def parse_datetime(self, text):
    '''Parse datetime from line of text.'''
    # Delegate to the module-level parse_datetime using this instance's
    # configured format options.
    return parse_datetime(
        text,
        date_format=self.date_format,
        is_day_period=self.is_day_period,
    )
[ "def", "parse_datetime", "(", "self", ",", "text", ")", ":", "return", "parse_datetime", "(", "text", ",", "date_format", "=", "self", ".", "date_format", ",", "is_day_period", "=", "self", ".", "is_day_period", ")" ]
51.5
16
def sample(self, initial_pos, num_samples, trajectory_length, stepsize=None, return_type='dataframe'):
    """
    Method to return samples using Hamiltonian Monte Carlo

    Parameters
    ----------
    initial_pos: A 1d array like object
        Vector representing values of parameter position, the starting
        state in markov chain.

    num_samples: int
        Number of samples to be generated

    trajectory_length: int or float
        Target trajectory length, stepsize * number of steps(L),
        where L is the number of steps taken per HMC iteration,
        and stepsize is step size for splitting time method.

    stepsize: float , defaults to None
        The stepsize for proposing new values of position and momentum
        in simulate_dynamics. If None, then will be choosen suitably

    return_type: string (dataframe | recarray)
        Return type for samples, either of 'dataframe' or 'recarray'.
        Defaults to 'dataframe'

    Returns
    -------
    sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument

    Examples
    --------
    >>> from pgmpy.sampling import HamiltonianMC as HMC, GradLogPDFGaussian, ModifiedEuler
    >>> from pgmpy.factors.continuous import GaussianDistribution as JGD
    >>> import numpy as np
    >>> mean = np.array([1, -1])
    >>> covariance = np.array([[1, 0.2], [0.2, 1]])
    >>> model = JGD(['x', 'y'], mean, covariance)
    >>> sampler = HMC(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=ModifiedEuler)
    >>> samples = sampler.sample(np.array([1, 1]), num_samples = 5,
    ...                          trajectory_length=6, stepsize=0.25, return_type='dataframe')
    >>> samples
                   x              y
    0   1.000000e+00   1.000000e+00
    1   1.592133e+00   1.152911e+00
    2   1.608700e+00   1.315349e+00
    3   1.608700e+00   1.315349e+00
    4   6.843856e-01   6.237043e-01
    >>> mean = np.array([4, 1, -1])
    >>> covariance = np.array([[1, 0.7 , 0.8], [0.7, 1, 0.2], [0.8, 0.2, 1]])
    >>> model = JGD(['x', 'y', 'z'], mean, covariance)
    >>> sampler = HMC(model=model, grad_log_pdf=GLPG)
    >>> samples = sampler.sample(np.array([1, 1]), num_samples = 10000,
    ...                          trajectory_length=6, stepsize=0.25, return_type='dataframe')
    >>> np.cov(samples.values.T)
    array([[ 1.00795398,  0.71384233,  0.79802097],
           [ 0.71384233,  1.00633524,  0.21313767],
           [ 0.79802097,  0.21313767,  0.98519017]])
    """
    # Reset the acceptance counter for this run.
    self.accepted_proposals = 1.0

    initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
    _check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')

    if stepsize is None:
        # Auto-tune a stepsize from the starting position.
        stepsize = self._find_reasonable_stepsize(initial_pos)

    # One float field per model variable in the output record array.
    types = [(var_name, 'float') for var_name in self.model.variables]
    samples = np.zeros(num_samples, dtype=types).view(np.recarray)

    # Assigning after converting into tuple because value was being changed after assignment
    # Reason for this is unknown
    samples[0] = tuple(initial_pos)
    position_m = initial_pos

    # Number of leapfrog steps per iteration, at least one.
    lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
    for i in range(1, num_samples):
        # Generating sample
        position_m, _ = self._sample(position_m, trajectory_length, stepsize, lsteps)
        samples[i] = tuple(position_m)

    self.acceptance_rate = self.accepted_proposals / num_samples

    return _return_samples(return_type, samples)
[ "def", "sample", "(", "self", ",", "initial_pos", ",", "num_samples", ",", "trajectory_length", ",", "stepsize", "=", "None", ",", "return_type", "=", "'dataframe'", ")", ":", "self", ".", "accepted_proposals", "=", "1.0", "initial_pos", "=", "_check_1d_array_ob...
43.494118
24.294118
def time_to_channels(embedded_video): """Put time dimension on channels in an embedded video.""" video_shape = common_layers.shape_list(embedded_video) if len(video_shape) != 5: raise ValueError("Assuming videos given as tensors in the format " "[batch, time, height, width, channels] but got one " "of shape: %s" % str(video_shape)) transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4]) return tf.reshape(transposed, [ video_shape[0], video_shape[2], video_shape[3], video_shape[1] * video_shape[4] ])
[ "def", "time_to_channels", "(", "embedded_video", ")", ":", "video_shape", "=", "common_layers", ".", "shape_list", "(", "embedded_video", ")", "if", "len", "(", "video_shape", ")", "!=", "5", ":", "raise", "ValueError", "(", "\"Assuming videos given as tensors in t...
47.166667
15.833333
def number_of_records_per_hour(self, value=None): """Corresponds to IDD Field `number_of_records_per_hour` Args: value (int): value for IDD Field `number_of_records_per_hour` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = int(value) except ValueError: raise ValueError( 'value {} need to be of type int ' 'for field `number_of_records_per_hour`'.format(value)) self._number_of_records_per_hour = value
[ "def", "number_of_records_per_hour", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to ...
34.952381
20.904762
def fetch(self): """ Fetch a AuthorizedConnectAppInstance :returns: Fetched AuthorizedConnectAppInstance :rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return AuthorizedConnectAppInstance( self._version, payload, account_sid=self._solution['account_sid'], connect_app_sid=self._solution['connect_app_sid'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "Autho...
28.142857
20.047619
def crop_box(im, box=False, **kwargs): """Uses box coordinates to crop an image without resizing it first.""" if box: im = im.crop(box) return im
[ "def", "crop_box", "(", "im", ",", "box", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "box", ":", "im", "=", "im", ".", "crop", "(", "box", ")", "return", "im" ]
32.2
14.6
def score(self, X, y, **kwargs): """ Generates a 2D array where each row is the count of the predicted classes and each column is the true class Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values Returns ------- score_ : float Global accuracy score """ # We're relying on predict to raise NotFitted y_pred = self.predict(X) y_type, y_true, y_pred = _check_targets(y, y_pred) if y_type not in ("binary", "multiclass"): raise YellowbrickValueError("%s is not supported" % y_type) indices = unique_labels(y_true, y_pred) if len(self.classes_) > len(indices): raise ModelError("y and y_pred contain zero values " "for one of the specified classes") elif len(self.classes_) < len(indices): raise NotImplementedError("filtering classes is " "currently not supported") # Create a table of predictions whose rows are the true classes # and whose columns are the predicted classes; each element # is the count of predictions for that class that match the true # value of that class. self.predictions_ = np.array([ [ (y_pred[y == label_t] == label_p).sum() for label_p in indices ] for label_t in indices ]) self.draw() self.score_ = self.estimator.score(X, y) return self.score_
[ "def", "score", "(", "self", ",", "X", ",", "y", ",", "*", "*", "kwargs", ")", ":", "# We're relying on predict to raise NotFitted", "y_pred", "=", "self", ".", "predict", "(", "X", ")", "y_type", ",", "y_true", ",", "y_pred", "=", "_check_targets", "(", ...
32.384615
20.961538
def setup(self, extra_args=tuple()): """ Configure the cluster nodes. Actual action is delegated to the :py:class:`elasticluster.providers.AbstractSetupProvider` that was provided at construction time. :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: bool - True on success, False otherwise """ try: # setup the cluster using the setup provider ret = self._setup_provider.setup_cluster(self, extra_args) except Exception as err: log.error( "The cluster hosts are up and running," " but %s failed to set the cluster up: %s", self._setup_provider.HUMAN_READABLE_NAME, err) ret = False if not ret: log.warning( "Cluster `%s` not yet configured. Please, re-run " "`elasticluster setup %s` and/or check your configuration", self.name, self.name) return ret
[ "def", "setup", "(", "self", ",", "extra_args", "=", "tuple", "(", ")", ")", ":", "try", ":", "# setup the cluster using the setup provider", "ret", "=", "self", ".", "_setup_provider", ".", "setup_cluster", "(", "self", ",", "extra_args", ")", "except", "Exce...
34.935484
19.387097
def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True
[ "def", "restore_config", "(", ")", ":", "cmd", "=", "'Restore-DscConfiguration'", "try", ":", "_pshell", "(", "cmd", ",", "ignore_retcode", "=", "True", ")", "except", "CommandExecutionError", "as", "exc", ":", "if", "'A previous configuration does not exist'", "in"...
25.333333
24.8
def read(self, size=-1): '''This reads at most "size" bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. ''' if size == 0: return self.string_type() if size < 0: # delimiter default is EOF self.expect(self.delimiter) return self.before # I could have done this more directly by not using expect(), but # I deliberately decided to couple read() to expect() so that # I would catch any bugs early and ensure consistent behavior. # It's a little less efficient, but there is less for me to # worry about if I have to later modify read() or expect(). # Note, it's OK if size==-1 in the regex. That just means it # will never match anything in which case we stop only on EOF. cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL) # delimiter default is EOF index = self.expect([cre, self.delimiter]) if index == 0: ### FIXME self.before should be ''. Should I assert this? return self.after return self.before
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "size", "==", "0", ":", "return", "self", ".", "string_type", "(", ")", "if", "size", "<", "0", ":", "# delimiter default is EOF", "self", ".", "expect", "(", "self", ".", "del...
47.714286
23.142857
async def trigger_act(self, addr): """Trigger agent in :attr:`addr` to act. This method is quite inefficient if used repeatedly for a large number of agents. .. seealso:: :py:meth:`creamas.mp.MultiEnvironment.trigger_all` """ r_agent = await self.env.connect(addr, timeout=TIMEOUT) return await r_agent.act()
[ "async", "def", "trigger_act", "(", "self", ",", "addr", ")", ":", "r_agent", "=", "await", "self", ".", "env", ".", "connect", "(", "addr", ",", "timeout", "=", "TIMEOUT", ")", "return", "await", "r_agent", ".", "act", "(", ")" ]
30.666667
21.416667
def attach_team(context, id, team_id): """attach_team(context, id, team_id) Attach a team to a topic. >>> dcictl topic-attach-team [OPTIONS] :param string id: ID of the topic to attach to [required] :param string team_id: ID of the team to attach to this topic [required] """ team_id = team_id or identity.my_team_id(context) result = topic.attach_team(context, id=id, team_id=team_id) utils.format_output(result, context.format)
[ "def", "attach_team", "(", "context", ",", "id", ",", "team_id", ")", ":", "team_id", "=", "team_id", "or", "identity", ".", "my_team_id", "(", "context", ")", "result", "=", "topic", ".", "attach_team", "(", "context", ",", "id", "=", "id", ",", "team...
35.076923
18.076923
def migrate_autoload_details(autoload_details, shell_name, shell_type): """ Migrate autoload details. Add namespace for attributes :param autoload_details: :param shell_name: :param shell_type: :return: """ mapping = {} for resource in autoload_details.resources: resource.model = "{shell_name}.{model}".format(shell_name=shell_name, model=resource.model) mapping[resource.relative_address] = resource.model for attribute in autoload_details.attributes: if not attribute.relative_address: # Root element attribute.attribute_name = "{shell_type}.{attr_name}".format(shell_type=shell_type, attr_name=attribute.attribute_name) else: attribute.attribute_name = "{model}.{attr_name}".format(model=mapping[attribute.relative_address], attr_name=attribute.attribute_name) return autoload_details
[ "def", "migrate_autoload_details", "(", "autoload_details", ",", "shell_name", ",", "shell_type", ")", ":", "mapping", "=", "{", "}", "for", "resource", "in", "autoload_details", ".", "resources", ":", "resource", ".", "model", "=", "\"{shell_name}.{model}\"", "."...
42
30.791667
def expand_dict_as_keys(d): """Expands a dictionary into a list of immutables with cartesian product :param d: dictionary (of strings or lists) :returns: cartesian product of list parts """ to_product = [] for key, values in sorted(d.items()): # if we sort the inputs here, itertools.product will keep a stable sort order for us later key_values = sorted([(key, v) for v in utils.ensure_listable(values) if v is not None]) if key_values: to_product.append(key_values) return list(itertools.product(*to_product))
[ "def", "expand_dict_as_keys", "(", "d", ")", ":", "to_product", "=", "[", "]", "for", "key", ",", "values", "in", "sorted", "(", "d", ".", "items", "(", ")", ")", ":", "# if we sort the inputs here, itertools.product will keep a stable sort order for us later", "key...
43.384615
17.307692
def bookSSE(symbols=None, on_data=None, token='', version=''): '''Book shows IEX’s bids and asks for given symbols. https://iexcloud.io/docs/api/#deep-book Args: symbols (string); Tickers to request on_data (function): Callback on data token (string); Access token version (string); API version ''' return _runSSE('book', symbols, on_data, token, version)
[ "def", "bookSSE", "(", "symbols", "=", "None", ",", "on_data", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "return", "_runSSE", "(", "'book'", ",", "symbols", ",", "on_data", ",", "token", ",", "version", ")" ]
30.615385
20
def sender(self): """ :returns: A :class:`~okcupyd.profile.Profile` instance belonging to the sender of this message. """ return (self._message_thread.user_profile if 'from_me' in self._message_element.attrib['class'] else self._message_thread.correspondent_profile)
[ "def", "sender", "(", "self", ")", ":", "return", "(", "self", ".", "_message_thread", ".", "user_profile", "if", "'from_me'", "in", "self", ".", "_message_element", ".", "attrib", "[", "'class'", "]", "else", "self", ".", "_message_thread", ".", "correspond...
42.625
15.625
def create(self): """ Create an instance of the Parking Planning Service with the typical starting settings. """ self.service.create() os.environ[self.__module__ + '.uri'] = self.service.settings.data['url'] os.environ[self.__module__ + '.zone_id'] = self.get_predix_zone_id()
[ "def", "create", "(", "self", ")", ":", "self", ".", "service", ".", "create", "(", ")", "os", ".", "environ", "[", "self", ".", "__module__", "+", "'.uri'", "]", "=", "self", ".", "service", ".", "settings", ".", "data", "[", "'url'", "]", "os", ...
40.625
17.875
def make_prediction_output_tensors(args, features, input_ops, model_fn_ops, keep_target): """Makes the final prediction output layer.""" target_name = feature_transforms.get_target_name(features) key_names = get_key_names(features) outputs = {} outputs.update({key_name: tf.squeeze(input_ops.features[key_name]) for key_name in key_names}) if is_classification_model(args.model): # build maps from ints to the origional categorical strings. class_names = read_vocab(args, target_name) table = tf.contrib.lookup.index_to_string_table_from_tensor( mapping=class_names, default_value='UNKNOWN') # Get the label of the input target. if keep_target: input_target_label = table.lookup(input_ops.features[target_name]) outputs[PG_TARGET] = tf.squeeze(input_target_label) # TODO(brandondutra): get the score of the target label too. probabilities = model_fn_ops.predictions['probabilities'] # if top_n == 0, this means use all the classes. We will use class names as # probabilities labels. if args.top_n == 0: predicted_index = tf.argmax(probabilities, axis=1) predicted = table.lookup(predicted_index) outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted}) probabilities_list = tf.unstack(probabilities, axis=1) for class_name, p in zip(class_names, probabilities_list): outputs[class_name] = p else: top_n = args.top_n # get top k labels and their scores. (top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n) top_k_labels = table.lookup(tf.to_int64(top_k_indices)) # Write the top_k values using 2*top_n columns. num_digits = int(math.ceil(math.log(top_n, 10))) if num_digits == 0: num_digits = 1 for i in range(0, top_n): # Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'. This # makes sorting the columns easy. 
padded_i = str(i + 1).zfill(num_digits) if i == 0: label_alias = PG_CLASSIFICATION_FIRST_LABEL else: label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i label_tensor_name = (tf.squeeze( tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1]))) if i == 0: score_alias = PG_CLASSIFICATION_FIRST_SCORE else: score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i score_tensor_name = (tf.squeeze( tf.slice(top_k_values, [0, i], [tf.shape(top_k_values)[0], 1]))) outputs.update({label_alias: label_tensor_name, score_alias: score_tensor_name}) else: if keep_target: outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name]) scores = model_fn_ops.predictions['scores'] outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores) return outputs
[ "def", "make_prediction_output_tensors", "(", "args", ",", "features", ",", "input_ops", ",", "model_fn_ops", ",", "keep_target", ")", ":", "target_name", "=", "feature_transforms", ".", "get_target_name", "(", "features", ")", "key_names", "=", "get_key_names", "("...
36.3125
22.0125
def _previous_pages_count(self): 'A generator of previous page integers.' skip = self.skip if skip == 0: return 0 count, remainder = divmod(skip, self.limit) return count
[ "def", "_previous_pages_count", "(", "self", ")", ":", "skip", "=", "self", ".", "skip", "if", "skip", "==", "0", ":", "return", "0", "count", ",", "remainder", "=", "divmod", "(", "skip", ",", "self", ".", "limit", ")", "return", "count" ]
30.857143
14.571429
def add_button_box(self, stdbtns): """Create dialog button box and add it to the dialog layout""" bbox = QDialogButtonBox(stdbtns) run_btn = bbox.addButton(_("Run"), QDialogButtonBox.AcceptRole) run_btn.clicked.connect(self.run_btn_clicked) bbox.accepted.connect(self.accept) bbox.rejected.connect(self.reject) btnlayout = QHBoxLayout() btnlayout.addStretch(1) btnlayout.addWidget(bbox) self.layout().addLayout(btnlayout)
[ "def", "add_button_box", "(", "self", ",", "stdbtns", ")", ":", "bbox", "=", "QDialogButtonBox", "(", "stdbtns", ")", "run_btn", "=", "bbox", ".", "addButton", "(", "_", "(", "\"Run\"", ")", ",", "QDialogButtonBox", ".", "AcceptRole", ")", "run_btn", ".", ...
45.545455
7.272727
def plotBoostTrace(sp, inputVectors, columnIndex): """ Plot boostfactor for a selected column Note that learning is ON for SP here :param sp: sp instance :param inputVectors: input data :param columnIndex: index for the column of interest """ numInputVector, inputSize = inputVectors.shape columnNumber = np.prod(sp.getColumnDimensions()) boostFactorsTrace = np.zeros((columnNumber, numInputVector)) activeDutyCycleTrace = np.zeros((columnNumber, numInputVector)) minActiveDutyCycleTrace = np.zeros((columnNumber, numInputVector)) for i in range(numInputVector): outputColumns = np.zeros(sp.getColumnDimensions(), dtype=uintType) inputVector = copy.deepcopy(inputVectors[i][:]) sp.compute(inputVector, True, outputColumns) boostFactors = np.zeros((columnNumber, ), dtype=realDType) sp.getBoostFactors(boostFactors) boostFactorsTrace[:, i] = boostFactors activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType) sp.getActiveDutyCycles(activeDutyCycle) activeDutyCycleTrace[:, i] = activeDutyCycle minActiveDutyCycle = np.zeros((columnNumber, ), dtype=realDType) sp.getMinActiveDutyCycles(minActiveDutyCycle) minActiveDutyCycleTrace[:, i] = minActiveDutyCycle plt.figure() plt.subplot(2, 1, 1) plt.plot(boostFactorsTrace[columnIndex, :]) plt.ylabel('Boost Factor') plt.subplot(2, 1, 2) plt.plot(activeDutyCycleTrace[columnIndex, :]) plt.plot(minActiveDutyCycleTrace[columnIndex, :]) plt.xlabel(' Time ') plt.ylabel('Active Duty Cycle')
[ "def", "plotBoostTrace", "(", "sp", ",", "inputVectors", ",", "columnIndex", ")", ":", "numInputVector", ",", "inputSize", "=", "inputVectors", ".", "shape", "columnNumber", "=", "np", ".", "prod", "(", "sp", ".", "getColumnDimensions", "(", ")", ")", "boost...
36.487805
16.487805
def get_filename_block_as_codepoints(self): """ TODO: Support tokenized BASIC. Now we only create ASCII BASIC. """ codepoints = [] codepoints += list(string2codepoint(self.filename.ljust(8, " "))) codepoints.append(self.cfg.FTYPE_BASIC) # one byte file type codepoints.append(self.cfg.BASIC_ASCII) # one byte ASCII flag # one byte gap flag (0x00=no gaps, 0xFF=gaps) # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4231&p=9110#p9110 codepoints.append(self.gap_flag) # machine code starting/loading address if self.file_type != self.cfg.FTYPE_BASIC: # BASIC programm (0x00) codepoints = iter(codepoints) self.start_address = get_word(codepoints) log.info("machine code starting address: %s" % hex(self.start_address)) self.load_address = get_word(codepoints) log.info("machine code loading address: %s" % hex(self.load_address)) else: # not needed in BASIC files # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4341&p=9109#p9109 pass log.debug("filename block: %s" % pformat_codepoints(codepoints)) return codepoints
[ "def", "get_filename_block_as_codepoints", "(", "self", ")", ":", "codepoints", "=", "[", "]", "codepoints", "+=", "list", "(", "string2codepoint", "(", "self", ".", "filename", ".", "ljust", "(", "8", ",", "\" \"", ")", ")", ")", "codepoints", ".", "appen...
42.586207
24.172414
def list2cmdline(seq): """ Translate a sequence of arguments into a command line string, using the same rules as the MS C runtime: 1) Arguments are delimited by white space, which is either a space or a tab. 2) A string surrounded by double quotation marks is interpreted as a single argument, regardless of white space or pipe characters contained within. A quoted string can be embedded in an argument. 3) A double quotation mark preceded by a backslash is interpreted as a literal double quotation mark. 4) Backslashes are interpreted literally, unless they immediately precede a double quotation mark. 5) If backslashes immediately precede a double quotation mark, every pair of backslashes is interpreted as a literal backslash. If the number of backslashes is odd, the last backslash escapes the next double quotation mark as described in rule 3. """ # See # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx # or search http://msdn.microsoft.com for # "Parsing C++ Command-Line Arguments" result = [] needquote = False for arg in seq: bs_buf = [] # Add a space to separate this argument from the others if result: result.append(' ') needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or not arg if needquote: result.append('"') for c in arg: if c == '\\': # Don't know if we need to double yet. bs_buf.append(c) elif c == '"': # Double backslashes. result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') else: # Normal char if bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) # Add remaining backslashes, if any. if bs_buf: result.extend(bs_buf) if needquote: result.extend(bs_buf) result.append('"') return ''.join(result)
[ "def", "list2cmdline", "(", "seq", ")", ":", "# See", "# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx", "# or search http://msdn.microsoft.com for", "# \"Parsing C++ Command-Line Arguments\"", "result", "=", "[", "]", "needquote", "=", "False", "for", "arg", "in", "s...
30.911765
19.411765
def ISINSTANCE(instance, A_tuple): # noqa """ Allows you to do isinstance checks on futures. Really, I discourage this because duck-typing is usually better. But this can provide you with a way to use isinstance with futures. Works with other objects too. :param instance: :param A_tuple: :return: """ try: instance = instance._redpipe_future_result except AttributeError: pass return isinstance(instance, A_tuple)
[ "def", "ISINSTANCE", "(", "instance", ",", "A_tuple", ")", ":", "# noqa", "try", ":", "instance", "=", "instance", ".", "_redpipe_future_result", "except", "AttributeError", ":", "pass", "return", "isinstance", "(", "instance", ",", "A_tuple", ")" ]
27.352941
18.294118
def _result_handler(self, response: Dict[str, Any]): """应答结果响应处理. 将结果解析出来设置给任务对应的Future对象上 Parameters: (response): - 响应的python字典形式数据 Return: (bool): - 准确地说没有错误就会返回True """ res = response.get("MESSAGE") result = res.get("RESULT") return result
[ "def", "_result_handler", "(", "self", ",", "response", ":", "Dict", "[", "str", ",", "Any", "]", ")", ":", "res", "=", "response", ".", "get", "(", "\"MESSAGE\"", ")", "result", "=", "res", ".", "get", "(", "\"RESULT\"", ")", "return", "result" ]
21.333333
17.133333
def ranked_in_list_in(self, leaderboard_name, members, **options): ''' Retrieve a page of leaders from the named leaderboard for a given list of members. @param leaderboard_name [String] Name of the leaderboard. @param members [Array] Member names. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a page of leaders from the named leaderboard for a given list of members. ''' ranks_for_members = [] pipeline = self.redis_connection.pipeline() for member in members: if self.order == self.ASC: pipeline.zrank(leaderboard_name, member) else: pipeline.zrevrank(leaderboard_name, member) pipeline.zscore(leaderboard_name, member) responses = pipeline.execute() for index, member in enumerate(members): data = {} data[self.MEMBER_KEY] = member rank = responses[index * 2] if rank is not None: rank += 1 else: if not options.get('include_missing', True): continue data[self.RANK_KEY] = rank score = responses[index * 2 + 1] if score is not None: score = float(score) data[self.SCORE_KEY] = score ranks_for_members.append(data) if ('with_member_data' in options) and (True == options['with_member_data']): for index, member_data in enumerate(self.members_data_for_in(leaderboard_name, members)): try: ranks_for_members[index][self.MEMBER_DATA_KEY] = member_data except: pass if 'sort_by' in options: sort_value_if_none = float('-inf') if self.order == self.ASC else float('+inf') if self.RANK_KEY == options['sort_by']: ranks_for_members = sorted( ranks_for_members, key=lambda member: member.get(self.RANK_KEY) if member.get(self.RANK_KEY) is not None else sort_value_if_none ) elif self.SCORE_KEY == options['sort_by']: ranks_for_members = sorted( ranks_for_members, key=lambda member: member.get(self.SCORE_KEY) if member.get(self.SCORE_KEY) is not None else sort_value_if_none ) return ranks_for_members
[ "def", "ranked_in_list_in", "(", "self", ",", "leaderboard_name", ",", "members", ",", "*", "*", "options", ")", ":", "ranks_for_members", "=", "[", "]", "pipeline", "=", "self", ".", "redis_connection", ".", "pipeline", "(", ")", "for", "member", "in", "m...
40.04918
23.491803
def b2a_qp(data, quotetabs=False, istext=True, header=False): """quotetabs=True means that tab and space characters are always quoted. istext=False means that \r and \n are treated as regular characters header=True encodes space characters with '_' and requires real '_' characters to be quoted. """ MAXLINESIZE = 76 # See if this string is using CRLF line ends lf = data.find('\n') crlf = lf > 0 and data[lf-1] == '\r' inp = 0 linelen = 0 odata = [] while inp < len(data): c = data[inp] if (c > '~' or c == '=' or (header and c == '_') or (c == '.' and linelen == 0 and (inp+1 == len(data) or data[inp+1] == '\n' or data[inp+1] == '\r')) or (not istext and (c == '\r' or c == '\n')) or ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or (c <= ' ' and c != '\r' and c != '\n' and (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): linelen += 3 if linelen >= MAXLINESIZE: odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 3 odata.append('=' + two_hex_digits(ord(c))) inp += 1 else: if (istext and (c == '\n' or (inp+1 < len(data) and c == '\r' and data[inp+1] == '\n'))): linelen = 0 # Protect against whitespace on end of line if (len(odata) > 0 and (odata[-1] == ' ' or odata[-1] == '\t')): ch = ord(odata[-1]) odata[-1] = '=' odata.append(two_hex_digits(ch)) if crlf: odata.append('\r') odata.append('\n') if c == '\r': inp += 2 else: inp += 1 else: if (inp + 1 < len(data) and data[inp+1] != '\n' and (linelen + 1) >= MAXLINESIZE): odata.append('=') if crlf: odata.append('\r') odata.append('\n') linelen = 0 linelen += 1 if header and c == ' ': c = '_' odata.append(c) inp += 1 return ''.join(odata)
[ "def", "b2a_qp", "(", "data", ",", "quotetabs", "=", "False", ",", "istext", "=", "True", ",", "header", "=", "False", ")", ":", "MAXLINESIZE", "=", "76", "# See if this string is using CRLF line ends", "lf", "=", "data", ".", "find", "(", "'\\n'", ")", "c...
36.014493
15.304348
def parent(groups,ID): """given a groups dictionary and an ID, return its actual parent ID.""" if ID in groups.keys(): return ID # already a parent if not ID in groups.keys(): for actualParent in groups.keys(): if ID in groups[actualParent]: return actualParent # found the actual parent return None
[ "def", "parent", "(", "groups", ",", "ID", ")", ":", "if", "ID", "in", "groups", ".", "keys", "(", ")", ":", "return", "ID", "# already a parent", "if", "not", "ID", "in", "groups", ".", "keys", "(", ")", ":", "for", "actualParent", "in", "groups", ...
39
10.444444
def rewire_inputs(data_list): """Rewire inputs of provided data objects. Input parameter is a list of original and copied data object model instances: ``[{'original': original, 'copy': copy}]``. This function finds which objects reference other objects (in the list) on the input and replaces original objects with the copies (mutates copies' inputs). """ if len(data_list) < 2: return data_list mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list} for bundle in data_list: updated = False copy = bundle['copy'] for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema): name = field_schema['name'] value = fields[name] if field_schema['type'].startswith('data:') and value in mapped_ids: fields[name] = mapped_ids[value] updated = True elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]): fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value] updated = True if updated: copy.save() return data_list
[ "def", "rewire_inputs", "(", "data_list", ")", ":", "if", "len", "(", "data_list", ")", "<", "2", ":", "return", "data_list", "mapped_ids", "=", "{", "bundle", "[", "'original'", "]", ".", "id", ":", "bundle", "[", "'copy'", "]", ".", "id", "for", "b...
34.4
26.485714
def write_options_to_YAML(self, filename): """Writes the options in YAML format to a file. :param str filename: Target file to write the options. """ fd = open(filename, "w") yaml.dump(_options_to_dict(self.gc), fd, default_flow_style=False) fd.close()
[ "def", "write_options_to_YAML", "(", "self", ",", "filename", ")", ":", "fd", "=", "open", "(", "filename", ",", "\"w\"", ")", "yaml", ".", "dump", "(", "_options_to_dict", "(", "self", ".", "gc", ")", ",", "fd", ",", "default_flow_style", "=", "False", ...
36.75
16
def infer_cm(tpm): """Infer the connectivity matrix associated with a state-by-node TPM in multidimensional form. """ network_size = tpm.shape[-1] all_contexts = tuple(all_states(network_size - 1)) cm = np.empty((network_size, network_size), dtype=int) for a, b in np.ndindex(cm.shape): cm[a][b] = infer_edge(tpm, a, b, all_contexts) return cm
[ "def", "infer_cm", "(", "tpm", ")", ":", "network_size", "=", "tpm", ".", "shape", "[", "-", "1", "]", "all_contexts", "=", "tuple", "(", "all_states", "(", "network_size", "-", "1", ")", ")", "cm", "=", "np", ".", "empty", "(", "(", "network_size", ...
37.4
12
def deploy_paying_proxy_contract(self, initializer=b'', deployer_account=None, deployer_private_key=None) -> str: """ Deploy proxy contract. Takes deployer_account (if unlocked in the node) or the deployer private key :param initializer: Initializer :param deployer_account: Unlocked ethereum account :param deployer_private_key: Private key of an ethereum account :return: deployed contract address """ assert deployer_account or deployer_private_key deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key) safe_proxy_contract = get_paying_proxy_contract(self.w3) tx = safe_proxy_contract.constructor(self.master_copy_address, initializer, NULL_ADDRESS, NULL_ADDRESS, 0).buildTransaction({'from': deployer_address}) tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key, public_key=deployer_account) tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60) assert tx_receipt.status return tx_receipt.contractAddress
[ "def", "deploy_paying_proxy_contract", "(", "self", ",", "initializer", "=", "b''", ",", "deployer_account", "=", "None", ",", "deployer_private_key", "=", "None", ")", "->", "str", ":", "assert", "deployer_account", "or", "deployer_private_key", "deployer_address", ...
61.190476
32.428571
def allowed_domains(self):
    """
    Lazily fetched list of allowed domains for a load balancer.

    Allowed domains restrict the domain names that may be used as addresses
    when adding load balancer nodes: verify a domain against this list
    before supplying it in place of a node's address in add_nodes().
    The result is cached on the instance after the first fetch.
    """
    if self._allowed_domains is None:
        resp, body = self.method_get("/loadbalancers/alloweddomains")
        self._allowed_domains = [entry["allowedDomain"]["name"]
                                 for entry in body["allowedDomains"]]
    return self._allowed_domains
[ "def", "allowed_domains", "(", "self", ")", ":", "if", "self", ".", "_allowed_domains", "is", "None", ":", "uri", "=", "\"/loadbalancers/alloweddomains\"", "resp", ",", "body", "=", "self", ".", "method_get", "(", "uri", ")", "dom_list", "=", "body", "[", ...
47.5
18.055556
def add_vertex(self, vertex, **attr):
    """Insert *vertex* into the graph, initialising its adjacency lists and
    recording any keyword vertex attributes."""
    self.vertices[vertex] = []
    self.pred[vertex] = []
    self.succ[vertex] = []
    # Attributes are only stored when at least one was supplied.
    if attr:
        self.nodes[vertex] = attr
[ "def", "add_vertex", "(", "self", ",", "vertex", ",", "*", "*", "attr", ")", ":", "self", ".", "vertices", "[", "vertex", "]", "=", "[", "]", "if", "attr", ":", "self", ".", "nodes", "[", "vertex", "]", "=", "attr", "self", ".", "pred", "[", "v...
29
6.555556
def lv_voltage_deviation(network, mode=None, voltage_levels='mv_lv'):
    """
    Checks for voltage stability issues in LV grids.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
    mode : None or String
        If None voltage at all nodes in LV grid is checked. If mode is set
        to 'stations' only voltage at busbar is checked.
    voltage_levels : :obj:`str`
        Specifies which allowed voltage deviations to use. Possible options
        are:

        * 'mv_lv'
          This is the default. The allowed voltage deviation for nodes in
          the MV grid is the same as for nodes in the LV grid. Further load
          and feed-in case are not distinguished.
        * 'lv'
          Use this to handle allowed voltage deviations in the MV and LV
          grid differently. Here, load and feed-in case are differentiated
          as well.

    Returns
    -------
    :obj:`dict`
        Dictionary with :class:`~.grid.grids.LVGrid` as key and a
        :pandas:`pandas.DataFrame<dataframe>` with its critical nodes,
        sorted descending by voltage deviation, as value. Index of the
        dataframe are all nodes (of type
        :class:`~.grid.components.Generator`,
        :class:`~.grid.components.Load`, etc.) with over-voltage issues.
        Columns are 'v_mag_pu' containing the maximum voltage deviation as
        float and 'time_index' containing the corresponding time step the
        over-voltage occured in as :pandas:`pandas.Timestamp<timestamp>`.

    Notes
    -----
    Over-voltage is determined based on allowed voltage deviations defined
    in the config file 'config_grid_expansion' in section
    'grid_expansion_allowed_voltage_deviations'.

    """
    crit_nodes = {}

    # Allowed voltage bounds keyed '<case>_upper' / '<case>_lower' where
    # case is 'feedin_case' or 'load_case'.
    v_dev_allowed_per_case = {}
    if voltage_levels == 'mv_lv':
        # Uniform limits for all grids: fixed offsets around 1 p.u. from
        # the grid-expansion configuration.
        offset = network.config[
            'grid_expansion_allowed_voltage_deviations']['hv_mv_trafo_offset']
        control_deviation = network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'hv_mv_trafo_control_deviation']
        v_dev_allowed_per_case['feedin_case_upper'] = \
            1 + offset + control_deviation + network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_lv_feedin_case_max_v_deviation']
        v_dev_allowed_per_case['load_case_lower'] = \
            1 + offset - control_deviation - network.config[
                'grid_expansion_allowed_voltage_deviations'][
                'mv_lv_load_case_max_v_deviation']
        v_dev_allowed_per_case['feedin_case_lower'] = 0.9
        v_dev_allowed_per_case['load_case_upper'] = 1.1
        # Translate the per-case bounds into one bound per time step,
        # according to each step's load/feed-in classification.
        v_dev_allowed_upper = \
            network.timeseries.timesteps_load_feedin_case.case.apply(
                lambda _: v_dev_allowed_per_case['{}_upper'.format(_)])
        v_dev_allowed_lower = \
            network.timeseries.timesteps_load_feedin_case.case.apply(
                lambda _: v_dev_allowed_per_case['{}_lower'.format(_)])
    elif voltage_levels == 'lv':
        # Bounds depend on each LV grid's station voltage and are computed
        # per grid inside the loop below.
        pass
    else:
        raise ValueError(
            'Specified mode {} is not a valid option.'.format(voltage_levels))

    for lv_grid in network.mv_grid.lv_grids:
        if mode:
            if mode == 'stations':
                nodes = [lv_grid.station]
            else:
                raise ValueError(
                    "{} is not a valid option for input variable 'mode' in "
                    "function lv_voltage_deviation. Try 'stations' or "
                    "None".format(mode))
        else:
            nodes = lv_grid.graph.nodes()

        if voltage_levels == 'lv':
            if mode == 'stations':
                # get voltage at primary side to calculate upper bound for
                # feed-in case and lower bound for load case
                v_lv_station_primary = network.results.v_res(
                    nodes=[lv_grid.station], level='mv').iloc[:, 0]
                timeindex = v_lv_station_primary.index
                v_dev_allowed_per_case['feedin_case_upper'] = \
                    v_lv_station_primary + network.config[
                        'grid_expansion_allowed_voltage_deviations'][
                        'mv_lv_station_feedin_case_max_v_deviation']
                v_dev_allowed_per_case['load_case_lower'] = \
                    v_lv_station_primary - network.config[
                        'grid_expansion_allowed_voltage_deviations'][
                        'mv_lv_station_load_case_max_v_deviation']
            else:
                # get voltage at secondary side to calculate upper bound for
                # feed-in case and lower bound for load case
                v_lv_station_secondary = network.results.v_res(
                    nodes=[lv_grid.station], level='lv').iloc[:, 0]
                timeindex = v_lv_station_secondary.index
                v_dev_allowed_per_case['feedin_case_upper'] = \
                    v_lv_station_secondary + network.config[
                        'grid_expansion_allowed_voltage_deviations'][
                        'lv_feedin_case_max_v_deviation']
                v_dev_allowed_per_case['load_case_lower'] = \
                    v_lv_station_secondary - network.config[
                        'grid_expansion_allowed_voltage_deviations'][
                        'lv_load_case_max_v_deviation']
            v_dev_allowed_per_case['feedin_case_lower'] = pd.Series(
                0.9, index=timeindex)
            v_dev_allowed_per_case['load_case_upper'] = pd.Series(
                1.1, index=timeindex)

            # maximum allowed voltage deviation in each time step
            v_dev_allowed_upper = []
            v_dev_allowed_lower = []
            for t in timeindex:
                case = \
                    network.timeseries.timesteps_load_feedin_case.loc[
                        t, 'case']
                v_dev_allowed_upper.append(
                    v_dev_allowed_per_case[
                        '{}_upper'.format(case)].loc[t])
                v_dev_allowed_lower.append(
                    v_dev_allowed_per_case[
                        '{}_lower'.format(case)].loc[t])
            v_dev_allowed_upper = pd.Series(v_dev_allowed_upper,
                                            index=timeindex)
            v_dev_allowed_lower = pd.Series(v_dev_allowed_lower,
                                            index=timeindex)

        crit_nodes_grid = _voltage_deviation(
            network, nodes, v_dev_allowed_upper, v_dev_allowed_lower,
            voltage_level='lv')

        if not crit_nodes_grid.empty:
            # Worst offenders first.
            crit_nodes[lv_grid] = crit_nodes_grid.sort_values(
                by=['v_mag_pu'], ascending=False)

    if crit_nodes:
        if mode == 'stations':
            logger.debug(
                '==> {} LV station(s) has/have voltage issues.'.format(
                    len(crit_nodes)))
        else:
            logger.debug(
                '==> {} LV grid(s) has/have voltage issues.'.format(
                    len(crit_nodes)))
    else:
        if mode == 'stations':
            logger.debug('==> No voltage issues in LV stations.')
        else:
            logger.debug('==> No voltage issues in LV grids.')

    return crit_nodes
[ "def", "lv_voltage_deviation", "(", "network", ",", "mode", "=", "None", ",", "voltage_levels", "=", "'mv_lv'", ")", ":", "crit_nodes", "=", "{", "}", "v_dev_allowed_per_case", "=", "{", "}", "if", "voltage_levels", "==", "'mv_lv'", ":", "offset", "=", "netw...
43.237805
21.25
def sleeping_func(arg, secs=10, result_queue=None):
    """Example worker task: sleep *secs* seconds, then deliver *arg* either
    through *result_queue* (when one is given) or as the return value."""
    import time
    time.sleep(secs)
    if result_queue is None:
        return arg
    result_queue.put(arg)
[ "def", "sleeping_func", "(", "arg", ",", "secs", "=", "10", ",", "result_queue", "=", "None", ")", ":", "import", "time", "time", ".", "sleep", "(", "secs", ")", "if", "result_queue", "is", "not", "None", ":", "result_queue", ".", "put", "(", "arg", ...
29.625
16
def show_editor_buffer(self, editor_buffer):
    """
    Open this `EditorBuffer` in the active window.
    """
    assert isinstance(editor_buffer, EditorBuffer)
    # Rebinding the active window's buffer attribute is the whole operation;
    # presumably the UI re-renders from this attribute -- confirm in caller.
    self.active_window.editor_buffer = editor_buffer
[ "def", "show_editor_buffer", "(", "self", ",", "editor_buffer", ")", ":", "assert", "isinstance", "(", "editor_buffer", ",", "EditorBuffer", ")", "self", ".", "active_window", ".", "editor_buffer", "=", "editor_buffer" ]
38.333333
8
def commit(self, project_id, mode, mutations, transaction=None):
    """Perform a ``commit`` request against Datastore.

    :type project_id: str
    :param project_id: The project to connect to. This is usually your
        project name in the cloud console.

    :type mode: :class:`.gapic.datastore.v1.enums.CommitRequest.Mode`
    :param mode: The type of commit to perform. Expected to be one of
        ``TRANSACTIONAL`` or ``NON_TRANSACTIONAL``.

    :type mutations: list
    :param mutations: List of :class:`.datastore_pb2.Mutation`, the
        mutations to perform.

    :type transaction: bytes
    :param transaction: (Optional) The transaction ID returned from
        :meth:`begin_transaction`. Non-transactional commits must pass
        :data:`None`.

    :rtype: :class:`.datastore_pb2.CommitResponse`
    :returns: The returned protobuf response object.
    """
    # Build the request protobuf and hand it straight to the RPC helper.
    return _rpc(
        self.client._http,
        project_id,
        "commit",
        self.client._base_url,
        _datastore_pb2.CommitRequest(
            project_id=project_id,
            mode=mode,
            transaction=transaction,
            mutations=mutations,
        ),
        _datastore_pb2.CommitResponse,
    )
[ "def", "commit", "(", "self", ",", "project_id", ",", "mode", ",", "mutations", ",", "transaction", "=", "None", ")", ":", "request_pb", "=", "_datastore_pb2", ".", "CommitRequest", "(", "project_id", "=", "project_id", ",", "mode", "=", "mode", ",", "tran...
36.918919
20.243243
def replace_all(filepath, searchExp, replaceExp):
    """
    Replace all the ocurrences (in a file) of a string with another value,
    rewriting the file in place.

    :param filepath: path of the file to rewrite
    :param searchExp: substring to search for
    :param replaceExp: replacement text

    With ``inplace=True`` every line written to stdout inside the loop is
    redirected into the file, so each line must be emitted whether or not
    it was changed.
    """
    # Use the FileInput context manager so the input stream (and its
    # in-place backup machinery) is always closed, even if an exception is
    # raised mid-rewrite; the previous version leaked the handle on error.
    with fileinput.input(filepath, inplace=True) as stream:
        for line in stream:
            if searchExp in line:
                line = line.replace(searchExp, replaceExp)
            sys.stdout.write(line)
[ "def", "replace_all", "(", "filepath", ",", "searchExp", ",", "replaceExp", ")", ":", "for", "line", "in", "fileinput", ".", "input", "(", "filepath", ",", "inplace", "=", "1", ")", ":", "if", "searchExp", "in", "line", ":", "line", "=", "line", ".", ...
37.875
11.375
def main():
    """Main entry point for CLI commands: parse argv with docopt and
    dispatch to the requested sub-command."""
    options = docopt(__doc__, version=__version__)
    if not options['segment']:
        return
    segment(options['<file>'],
            options['--output'],
            options['--target-duration'],
            options['--mpegts'])
[ "def", "main", "(", ")", ":", "options", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ")", "if", "options", "[", "'segment'", "]", ":", "segment", "(", "options", "[", "'<file>'", "]", ",", "options", "[", "'--output'", "]", ",", ...
29.1
13.5
def depolarizeCells(self, basalInput, apicalInput, learn):
    """
    Calculate predictions.

    @param basalInput (numpy array)
    List of active input bits for the basal dendrite segments

    @param apicalInput (numpy array)
    List of active input bits for the apical dendrite segments

    @param learn (bool)
    Whether learning is enabled. Some TM implementations may depolarize
    cells differently or do segment activity bookkeeping when learning is
    enabled.
    """
    # Apical activity is computed first because it may lower the basal
    # activation threshold for some cells below.
    (activeApicalSegments,
     matchingApicalSegments,
     apicalPotentialOverlaps) = self._calculateApicalSegmentActivity(
         self.apicalConnections, apicalInput, self.connectedPermanence,
         self.activationThreshold, self.minThreshold)

    if learn or self.useApicalModulationBasalThreshold==False:
        # Modulation disabled (or learning): no cell gets a reduced
        # basal threshold.
        reducedBasalThresholdCells = ()
    else:
        # Cells whose apical segments are active need less basal support.
        reducedBasalThresholdCells = self.apicalConnections.mapSegmentsToCells(
            activeApicalSegments)

    (activeBasalSegments,
     matchingBasalSegments,
     basalPotentialOverlaps) = self._calculateBasalSegmentActivity(
         self.basalConnections, basalInput, reducedBasalThresholdCells,
         self.connectedPermanence, self.activationThreshold,
         self.minThreshold, self.reducedBasalThreshold)

    predictedCells = self._calculatePredictedCells(activeBasalSegments,
                                                   activeApicalSegments)

    # Stash everything on the instance for the subsequent activation phase
    # and for external inspection.
    self.predictedCells = predictedCells
    self.activeBasalSegments = activeBasalSegments
    self.activeApicalSegments = activeApicalSegments
    self.matchingBasalSegments = matchingBasalSegments
    self.matchingApicalSegments = matchingApicalSegments
    self.basalPotentialOverlaps = basalPotentialOverlaps
    self.apicalPotentialOverlaps = apicalPotentialOverlaps
[ "def", "depolarizeCells", "(", "self", ",", "basalInput", ",", "apicalInput", ",", "learn", ")", ":", "(", "activeApicalSegments", ",", "matchingApicalSegments", ",", "apicalPotentialOverlaps", ")", "=", "self", ".", "_calculateApicalSegmentActivity", "(", "self", "...
40.302326
21.697674
def createDataFromFile(self, filePath, inputEncoding = None, defaultFps = None):
    """Fetch a given filePath and parse its contents.

    May raise the following exceptions:
    * RuntimeError - generic exception telling that parsing was
      unsuccessfull
    * IOError - failed to open a file at given filePath

    @return SubtitleData filled with non-empty, default datafields. Client
    should modify them and then perform an add/update operation"""
    source = File(filePath)

    # Detect the encoding when none was given; either way normalise to
    # lower case.
    encoding = (source.detectEncoding() if inputEncoding is None
                else inputEncoding).lower()

    if defaultFps is not None:
        video_info = VideoInfo(defaultFps)
    else:
        video_info = source.detectFps()

    data = SubtitleData()
    data.subtitles = self._parseFile(source, encoding, video_info.fps)
    data.fps = video_info.fps
    data.inputEncoding = encoding
    data.outputEncoding = encoding
    data.outputFormat = self._parser.parsedFormat()
    data.videoPath = video_info.videoPath
    return data
[ "def", "createDataFromFile", "(", "self", ",", "filePath", ",", "inputEncoding", "=", "None", ",", "defaultFps", "=", "None", ")", ":", "file_", "=", "File", "(", "filePath", ")", "if", "inputEncoding", "is", "None", ":", "inputEncoding", "=", "file_", "."...
40.518519
21.62963
def Brkic_2011_1(Re, eD):
    r'''Calculates Darcy friction factor using the method in Brkic (2011),
    an explicit approximation to the Colebrook relation.

    .. math::
        f_d = [-2\log(10^{-0.4343\beta} + \frac{\epsilon}{3.71D})]^{-2}

    .. math::
        \beta = \ln \frac{Re}{1.816\ln\left(\frac{1.1Re}
        {\ln(1+1.1Re)}\right)}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    No range of validity specified for this equation.

    Examples
    --------
    >>> Brkic_2011_1(1E5, 1E-4)
    0.01812455874141297

    References
    ----------
    Brkic, Dejan. "Review of Explicit Approximations to the Colebrook
    Relation for Flow Friction." Journal of Petroleum Science and
    Engineering 77, no. 1 (April 2011): 34-48.
    doi:10.1016/j.petrol.2011.02.006. As presented in Winning & Coole,
    Flow, Turbulence and Combustion 90 (2013): 1-27.
    '''
    inner = log(1 + 1.1*Re)
    beta = log(Re/(1.816*log(1.1*Re/inner)))
    # Argument of the outer Colebrook-style logarithm.
    colebrook_arg = 10**(-0.4343*beta) + eD/3.71
    return (-2*log10(colebrook_arg))**-2
[ "def", "Brkic_2011_1", "(", "Re", ",", "eD", ")", ":", "beta", "=", "log", "(", "Re", "/", "(", "1.816", "*", "log", "(", "1.1", "*", "Re", "/", "log", "(", "1", "+", "1.1", "*", "Re", ")", ")", ")", ")", "return", "(", "-", "2", "*", "lo...
28.409091
25
def search_golr_wrap(id, category, **args):
    """Perform association searches in both directions.

    Queries once with *id* as the object (subjects restricted to
    *category*) and once with *id* as the subject (objects restricted to
    *category*), concatenating the association lists.

    :param id: entity identifier to search around
    :param category: category constraint applied to the opposite side
    :param args: passed through to ``search_compact_wrap``
    :return: tuple ``(associations, facets)``
    """
    # Dead commented-out calls to search_associations_compact removed.
    fwd_assocs, fwd_facets = search_compact_wrap(
        object=id, subject_category=category, **args)
    rev_assocs, rev_facets = search_compact_wrap(
        subject=id, object_category=category, **args)
    # Prefer facets from the reverse query when it returned any results,
    # matching the original fallback behavior.
    facets = rev_facets if rev_assocs else fwd_facets
    return fwd_assocs + rev_assocs, facets
[ "def", "search_golr_wrap", "(", "id", ",", "category", ",", "*", "*", "args", ")", ":", "#assocs1 = search_associations_compact(object=id, subject_category=category, **args)", "#assocs2 = search_associations_compact(subject=id, object_category=category, **args)", "assocs1", ",", "fac...
46.083333
20.916667
def fix_config(self, options):
    """
    Fixes the options, if necessary. I.e., it adds all required elements
    to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(Trigger, self).fix_config(options)

    opt = "condition"
    # Fill in the default condition and its help text only when absent.
    options.setdefault(opt, "True")
    self.help.setdefault(
        opt,
        "The (optional) condition for teeing off the tokens; uses the 'eval' method, "
        "ie the expression must evaluate to a boolean value; storage values placeholders "
        "'@{...}' get replaced with their string representations before evaluating the "
        "expression (string).")

    return options
[ "def", "fix_config", "(", "self", ",", "options", ")", ":", "options", "=", "super", "(", "Trigger", ",", "self", ")", ".", "fix_config", "(", "options", ")", "opt", "=", "\"condition\"", "if", "opt", "not", "in", "options", ":", "options", "[", "opt",...
41.190476
25
def clickMouseButtonLeft(self, coord, interval=None):
    """Click the left mouse button without modifiers pressed.

    Parameters: coordinates to click on screen (tuple (x, y))
    Returns: None
    """
    # Zero modifier flags: plain, unmodified left click.
    self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, 0)
    if not interval:
        self._postQueuedEvents()
    else:
        self._postQueuedEvents(interval=interval)
[ "def", "clickMouseButtonLeft", "(", "self", ",", "coord", ",", "interval", "=", "None", ")", ":", "modFlags", "=", "0", "self", ".", "_queueMouseButton", "(", "coord", ",", "Quartz", ".", "kCGMouseButtonLeft", ",", "modFlags", ")", "if", "interval", ":", "...
33.153846
19.615385
def get_fields_by_class(cls, field_class):
    """
    Return a list of field names matching a field class

    :param field_class: field class object
    :return: list
    """
    fields = getattr(cls, '_fields')
    return [name for name, field in fields.items()
            if isinstance(field, field_class)]
[ "def", "get_fields_by_class", "(", "cls", ",", "field_class", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "val", "in", "getattr", "(", "cls", ",", "'_fields'", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "field_class...
26.769231
17.076923
def admin_emails(doc):
    """View for admin email addresses (organisation and global).

    Yields ``(org_id, email)`` pairs; ``org_id`` is ``None`` for a global
    administrator role.
    """
    # Only active user documents contribute rows.
    if doc.get('type') != 'user' or doc.get('state') == 'deactivated':
        return
    if doc.get('role') == 'administrator':
        yield None, doc['email']
    for org_id, state in doc.get('organisations', {}).items():
        if state.get('role') == 'administrator' and state.get('state') != 'deactivated':
            yield org_id, doc['email']
[ "def", "admin_emails", "(", "doc", ")", ":", "if", "doc", ".", "get", "(", "'type'", ")", "==", "'user'", "and", "doc", ".", "get", "(", "'state'", ")", "!=", "'deactivated'", ":", "if", "doc", ".", "get", "(", "'role'", ")", "==", "'administrator'",...
55.125
17.375
def set_access_control_lists(self, access_control_lists, security_namespace_id):
    """SetAccessControlLists.

    Create or update one or more access control lists. All data that
    currently exists for the ACLs supplied will be overwritten.

    :param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.security.models.VssJsonCollectionWrapper>` access_control_lists: A list of ACLs to create or update.
    :param str security_namespace_id: Security namespace identifier.
    """
    route_values = {}
    if security_namespace_id is not None:
        route_values['securityNamespaceId'] = self._serialize.url(
            'security_namespace_id', security_namespace_id, 'str')
    body = self._serialize.body(access_control_lists,
                                'VssJsonCollectionWrapper')
    self._send(http_method='POST',
               location_id='18a2ad18-7571-46ae-bec7-0c7da1495885',
               version='5.0',
               route_values=route_values,
               content=body)
[ "def", "set_access_control_lists", "(", "self", ",", "access_control_lists", ",", "security_namespace_id", ")", ":", "route_values", "=", "{", "}", "if", "security_namespace_id", "is", "not", "None", ":", "route_values", "[", "'securityNamespaceId'", "]", "=", "self...
66.666667
32.933333
def normalize_serial_number(sn, max_length=None, left_fill='0', right_fill=str(), blank=str(),
                            valid_chars=' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
                            invalid_chars=None, strip_whitespace=True, join=False, na=rex.nones):
    r"""Make a string compatible with typical serial number requirements

    # Default configuration strips internal and external whitespaces and
    # retains only the last 10 characters
    >>> normalize_serial_number('1C 234567890 ')
    '0234567890'
    >>> normalize_serial_number('1C 234567890 ', max_length=20)
    '000000001C 234567890'
    >>> normalize_serial_number('Unknown', blank=None, left_fill=str())
    ''
    >>> normalize_serial_number('N/A', blank='', left_fill=str())
    'A'
    >>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='')
    '1C 234567890'

    Notice how the max_length setting (20) carries over from the previous
    test!

    >>> len(normalize_serial_number('Unknown', blank=False))
    20
    >>> normalize_serial_number('Unknown', blank=False)
    '00000000000000000000'
    >>> normalize_serial_number(' \t1C\t-\t234567890 \x00\x7f', max_length=14, left_fill='0',
    ...     valid_chars='0123456789ABC', invalid_chars=None, join=True)
    '1C\t-\t234567890'

    Notice how the max_length setting carries over from the previous test!

    >>> len(normalize_serial_number('Unknown', blank=False))
    14

    Restore the default max_length setting

    >>> len(normalize_serial_number('Unknown', blank=False, max_length=10))
    10

    >>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='')  # doctest: +NORMALIZE_WHITESPACE
    'NO SERIAL'
    >>> normalize_serial_number('NO SERIAL', blank='', left_fill='')  # doctest: +NORMALIZE_WHITESPACE
    'NO SERIAL'
    >>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789')
    '0234567890'
    """
    # All 9 kwargs have persistent default values stored as attributes of
    # the funcion instance: passing None reuses the previously stored
    # value, while passing anything else updates the stored value for
    # subsequent calls (see the "carries over" doctests above).
    if max_length is None:
        max_length = normalize_serial_number.max_length
    else:
        normalize_serial_number.max_length = max_length
    if left_fill is None:
        left_fill = normalize_serial_number.left_fill
    else:
        normalize_serial_number.left_fill = left_fill
    if right_fill is None:
        right_fill = normalize_serial_number.right_fill
    else:
        normalize_serial_number.right_fill = right_fill
    if blank is None:
        blank = normalize_serial_number.blank
    else:
        normalize_serial_number.blank = blank
    if valid_chars is None:
        valid_chars = normalize_serial_number.valid_chars
    else:
        normalize_serial_number.valid_chars = valid_chars
    if invalid_chars is None:
        invalid_chars = normalize_serial_number.invalid_chars
    else:
        normalize_serial_number.invalid_chars = invalid_chars
    if strip_whitespace is None:
        strip_whitespace = normalize_serial_number.strip_whitespace
    else:
        normalize_serial_number.strip_whitespace = strip_whitespace
    if join is None:
        join = normalize_serial_number.join
    else:
        normalize_serial_number.join = join
    if na is None:
        na = normalize_serial_number.na
    else:
        normalize_serial_number.na = na

    if invalid_chars is None:
        # Default: every ASCII character not listed as valid is invalid.
        invalid_chars = (c for c in charlist.ascii_all if c not in valid_chars)
    # Flatten to a single string (no-op when already a string).
    invalid_chars = ''.join(invalid_chars)

    # Trim invalid characters from both ends, then optional whitespace.
    sn = str(sn).strip(invalid_chars)
    if strip_whitespace:
        sn = sn.strip()
    if invalid_chars:
        if join:
            # Remove invalid characters everywhere, keeping the remainder
            # joined together.
            # NOTE(review): str.translate expects ordinal keys on Python 3;
            # this char-keyed dict only works on Python 2 -- verify target.
            sn = sn.translate(dict(zip(invalid_chars, [''] * len(invalid_chars))))
        else:
            # Otherwise keep only the final run of valid characters.
            sn = multisplit(sn, invalid_chars)[-1]
    # Truncate, keeping the trailing characters.
    sn = sn[-max_length:]
    if strip_whitespace:
        sn = sn.strip()
    if na:
        # NOTE(review): when `na` is a container that does not contain
        # `sn`, control falls through to `na.match(sn)` and raises
        # AttributeError -- confirm whether `na` may be a non-regex here.
        if isinstance(na, (tuple, set, dict, list)) and sn in na:
            sn = ''
        elif na.match(sn):
            sn = ''
    if not sn and not (blank is False):
        return blank
    # NOTE(review): `max_length - len(sn) / len(fill)` divides only
    # len(sn); likely intended (max_length - len(sn)) / len(fill), and
    # relies on Python 2 integer division -- verify before porting.
    if left_fill:
        sn = left_fill * int(max_length - len(sn) / len(left_fill)) + sn
    if right_fill:
        sn = sn + right_fill * (max_length - len(sn) / len(right_fill))
    return sn
[ "def", "normalize_serial_number", "(", "sn", ",", "max_length", "=", "None", ",", "left_fill", "=", "'0'", ",", "right_fill", "=", "str", "(", ")", ",", "blank", "=", "str", "(", ")", ",", "valid_chars", "=", "' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM...
38.490909
25.136364
def spacing(self):
    """
    Get image spacing

    Returns
    -------
    tuple
    """
    # Resolve the library function for this pixel-type suffix and apply it
    # directly to the underlying pointer.
    return utils.get_lib_fn('getSpacing%s' % self._libsuffix)(self.pointer)
[ "def", "spacing", "(", "self", ")", ":", "libfn", "=", "utils", ".", "get_lib_fn", "(", "'getSpacing%s'", "%", "self", ".", "_libsuffix", ")", "return", "libfn", "(", "self", ".", "pointer", ")" ]
20.6
18.4
def unmarshall(values):
    """Transform a response payload from DynamoDB to a native dict

    :param dict values: The response payload from DynamoDB
    :rtype: dict
    :raises ValueError: if an unsupported type code is encountered
    """
    return {key: _unmarshall_dict(item) for key, item in values.items()}
[ "def", "unmarshall", "(", "values", ")", ":", "unmarshalled", "=", "{", "}", "for", "key", "in", "values", ":", "unmarshalled", "[", "key", "]", "=", "_unmarshall_dict", "(", "values", "[", "key", "]", ")", "return", "unmarshalled" ]
27.923077
19.923077
def summary(self, varnames=None, ranefs=False, transformed=False, hpd=.95,
            quantiles=None, diagnostics=['effective_n', 'gelman_rubin']):
    '''Returns a DataFrame of summary/diagnostic statistics for the
    parameters.

    Args:
        varnames (list): List of variable names to include; if None
            (default), all eligible variables are included.
        ranefs (bool): Whether or not to include random effects in the
            summary. Default is False.
        transformed (bool): Whether or not to include internally
            transformed variables in the summary. Default is False.
        hpd (float, between 0 and 1): Show Highest Posterior Density (HPD)
            intervals with specified width/proportion for all parameters.
            If None, HPD intervals are suppressed.
        quantiles (float, list): Show specified quantiles of the marginal
            posterior distributions for all parameters. If list, must be a
            list of floats between 0 and 1. If None (default), no
            quantiles are shown.
        diagnostics (list): List of functions to use to compute convergence
            diagnostics for all parameters. Each element can be either a
            callable or a string giving the name of a function in the
            diagnostics module. Valid strings are 'gelman_rubin' and
            'effective_n'. Functions must accept a MCMCResults object as
            the sole input, and return a DataFrame with one labeled row
            per parameter. If None, no convergence diagnostics are
            computed.

    Note: the mutable list default for ``diagnostics`` is shared across
    calls; it is only read here, but callers must not mutate it.
    '''
    samples = self.to_df(varnames, ranefs, transformed)

    # build the basic DataFrame
    df = pd.DataFrame({'mean': samples.mean(0), 'sd': samples.std(0)})

    # add user-specified quantiles
    if quantiles is not None:
        # A bare float is promoted to a one-element list.
        if not isinstance(quantiles, (list, tuple)):
            quantiles = [quantiles]
        qnames = ['q' + str(q) for q in quantiles]
        df = df.merge(samples.quantile(quantiles).set_index([qnames]).T,
                      left_index=True, right_index=True)

    # add HPD intervals
    if hpd is not None:
        df = df.merge(samples.apply(self._hpd_interval, axis=0, width=hpd).T,
                      left_index=True, right_index=True)

    # add convergence diagnostics
    if diagnostics is not None:
        _names = self._filter_names(ranefs=ranefs, transformed=transformed)
        _self = self[_names]
        # Diagnostics are only meaningful across multiple chains.
        if self.n_chains > 1:
            for diag in diagnostics:
                # String entries name functions in the diagnostics module.
                if isinstance(diag, string_types):
                    diag = getattr(bmd, diag)
                df = df.merge(diag(_self), left_index=True, right_index=True)
        else:
            warnings.warn('Multiple MCMC chains are required in order '
                          'to compute convergence diagnostics.')

    # For bernoulli models, tell user which event is being modeled
    if self.model.family.name == 'bernoulli':
        # Index of the first observation whose outcome is (numerically) 1.
        event = next(i for i, x in enumerate(self.model.y.data.flatten())
                     if x > .99)
        warnings.warn('Modeling the probability that {}==\'{}\''.format(
            self.model.y.name,
            str(self.model.clean_data[self.model.y.name][event])))

    return df
[ "def", "summary", "(", "self", ",", "varnames", "=", "None", ",", "ranefs", "=", "False", ",", "transformed", "=", "False", ",", "hpd", "=", ".95", ",", "quantiles", "=", "None", ",", "diagnostics", "=", "[", "'effective_n'", ",", "'gelman_rubin'", "]", ...
50.411765
25
def clean_draft_pages_from_space(confluence, space_key, count, date_now):
    """
    Remove draft pages from space using datetime.now

    :param confluence:
    :param space_key:
    :param count:
    :param date_now:
    :return: int counter
    """
    drafts = confluence.get_all_draft_pages_from_space(space=space_key,
                                                       start=0, limit=500)
    for draft in drafts:
        page_id = draft['id']
        full_draft = confluence.get_draft_page_by_id(page_id=page_id)
        last_date_string = full_draft['version']['when']
        # Strip milliseconds and the UTC offset suffix before parsing.
        last_date = datetime.datetime.strptime(
            last_date_string.replace(".000", "")[:-6], "%Y-%m-%dT%H:%M:%S")
        # Only drafts older than the retention window are removed.
        if (date_now - last_date) <= datetime.timedelta(days=DRAFT_DAYS):
            continue
        count += 1
        print("Removing page with page id: " + page_id)
        confluence.remove_page_as_draft(page_id=page_id)
        print("Removed page with date " + last_date_string)
    return count
[ "def", "clean_draft_pages_from_space", "(", "confluence", ",", "space_key", ",", "count", ",", "date_now", ")", ":", "pages", "=", "confluence", ".", "get_all_draft_pages_from_space", "(", "space", "=", "space_key", ",", "start", "=", "0", ",", "limit", "=", "...
43.333333
22.47619
def stream_header_legacy(self, f):
    """Stream the block header in the standard way to the file-like
    object f."""
    # Format "L##LL" is interpreted by stream_struct; presumably 'L' is a
    # 32-bit unsigned int and '#' a hash field -- confirm against
    # stream_struct's format-code table.
    stream_struct("L##LL", f, self.version, self.previous_block_hash,
                  self.merkle_root, self.timestamp, self.difficulty)
    # Legacy header carries only the first 4 bytes of the nonce.
    f.write(self.nonce[:4])
[ "def", "stream_header_legacy", "(", "self", ",", "f", ")", ":", "stream_struct", "(", "\"L##LL\"", ",", "f", ",", "self", ".", "version", ",", "self", ".", "previous_block_hash", ",", "self", ".", "merkle_root", ",", "self", ".", "timestamp", ",", "self", ...
58.8
16
async def download_file(self, file_path: base.String,
                        destination: Optional[base.InputFile] = None,
                        timeout: Optional[base.Integer] = sentinel,
                        chunk_size: Optional[base.Integer] = 65536,
                        seek: Optional[base.Boolean] = True) -> Union[io.BytesIO, io.FileIO]:
    """
    Download file by file_path to destination

    if You want to automatically create destination (:class:`io.BytesIO`)
    use default value of destination and handle result of this method.

    :param file_path: file path on telegram server (You can get it from
        :obj:`aiogram.types.File`)
    :type file_path: :obj:`str`
    :param destination: filename or instance of :class:`io.IOBase`.
        For e. g. :class:`io.BytesIO`
    :param timeout: Integer
    :param chunk_size: Integer
    :param seek: Boolean - go to start of file when downloading is
        finished.
    :return: destination
    """
    # No destination supplied: collect the download in memory.
    if destination is None:
        destination = io.BytesIO()

    url = api.Methods.file_url(token=self.__token, path=file_path)

    # Accept either an already-open file-like object or a filename.
    dest = destination if isinstance(destination, io.IOBase) else open(destination, 'wb')
    async with self.session.get(url, timeout=timeout, proxy=self.proxy,
                                proxy_auth=self.proxy_auth) as response:
        # Stream the body chunk by chunk so large files are never held
        # fully in memory.
        while True:
            chunk = await response.content.read(chunk_size)
            if not chunk:
                break
            dest.write(chunk)
            dest.flush()
    if seek:
        # Rewind so the caller can read the result immediately.
        dest.seek(0)
    return dest
[ "async", "def", "download_file", "(", "self", ",", "file_path", ":", "base", ".", "String", ",", "destination", ":", "Optional", "[", "base", ".", "InputFile", "]", "=", "None", ",", "timeout", ":", "Optional", "[", "base", ".", "Integer", "]", "=", "s...
45.885714
26.171429
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname):
    """Attempt to fix a bad PDF file with a Ghostscript command, writing
    the output PDF to a temporary file and returning the filename.  Caller
    is responsible for deleting the file."""
    if not gs_executable:
        # Lazily locate Ghostscript; this call terminates the program if
        # no executable can be found.
        init_and_test_gs_executable(exit_on_fail=True)
    temp_file_name = get_temporary_filename(extension=".pdf")
    gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name,
                      "-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite",
                      input_doc_fname]
    try:
        # Output is captured (and echoed) but otherwise unused; the real
        # result is the rewritten temp file.
        gs_output = get_external_subprocess_output(gs_run_command,
                                                   print_output=True,
                                                   indent_string=" ",
                                                   env=gs_environment)
    except subprocess.CalledProcessError:
        # Ghostscript failed outright: report and abort the whole run.
        print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit"
              "\nstatus when attempting to fix the file:\n ", input_doc_fname,
              file=sys.stderr)
        cleanup_and_exit(1)
    except UnicodeDecodeError:
        # Ghostscript sometimes emits bytes that are not valid UTF-8;
        # treat that as non-fatal since the output file may still be fine.
        print("\nWarning in pdfCropMargins: In attempting to repair the PDF file"
              "\nGhostscript produced a message containing characters which cannot"
              "\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.",
              file=sys.stderr)
    return temp_file_name
[ "def", "fix_pdf_with_ghostscript_to_tmp_file", "(", "input_doc_fname", ")", ":", "if", "not", "gs_executable", ":", "init_and_test_gs_executable", "(", "exit_on_fail", "=", "True", ")", "temp_file_name", "=", "get_temporary_filename", "(", "extension", "=", "\".pdf\"", ...
56.782609
26.869565
def humanize_timedelta(seconds):
    """Render a duration given in seconds as a short human-readable string.

    Only the two or three most significant units are shown (e.g. '1d 2h 3m',
    '2h 5m', '4m 10s', '7s'); zero-valued trailing units are omitted.
    """
    hours, remainder = divmod(seconds, 3600)
    days, hours = divmod(hours, 24)
    minutes, seconds = divmod(remainder, 60)

    # Pick the units to display based on the largest non-zero one; seconds
    # are only shown when the duration is under an hour.
    if days:
        units = [(days, 'd'), (hours, 'h'), (minutes, 'm')]
    elif hours:
        units = [(hours, 'h'), (minutes, 'm')]
    elif minutes:
        units = [(minutes, 'm'), (seconds, 's')]
    else:
        return '{}s'.format(seconds)
    return ' '.join('{}{}'.format(value, suffix)
                    for value, suffix in units if value)
[ "def", "humanize_timedelta", "(", "seconds", ")", ":", "hours", ",", "remainder", "=", "divmod", "(", "seconds", ",", "3600", ")", "days", ",", "hours", "=", "divmod", "(", "hours", ",", "24", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remain...
26
16.185185
def argsplit(args, sep=','):
    """Split JS argument text on ``sep``, ignoring separators nested inside
    brackets. Pass args *without* the surrounding brackets! Also used to
    parse array and object elements, and more."""
    splits = []
    split_start = 0   # index in args where the current piece begins
    consumed = 0      # how many characters of args we have walked past
    for fragment in bracket_split(args, brackets=['()', '[]', '{}']):
        # Bracketed fragments are opaque: separators inside them don't count.
        if fragment[0] not in {'(', '[', '{'}:
            for offset, ch in enumerate(fragment):
                if ch == sep:
                    splits.append(args[split_start:consumed + offset])
                    split_start = consumed + offset + 1
        consumed += len(fragment)
    splits.append(args[split_start:])
    return splits
[ "def", "argsplit", "(", "args", ",", "sep", "=", "','", ")", ":", "parsed_len", "=", "0", "last", "=", "0", "splits", "=", "[", "]", "for", "e", "in", "bracket_split", "(", "args", ",", "brackets", "=", "[", "'()'", ",", "'[]'", ",", "'{}'", "]",...
33.210526
14.631579
def add_column(self, data, column_name="", inplace=False):
    """
    Return an SFrame extended with one extra column.

    The number of elements in ``data`` must match the length of every
    existing column. When no name is supplied a default one is chosen.

    With ``inplace=False`` (the default) the current SFrame is untouched
    and a new SFrame is returned; with ``inplace=True`` the current
    SFrame is modified and ``self`` is returned.

    Parameters
    ----------
    data : SArray
        The 'column' of data to add.

    column_name : string, optional
        The name of the column. If no name is given, a default name is
        chosen.

    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    add_columns

    Examples
    --------
    >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
    >>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
    >>> # This line is equivalent to `sf['species'] = sa`
    >>> res = sf.add_column(sa, 'species')
    >>> res
    +----+-----+---------+
    | id | val | species |
    +----+-----+---------+
    | 1  |  A  |   cat   |
    | 2  |  B  |   dog   |
    | 3  |  C  |  fossa  |
    +----+-----+---------+
    [3 rows x 3 columns]
    """
    # Coerce anything that is not already an SArray.
    if not isinstance(data, SArray):
        if isinstance(data, _Iterable):
            data = SArray(data)
        elif self.num_columns() == 0:
            # Empty frame: a scalar becomes a one-element column.
            data = SArray([data])
        else:
            # Broadcast the scalar across every existing row.
            data = SArray.from_const(data, self.num_rows())

    if not isinstance(column_name, str):
        raise TypeError("Invalid column name: must be str")

    target = self if inplace else self.copy()

    with cython_context():
        target.__proxy__.add_column(data.__proxy__, column_name)

    target._cache = None
    return target
[ "def", "add_column", "(", "self", ",", "data", ",", "column_name", "=", "\"\"", ",", "inplace", "=", "False", ")", ":", "# Check type for pandas dataframe or SArray?", "if", "not", "isinstance", "(", "data", ",", "SArray", ")", ":", "if", "isinstance", "(", ...
29.675676
20.459459
def get_object_from_name(name):
    '''
    Returns the named object.

    Arguments:
        name (str): A string of form `package.subpackage.etc.module.property`.
        This function will import `package.subpackage.etc.module` and return
        `property` from that module.
    '''
    # Split on the LAST dot: everything before it is the module path,
    # everything after it is the attribute to fetch.
    split_at = name.rindex(".")
    module_path = name[:split_at]
    attr_name = name[split_at + 1:]
    __import__(module_path)
    return getattr(sys.modules[module_path], attr_name)
[ "def", "get_object_from_name", "(", "name", ")", ":", "dot", "=", "name", ".", "rindex", "(", "\".\"", ")", "mod_name", ",", "property_name", "=", "name", "[", ":", "dot", "]", ",", "name", "[", "dot", "+", "1", ":", "]", "__import__", "(", "mod_name...
31.785714
23.928571
def SetKeyPathPrefix(self, key_path_prefix):
    """Sets the Windows Registry key path prefix.

    Besides storing the prefix itself, its length and upper-cased form are
    cached so later lookups don't recompute them.

    Args:
      key_path_prefix (str): Windows Registry key path prefix.
    """
    self._key_path_prefix = key_path_prefix
    self._key_path_prefix_upper = key_path_prefix.upper()
    self._key_path_prefix_length = len(key_path_prefix)
[ "def", "SetKeyPathPrefix", "(", "self", ",", "key_path_prefix", ")", ":", "self", ".", "_key_path_prefix", "=", "key_path_prefix", "self", ".", "_key_path_prefix_length", "=", "len", "(", "key_path_prefix", ")", "self", ".", "_key_path_prefix_upper", "=", "key_path_...
36.111111
14.666667
def signin(request, auth_form=AuthenticationForm,
           template_name='userena/signin_form.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           redirect_signin_function=signin_redirect, extra_context=None):
    """
    Signin using email or username with password.

    Signs a user in by combining email/username with password. If the
    combination is correct and the user :func:`is_active` the
    :func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
    trying the login. The returned value of the function will be the URL that
    is redirected to.

    A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``.

    :param auth_form: Form to use for signing the user in. Defaults to the
        :class:`AuthenticationForm` supplied by userena.

    :param template_name: String defining the name of the template to use.
        Defaults to ``userena/signin_form.html``.

    :param redirect_field_name: Form field name which contains the value for a
        redirect to the succeeding page. Defaults to ``next`` and is set in
        ``REDIRECT_FIELD_NAME`` setting.

    :param redirect_signin_function: Function which handles the redirect. This
        functions gets the value of ``REDIRECT_FIELD_NAME`` and the
        :class:`User` who has logged in. It must return a string which
        specifies the URI to redirect to.

    :param extra_context: A dictionary containing extra variables that should
        be passed to the rendered template. The ``form`` key is always the
        ``auth_form``.

    **Context**

    ``form``
        Form used for authentication supplied by ``auth_form``.

    """
    form = auth_form()

    if request.method == 'POST':
        # Re-bind the form to the submitted credentials.
        form = auth_form(request.POST, request.FILES)
        if form.is_valid():
            identification, password, remember_me = (form.cleaned_data['identification'],
                                                     form.cleaned_data['password'],
                                                     form.cleaned_data['remember_me'])
            # NOTE(review): assumes form validation already guarantees that
            # authenticate() succeeds here; if it can return None, the
            # `user.is_active` access below raises AttributeError -- confirm
            # the contract of `auth_form`.
            user = authenticate(identification=identification,
                                password=password)
            if user.is_active:
                login(request, user)
                if remember_me:
                    # Index [1] of USERENA_REMEMBER_ME_DAYS is presumably the
                    # day count; 86400 converts days to seconds -- verify the
                    # setting's (label, days) layout.
                    request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
                else: request.session.set_expiry(0)

                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(request, _('You have been signed in.'),
                                     fail_silently=True)

                #send a signal that a user has signed in
                userena_signals.account_signin.send(sender=None, user=user)
                # Whereto now?
                redirect_to = redirect_signin_function(
                    request.GET.get(redirect_field_name,
                                    request.POST.get(redirect_field_name)), user)
                return HttpResponseRedirect(redirect_to)
            else:
                # Correct credentials but a deactivated account.
                return redirect(reverse('userena_disabled',
                                        kwargs={'username': user.username}))

    # GET request, or invalid POST: render the (possibly error-bearing) form.
    if not extra_context: extra_context = dict()
    extra_context.update({
        'form': form,
        'next': request.GET.get(redirect_field_name,
                                request.POST.get(redirect_field_name)),
    })
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
[ "def", "signin", "(", "request", ",", "auth_form", "=", "AuthenticationForm", ",", "template_name", "=", "'userena/signin_form.html'", ",", "redirect_field_name", "=", "REDIRECT_FIELD_NAME", ",", "redirect_signin_function", "=", "signin_redirect", ",", "extra_context", "=...
43.301205
24.795181
def request_access(self, verifier):
    """Exchange the OAuth request token for an access token so we can
    make authenticated requests."""
    server = self._server_cache[self.client.server]
    oauth_client = OAuth1(
        client_key=server.key,
        client_secret=server.secret,
        resource_owner_key=self.store["oauth-request-token"],
        resource_owner_secret=self.store["oauth-request-secret"],
        verifier=verifier,
    )

    response = self._requester(
        requests.post,
        "oauth/access_token",
        auth=oauth_client,
    )

    # The endpoint answers with a urlencoded body; persist the token pair.
    tokens = parse.parse_qs(response.text)
    self.store["oauth-access-token"] = tokens[self.PARAM_TOKEN][0]
    self.store["oauth-access-secret"] = tokens[self.PARAM_TOKEN_SECRET][0]

    self._server_tokens = {}
[ "def", "request_access", "(", "self", ",", "verifier", ")", ":", "client", "=", "OAuth1", "(", "client_key", "=", "self", ".", "_server_cache", "[", "self", ".", "client", ".", "server", "]", ".", "key", ",", "client_secret", "=", "self", ".", "_server_c...
36.818182
20.545455
def secondary_mass(mass1, mass2):
    """Returns the smaller of mass1 and mass2 (s = secondary)."""
    mass1, mass2, input_is_array = ensurearray(mass1, mass2)
    if mass1.shape != mass2.shape:
        raise ValueError("mass1 and mass2 must have same shape")
    # Start from mass2 and overwrite wherever mass1 is the smaller one.
    smaller = copy.copy(mass2)
    take_mass1 = mass1 < mass2
    smaller[take_mass1] = mass1[take_mass1]
    return formatreturn(smaller, input_is_array)
[ "def", "secondary_mass", "(", "mass1", ",", "mass2", ")", ":", "mass1", ",", "mass2", ",", "input_is_array", "=", "ensurearray", "(", "mass1", ",", "mass2", ")", "if", "mass1", ".", "shape", "!=", "mass2", ".", "shape", ":", "raise", "ValueError", "(", ...
41.555556
11.666667
def add_command_set(self, command_set):
    """
    Copies every command and event registered in the specified CommandSet
    into this one.

    :param command_set: a commands set to add commands from
    """
    for cmd in command_set.get_commands():
        self.add_command(cmd)

    for evt in command_set.get_events():
        self.add_event(evt)
[ "def", "add_command_set", "(", "self", ",", "command_set", ")", ":", "for", "command", "in", "command_set", ".", "get_commands", "(", ")", ":", "self", ".", "add_command", "(", "command", ")", "for", "event", "in", "command_set", ".", "get_events", "(", ")...
33.416667
15.083333
def inspect(self, name):
    '''Inspect a local image in the database, which typically includes
       the basic fields in the model.

       Returns the container's fields as a dict (after printing them as
       JSON), or None when no container with that name exists.
    '''
    print(name)
    container = self.get(name)
    # Bug fix: bail out early when the container is missing -- previously
    # `fields` was referenced after the if-block without ever being bound,
    # raising UnboundLocalError for unknown names.
    if container is None:
        return None
    collection = container.collection.name
    fields = container.__dict__.copy()
    fields['collection'] = collection
    # `metrics` is stored as a JSON string; decode it for display.
    fields['metrics'] = json.loads(fields['metrics'])
    # Drop SQLAlchemy bookkeeping and make the timestamp serializable.
    del fields['_sa_instance_state']
    fields['created_at'] = str(fields['created_at'])
    print(json.dumps(fields, indent=4, sort_keys=True))
    return fields
[ "def", "inspect", "(", "self", ",", "name", ")", ":", "print", "(", "name", ")", "container", "=", "self", ".", "get", "(", "name", ")", "if", "container", "is", "not", "None", ":", "collection", "=", "container", ".", "collection", ".", "name", "fie...
36.375
16.5
def FlowAccumFromProps(
    props,
    weights  = None,
    in_place = False
):
    """Calculates flow accumulation from flow proportions.

    Args:
        props    (rdarray): An elevation model
        weights  (rdarray): Flow accumulation weights to use. This is the
                            amount of flow generated by each cell. If this is
                            not provided, each cell will generate 1 unit of
                            flow.
        in_place (bool):    If True, then `weights` is modified in place. An
                            accumulation matrix is always returned, but it will
                            just be a view of the modified data if `in_place`
                            is True.

    Returns:
        A flow accumulation array. If `weights` was provided and `in_place`
        was True, then this matrix is a view of the modified data.

    Raises:
        Exception: if `props` is not an rd3array, or the accumulation array
                   is not float64.
    """
    # Bug fix: `type(x) is not rd3array` rejected subclasses of rd3array;
    # isinstance accepts them while still rejecting plain arrays.
    if not isinstance(props, rd3array):
        raise Exception("A richdem.rd3array or numpy.ndarray is required!")

    # Build the accumulation array: reuse, copy, or default to one unit of
    # flow per cell.
    if weights is not None and in_place:
        accum = rdarray(weights, no_data=-1)
    elif weights is not None and not in_place:
        accum = rdarray(weights, copy=True, meta_obj=props, no_data=-1)
    elif weights is None:
        accum = rdarray(np.ones(shape=props.shape[0:2], dtype='float64'),
                        meta_obj=props, no_data=-1)
    else:
        raise Exception("Execution should never reach this point!")

    if accum.dtype != 'float64':
        raise Exception("Accumulation array must be of type 'float64'!")

    accumw = accum.wrap()

    _AddAnalysis(accum, "FlowAccumFromProps(dem, weights={weights}, in_place={in_place})".format(
        weights  = 'None' if weights is None else 'weights',
        in_place = in_place
    ))

    _richdem.FlowAccumulation(props.wrap(), accumw)

    accum.copyFromWrapped(accumw)

    return accum
[ "def", "FlowAccumFromProps", "(", "props", ",", "weights", "=", "None", ",", "in_place", "=", "False", ")", ":", "if", "type", "(", "props", ")", "is", "not", "rd3array", ":", "raise", "Exception", "(", "\"A richdem.rd3array or numpy.ndarray is required!\"", ")"...
35.857143
26.081633
def _needs_ref_WCS(reglist):
    """ Check if the region list contains shapes in image-like coordinates
    """
    from pyregion.wcs_helper import image_like_coordformats
    # True as soon as any region uses an image-like coordinate format.
    return any(region.coord_format in image_like_coordformats
               for region in reglist)
[ "def", "_needs_ref_WCS", "(", "reglist", ")", ":", "from", "pyregion", ".", "wcs_helper", "import", "image_like_coordformats", "for", "r", "in", "reglist", ":", "if", "r", ".", "coord_format", "in", "image_like_coordformats", ":", "return", "True", "return", "Fa...
31.222222
16
def __query_cmd(self, command, device=None):
    """Calls a command"""
    # Build the command URL; append the device AIN only when one was given.
    url = u'%s&switchcmd=%s' % (self.__homeauto_url_with_sid(), command)
    if device is not None:
        url = '%s&ain=%s' % (url, device)
    if self.__debug:
        print(u'Query Command URI: ' + url)
    return self.__query(url)
[ "def", "__query_cmd", "(", "self", ",", "command", ",", "device", "=", "None", ")", ":", "base_url", "=", "u'%s&switchcmd=%s'", "%", "(", "self", ".", "__homeauto_url_with_sid", "(", ")", ",", "command", ")", "if", "device", "is", "None", ":", "url", "="...
28.615385
20.076923
def sed2(img, contour=None, shape=[3, 4]):
    """
    Plot a tiled image of multiple slices.

    :param img: volume to display
    :param contour: optional volume whose contours are overlaid on the tiles
    :param shape: tiling grid as [rows, cols]
    :return: None
    """
    # Fix: the original had the docstring duplicated as a second bare
    # string statement; the dead copy is removed.
    # NOTE(review): `shape` is a mutable default; it is only read here, but
    # callers should not mutate the returned-by-reference default.
    plt.imshow(slices(img, shape), cmap='gray')
    if contour is not None:
        plt.contour(slices(contour, shape))
[ "def", "sed2", "(", "img", ",", "contour", "=", "None", ",", "shape", "=", "[", "3", ",", "4", "]", ")", ":", "\"\"\"\r\n :param img:\r\n :param contour:\r\n :param shape:\r\n :return:\r\n \"\"\"", "plt", ".", "imshow", "(", "slices", "(", "img", ",...
19.473684
15.263158
def addobject(bunchdt, data, commdct, key, theidf, aname=None, **kwargs):
    """add an object to the eplus model"""
    raw_obj = newrawobject(data, commdct, key)
    bunch_obj = obj2bunch(data, commdct, raw_obj)
    if aname:
        namebunch(bunch_obj, aname)
    # Register the new object in both the raw data and the bunch views.
    data.dt[key].append(raw_obj)
    bunchdt[key].append(bunch_obj)
    # Apply any field values passed as keyword arguments.
    for fieldname, fieldvalue in kwargs.items():
        bunch_obj[fieldname] = fieldvalue
    return bunch_obj
[ "def", "addobject", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "theidf", ",", "aname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "newrawobject", "(", "data", ",", "commdct", ",", "key", ")", "abunch", "=", "obj2...
35.454545
12
def moreData(ra,dec,box):
    """Search the CFHT archive for more images of this location"""
    # NOTE(review): `box` is accepted but never used, and the search result
    # (`inter`) is neither returned nor stored -- this function appears
    # unfinished; confirm the intended behavior with the author.
    import cfhtCutout
    # Query centered on (ra, dec) with a 0.2 deg radius; 0.2 is also passed
    # as the second argument to find_images -- presumably the cutout size.
    cdata={'ra_deg': ra, 'dec_deg': dec, 'radius_deg': 0.2}
    inter=cfhtCutout.find_images(cdata,0.2)
[ "def", "moreData", "(", "ra", ",", "dec", ",", "box", ")", ":", "import", "cfhtCutout", "cdata", "=", "{", "'ra_deg'", ":", "ra", ",", "'dec_deg'", ":", "dec", ",", "'radius_deg'", ":", "0.2", "}", "inter", "=", "cfhtCutout", ".", "find_images", "(", ...
33.666667
15.5