text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def push_to_server(self): """ Use appropriate CIM API call to save payment profile to Authorize.NET 1. If customer has no profile yet, create one with this payment profile 2. If payment profile is not on Authorize.NET yet, create it there 3. If payment profile exists on Authorize.NET update it there """ if not self.customer_profile_id: try: self.customer_profile = CustomerProfile.objects.get( customer=self.customer) except CustomerProfile.DoesNotExist: pass if self.payment_profile_id: response = update_payment_profile( self.customer_profile.profile_id, self.payment_profile_id, self.raw_data, self.raw_data, ) response.raise_if_error() elif self.customer_profile_id: output = create_payment_profile( self.customer_profile.profile_id, self.raw_data, self.raw_data, ) response = output['response'] response.raise_if_error() self.payment_profile_id = output['payment_profile_id'] else: output = add_profile( self.customer.id, self.raw_data, self.raw_data, ) response = output['response'] response.raise_if_error() self.customer_profile = CustomerProfile.objects.create( customer=self.customer, profile_id=output['profile_id'], sync=False, ) self.payment_profile_id = output['payment_profile_ids'][0]
[ "def", "push_to_server", "(", "self", ")", ":", "if", "not", "self", ".", "customer_profile_id", ":", "try", ":", "self", ".", "customer_profile", "=", "CustomerProfile", ".", "objects", ".", "get", "(", "customer", "=", "self", ".", "customer", ")", "exce...
38.704545
13.568182
def display_notes(self, notes): """Renders "notes" reported by ENSIME, such as typecheck errors.""" # TODO: this can probably be a cached property like isneovim hassyntastic = bool(int(self._vim.eval('exists(":SyntasticCheck")'))) if hassyntastic: self.__display_notes_with_syntastic(notes) else: self.__display_notes(notes) self._vim.command('redraw!')
[ "def", "display_notes", "(", "self", ",", "notes", ")", ":", "# TODO: this can probably be a cached property like isneovim", "hassyntastic", "=", "bool", "(", "int", "(", "self", ".", "_vim", ".", "eval", "(", "'exists(\":SyntasticCheck\")'", ")", ")", ")", "if", ...
34.75
21.333333
def _GetStringValue(self, data_dict, name, default_value=None): """Retrieves a specific string value from the data dict. Args: data_dict (dict[str, list[str]): values per name. name (str): name of the value to retrieve. default_value (Optional[object]): value to return if the name has no value set in data_dict. Returns: str: value represented as a string. """ values = data_dict.get(name, None) if not values: return default_value for index, value in enumerate(values): if ',' in value: values[index] = '"{0:s}"'.format(value) return ', '.join(values)
[ "def", "_GetStringValue", "(", "self", ",", "data_dict", ",", "name", ",", "default_value", "=", "None", ")", ":", "values", "=", "data_dict", ".", "get", "(", "name", ",", "None", ")", "if", "not", "values", ":", "return", "default_value", "for", "index...
29.666667
18.857143
def main(): """ Main entrypoint for command-line webserver. """ parser = argparse.ArgumentParser() parser.add_argument("-H", "--host", help="Web server Host address to bind to", default="0.0.0.0", action="store", required=False) parser.add_argument("-p", "--port", help="Web server Port to bind to", default=8080, action="store", required=False) args = parser.parse_args() logging.basicConfig() run(host=args.host, port=args.port, reloader=True, server=SERVER)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"-H\"", ",", "\"--host\"", ",", "help", "=", "\"Web server Host address to bind to\"", ",", "default", "=", "\"0.0.0.0\"", ",", "act...
41
20.846154
def not_(self, value, name=''): """ Bitwise integer complement: name = ~value """ if isinstance(value.type, types.VectorType): rhs = values.Constant(value.type, (-1,) * value.type.count) else: rhs = values.Constant(value.type, -1) return self.xor(value, rhs, name=name)
[ "def", "not_", "(", "self", ",", "value", ",", "name", "=", "''", ")", ":", "if", "isinstance", "(", "value", ".", "type", ",", "types", ".", "VectorType", ")", ":", "rhs", "=", "values", ".", "Constant", "(", "value", ".", "type", ",", "(", "-",...
34.4
11.4
def itertags(html, tag): """ Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when standards compliance is not required. Will find tags that are commented out, or inside script tag etc. :param html: HTML page :param tag: tag name to find :return: generator with Tags """ for match in tag_re.finditer(html): if match.group("tag") == tag: attrs = dict((a.group("key").lower(), a.group("value")) for a in attr_re.finditer(match.group("attr"))) yield Tag(match.group("tag"), attrs, match.group("inner"))
[ "def", "itertags", "(", "html", ",", "tag", ")", ":", "for", "match", "in", "tag_re", ".", "finditer", "(", "html", ")", ":", "if", "match", ".", "group", "(", "\"tag\"", ")", "==", "tag", ":", "attrs", "=", "dict", "(", "(", "a", ".", "group", ...
45.923077
24.846154
def get_input_media_referenced_files(self, var_name): """ Generates a tuple with the value for the json/url argument and a dictionary for the multipart file upload. Will return something which might be similar to `('attach://{var_name}', {var_name: ('foo.png', open('foo.png', 'rb'), 'image/png')})` or in the case of the :class:`InputFileUseFileID` class, just `('AwADBAADbXXXXXXXXXXXGBdhD2l6_XX', None)` :param var_name: name used to reference the file. :type var_name: str :return: tuple of (file_id, dict) :rtype: tuple """ # file to be uploaded string = 'attach://{name}'.format(name=var_name) return string, self.get_request_files(var_name)
[ "def", "get_input_media_referenced_files", "(", "self", ",", "var_name", ")", ":", "# file to be uploaded", "string", "=", "'attach://{name}'", ".", "format", "(", "name", "=", "var_name", ")", "return", "string", ",", "self", ".", "get_request_files", "(", "var_n...
41.388889
22.611111
def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises ValueError if called more times than there were items placed in the queue. """ self._parent._check_closing() with self._parent._all_tasks_done: if self._parent._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._parent._unfinished_tasks -= 1 if self._parent._unfinished_tasks == 0: self._parent._finished.set() self._parent._all_tasks_done.notify_all()
[ "def", "task_done", "(", "self", ")", ":", "self", ".", "_parent", ".", "_check_closing", "(", ")", "with", "self", ".", "_parent", ".", "_all_tasks_done", ":", "if", "self", ".", "_parent", ".", "_unfinished_tasks", "<=", "0", ":", "raise", "ValueError", ...
44
20.045455
def all(self): " execute query, get all list of lists" query,inputs = self._toedn() return self.db.q(query, inputs = inputs, limit = self._limit, offset = self._offset, history = self._history)
[ "def", "all", "(", "self", ")", ":", "query", ",", "inputs", "=", "self", ".", "_toedn", "(", ")", "return", "self", ".", "db", ".", "q", "(", "query", ",", "inputs", "=", "inputs", ",", "limit", "=", "self", ".", "_limit", ",", "offset", "=", ...
28.25
12.5
def ensure_remote_branch_is_tracked(branch): """Track the specified remote branch if it is not already tracked.""" if branch == MASTER_BRANCH: # We don't need to explicitly track the master branch, so we're done. return # Ensure the specified branch is in the local branch list. output = subprocess.check_output(['git', 'branch', '--list']) for line in output.split('\n'): if line.strip() == branch: # We are already tracking the remote branch break else: # We are not tracking the remote branch, so track it. try: sys.stdout.write(subprocess.check_output( ['git', 'checkout', '--track', 'origin/%s' % branch])) except subprocess.CalledProcessError: # Bail gracefully. raise SystemExit(1)
[ "def", "ensure_remote_branch_is_tracked", "(", "branch", ")", ":", "if", "branch", "==", "MASTER_BRANCH", ":", "# We don't need to explicitly track the master branch, so we're done.", "return", "# Ensure the specified branch is in the local branch list.", "output", "=", "subprocess",...
40.9
17.95
def SampleStart(self): """Starts measuring the CPU time.""" self._start_cpu_time = time.clock() self.start_sample_time = time.time() self.total_cpu_time = 0
[ "def", "SampleStart", "(", "self", ")", ":", "self", ".", "_start_cpu_time", "=", "time", ".", "clock", "(", ")", "self", ".", "start_sample_time", "=", "time", ".", "time", "(", ")", "self", ".", "total_cpu_time", "=", "0" ]
33.6
6.4
def get_module_part(dotted_name, context_file=None): """given a dotted name return the module part of the name : >>> get_module_part('astroid.as_string.dump') 'astroid.as_string' :type dotted_name: str :param dotted_name: full name of the identifier we are interested in :type context_file: str or None :param context_file: context file to consider, necessary if the identifier has been introduced using a relative import unresolvable in the actual context (i.e. modutils) :raise ImportError: if there is no such module in the directory :rtype: str or None :return: the module part of the name or None if we have not been able at all to import the given name XXX: deprecated, since it doesn't handle package precedence over module (see #10066) """ # os.path trick if dotted_name.startswith("os.path"): return "os.path" parts = dotted_name.split(".") if context_file is not None: # first check for builtin module which won't be considered latter # in that case (path != None) if parts[0] in BUILTIN_MODULES: if len(parts) > 2: raise ImportError(dotted_name) return parts[0] # don't use += or insert, we want a new list to be created ! path = None starti = 0 if parts[0] == "": assert ( context_file is not None ), "explicit relative import, but no context_file?" path = [] # prevent resolving the import non-relatively starti = 1 while parts[starti] == "": # for all further dots: change context starti += 1 context_file = os.path.dirname(context_file) for i in range(starti, len(parts)): try: file_from_modpath( parts[starti : i + 1], path=path, context_file=context_file ) except ImportError: if i < max(1, len(parts) - 2): raise return ".".join(parts[:i]) return dotted_name
[ "def", "get_module_part", "(", "dotted_name", ",", "context_file", "=", "None", ")", ":", "# os.path trick", "if", "dotted_name", ".", "startswith", "(", "\"os.path\"", ")", ":", "return", "\"os.path\"", "parts", "=", "dotted_name", ".", "split", "(", "\".\"", ...
33.644068
19.372881
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys): """ Try, in order: - The key passed in, if one was passed in. - Any key we can find through an SSH agent (if allowed). - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed). - Plain username/password auth, if a password was given. (The password might be needed to unlock a private key.) The password is required for two-factor authentication. """ saved_exception = None two_factor = False allowed_types = [] if pkey is not None: try: self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) allowed_types = self._transport.auth_publickey(username, pkey) two_factor = (allowed_types == ['password']) if not two_factor: return except SSHException, e: saved_exception = e if not two_factor: for key_filename in key_filenames: for pkey_class in (RSAKey, DSSKey): try: key = pkey_class.from_private_key_file(key_filename, password) self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename)) self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor and allow_agent: if self._agent == None: self._agent = Agent() for key in self._agent.get_keys(): try: self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint())) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor: keyfiles = [] rsa_key = os.path.expanduser('~/.ssh/id_rsa') dsa_key = os.path.expanduser('~/.ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) # look in ~/ssh/ for windows users: rsa_key = os.path.expanduser('~/ssh/id_rsa') dsa_key = 
os.path.expanduser('~/ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) if not look_for_keys: keyfiles = [] for pkey_class, filename in keyfiles: try: key = pkey_class.from_private_key_file(filename, password) self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename)) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e except IOError, e: saved_exception = e if password is not None: try: self._transport.auth_password(username, password) return except SSHException, e: saved_exception = e elif two_factor: raise SSHException('Two-factor authentication requires a password') # if we got an auth-failed exception earlier, re-raise it if saved_exception is not None: raise saved_exception raise SSHException('No authentication methods available')
[ "def", "_auth", "(", "self", ",", "username", ",", "password", ",", "pkey", ",", "key_filenames", ",", "allow_agent", ",", "look_for_keys", ")", ":", "saved_exception", "=", "None", "two_factor", "=", "False", "allowed_types", "=", "[", "]", "if", "pkey", ...
42.096154
19.596154
def add_items(self, items_list, incl_pmag=True, incl_parents=True): """ Add items and/or update existing items in grid """ num_rows = self.GetNumberRows() current_grid_rows = [self.GetCellValue(num, 0) for num in range(num_rows)] er_data = {item.name: item.er_data for item in items_list} pmag_data = {item.name: item.pmag_data for item in items_list} items_list = sorted(items_list, key=lambda item: item.name) for item in items_list[:]: if item.name in current_grid_rows: pass else: self.add_row(item.name, item) self.add_data(er_data)#, pmag=False) if incl_pmag: self.add_data(pmag_data, pmag=True) if incl_parents: self.add_parents()
[ "def", "add_items", "(", "self", ",", "items_list", ",", "incl_pmag", "=", "True", ",", "incl_parents", "=", "True", ")", ":", "num_rows", "=", "self", ".", "GetNumberRows", "(", ")", "current_grid_rows", "=", "[", "self", ".", "GetCellValue", "(", "num", ...
41.842105
14.894737
def get_create_option(self, context, q): """Form the correct create_option to append to results.""" create_option = [] display_create_option = False if self.create_field and q: page_obj = context.get('page_obj', None) if page_obj is None or page_obj.number == 1: display_create_option = True if display_create_option and self.has_add_permission(self.request): ''' Generate querysets of Locations, StaffMembers, and Users that match the query string. ''' for s in Location.objects.filter( Q( Q(name__istartswith=q) & Q(transactionparty__isnull=True) ) ): create_option += [{ 'id': 'Location_%s' % s.id, 'text': _('Generate from location "%(location)s"') % {'location': s.name}, 'create_id': True, }] for s in StaffMember.objects.filter( Q( (Q(firstName__istartswith=q) | Q(lastName__istartswith=q)) & Q(transactionparty__isnull=True) ) ): create_option += [{ 'id': 'StaffMember_%s' % s.id, 'text': _('Generate from staff member "%(staff_member)s"') % {'staff_member': s.fullName}, 'create_id': True, }] for s in User.objects.filter( Q( (Q(first_name__istartswith=q) | Q(last_name__istartswith=q)) & Q(staffmember__isnull=True) & Q(transactionparty__isnull=True) ) ): create_option += [{ 'id': 'User_%s' % s.id, 'text': _('Generate from user "%(user)s"') % {'user': s.get_full_name()}, 'create_id': True, }] # Finally, allow creation from a name only. create_option += [{ 'id': q, 'text': _('Create "%(new_value)s"') % {'new_value': q}, 'create_id': True, }] return create_option
[ "def", "get_create_option", "(", "self", ",", "context", ",", "q", ")", ":", "create_option", "=", "[", "]", "display_create_option", "=", "False", "if", "self", ".", "create_field", "and", "q", ":", "page_obj", "=", "context", ".", "get", "(", "'page_obj'...
40.107143
20.125
def to_dict(self): """Converts this embed object into a dict.""" # add in the raw data into the dict result = { key[1:]: getattr(self, key) for key in self.__slots__ if key[0] == '_' and hasattr(self, key) } # deal with basic convenience wrappers try: colour = result.pop('colour') except KeyError: pass else: if colour: result['color'] = colour.value try: timestamp = result.pop('timestamp') except KeyError: pass else: if timestamp: if timestamp.tzinfo: result['timestamp'] = timestamp.astimezone(tz=datetime.timezone.utc).isoformat() else: result['timestamp'] = timestamp.replace(tzinfo=datetime.timezone.utc).isoformat() # add in the non raw attribute ones if self.type: result['type'] = self.type if self.description: result['description'] = self.description if self.url: result['url'] = self.url if self.title: result['title'] = self.title return result
[ "def", "to_dict", "(", "self", ")", ":", "# add in the raw data into the dict", "result", "=", "{", "key", "[", "1", ":", "]", ":", "getattr", "(", "self", ",", "key", ")", "for", "key", "in", "self", ".", "__slots__", "if", "key", "[", "0", "]", "==...
26.755556
21.088889
def __save_reference(self, o, cls, args, kwargs): """ Saves a reference to the original object Facade is passed. This will either be the object itself or a LazyBones instance for lazy-loading later :param mixed o: The original object :param class cls: The class definition for the original object :param tuple args: The positional arguments to the original object :param dict kwargs: The keyword arguments to the original object """ if not o and cls: self['__original_object'] = LazyBones( cls, args, kwargs ) else: while hasattr( o, '__class__' ) and o.__class__ == Wrapper: o = o.wrapper__unwrap() self['__original_object'] = o
[ "def", "__save_reference", "(", "self", ",", "o", ",", "cls", ",", "args", ",", "kwargs", ")", ":", "if", "not", "o", "and", "cls", ":", "self", "[", "'__original_object'", "]", "=", "LazyBones", "(", "cls", ",", "args", ",", "kwargs", ")", "else", ...
43.941176
21.823529
def labels_to_onehots(labels, num_classes): """Convert a vector of integer class labels to a matrix of one-hot target vectors. :param labels: a vector of integer labels, 0 to num_classes. Has shape (batch_size,). :param num_classes: the total number of classes :return: has shape (batch_size, num_classes) """ batch_size = labels.get_shape().as_list()[0] with tf.name_scope("one_hot"): labels = tf.expand_dims(labels, 1) indices = tf.expand_dims(tf.range(0, batch_size, 1), 1) sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs") onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes], 1.0, 0.0) return onehots
[ "def", "labels_to_onehots", "(", "labels", ",", "num_classes", ")", ":", "batch_size", "=", "labels", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "with", "tf", ".", "name_scope", "(", "\"one_hot\"", ")", ":", "labels", "=", "tf...
45.0625
17.25
def run(self, parent=None): """Start the configeditor :returns: None :rtype: None :raises: None """ self.gw = GuerillaMGMTWin(parent=parent) self.gw.show()
[ "def", "run", "(", "self", ",", "parent", "=", "None", ")", ":", "self", ".", "gw", "=", "GuerillaMGMTWin", "(", "parent", "=", "parent", ")", "self", ".", "gw", ".", "show", "(", ")" ]
22.666667
15.111111
def updateFeature(self, features, gdbVersion=None, rollbackOnFailure=True): """ updates an existing feature in a feature service layer Input: feature - feature object(s) to get updated. A single feature or a list of feature objects can be passed Output: dictionary of result messages """ params = { "f" : "json", "rollbackOnFailure" : rollbackOnFailure } if gdbVersion is not None: params['gdbVersion'] = gdbVersion if isinstance(features, Feature): params['features'] = json.dumps([features.asDictionary]) elif isinstance(features, list): vals = [] for feature in features: if isinstance(feature, Feature): vals.append(feature.asDictionary) params['features'] = json.dumps(vals) elif isinstance(features, FeatureSet): params['features'] = json.dumps([f.asDictionary for f in features], default=_date_handler) else: return {'message' : "invalid inputs"} updateURL = self._url + "/updateFeatures" res = self._post(url=updateURL, securityHandler=self._securityHandler, param_dict=params, proxy_port=self._proxy_port, proxy_url=self._proxy_url) return res
[ "def", "updateFeature", "(", "self", ",", "features", ",", "gdbVersion", "=", "None", ",", "rollbackOnFailure", "=", "True", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"rollbackOnFailure\"", ":", "rollbackOnFailure", "}", "if", "gdbVersion"...
41.513514
14.972973
def _resolv_name(self, hostname): """Convert hostname to IP address.""" ip = hostname try: ip = socket.gethostbyname(hostname) except Exception as e: logger.debug("{}: Cannot convert {} to IP address ({})".format(self.plugin_name, hostname, e)) return ip
[ "def", "_resolv_name", "(", "self", ",", "hostname", ")", ":", "ip", "=", "hostname", "try", ":", "ip", "=", "socket", ".", "gethostbyname", "(", "hostname", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "\"{}: Cannot convert {} ...
38.875
20
def upload(target): # type: (str) -> None """ Upload the release to a pypi server. TODO: Make sure the git directory is clean before allowing a release. Args: target (str): pypi target as defined in ~/.pypirc """ log.info("Uploading to pypi server <33>{}".format(target)) with conf.within_proj_dir(): shell.run('python setup.py sdist register -r "{}"'.format(target)) shell.run('python setup.py sdist upload -r "{}"'.format(target))
[ "def", "upload", "(", "target", ")", ":", "# type: (str) -> None", "log", ".", "info", "(", "\"Uploading to pypi server <33>{}\"", ".", "format", "(", "target", ")", ")", "with", "conf", ".", "within_proj_dir", "(", ")", ":", "shell", ".", "run", "(", "'pyth...
34.642857
21.571429
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0, response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'): """Creates a HTTP Monitor instance in Device Cloud for a given list of topics :param topics: a string list of topics (e.g. ['DeviceCore[U]', 'FileDataCore']). :param transport_url: URL of the customer web server. :param transport_token: Credentials for basic authentication in the following format: username:password :param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT. :param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param response_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param batch_size: How many Msgs received before sending data. :param batch_duration: How long to wait before sending batch if it does not exceed batch_size. :param compression: Compression value (i.e. 'gzip'). :param format_type: What format server should send data in (i.e. 'xml' or 'json'). 
Returns an object of the created Monitor """ monitor_xml = """\ <Monitor> <monTopic>{topics}</monTopic> <monBatchSize>{batch_size}</monBatchSize> <monFormatType>{format_type}</monFormatType> <monTransportType>http</monTransportType> <monTransportUrl>{transport_url}</monTransportUrl> <monTransportToken>{transport_token}</monTransportToken> <monTransportMethod>{transport_method}</monTransportMethod> <monConnectTimeout>{connect_timeout}</monConnectTimeout> <monResponseTimeout>{response_timeout}</monResponseTimeout> <monCompression>{compression}</monCompression> </Monitor> """.format( topics=','.join(topics), transport_url=transport_url, transport_token=transport_token, transport_method=transport_method, connect_timeout=connect_timeout, response_timeout=response_timeout, batch_size=batch_size, batch_duration=batch_duration, format_type=format_type, compression=compression, ) monitor_xml = textwrap.dedent(monitor_xml) response = self._conn.post("/ws/Monitor", monitor_xml) location = ET.fromstring(response.text).find('.//location').text monitor_id = int(location.split('/')[-1]) return HTTPDeviceCloudMonitor(self._conn, monitor_id)
[ "def", "create_http_monitor", "(", "self", ",", "topics", ",", "transport_url", ",", "transport_token", "=", "None", ",", "transport_method", "=", "'PUT'", ",", "connect_timeout", "=", "0", ",", "response_timeout", "=", "0", ",", "batch_size", "=", "1", ",", ...
52.352941
22.921569
def save_model(self, request, obj, form, change): """ If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``, send a notification email to the user being saved if their ``active`` status has changed to ``True``. If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``, send a verification email instead. """ must_send_verification_mail_after_save = False if change and settings.ACCOUNTS_APPROVAL_REQUIRED: if obj.is_active and not User.objects.get(id=obj.id).is_active: if settings.ACCOUNTS_VERIFICATION_REQUIRED: # Accounts verification requires an inactive account obj.is_active = False # The token generated by send_verification_mail() # must match the _saved_ User object, # so postpone send_verification_mail() until later must_send_verification_mail_after_save = True else: send_approved_mail(request, obj) super(UserProfileAdmin, self).save_model(request, obj, form, change) if must_send_verification_mail_after_save: user = User.objects.get(id=obj.id) send_verification_mail(request, user, "signup_verify")
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "must_send_verification_mail_after_save", "=", "False", "if", "change", "and", "settings", ".", "ACCOUNTS_APPROVAL_REQUIRED", ":", "if", "obj", ".", "is_active", ...
54
18
def divide_url(self, url): """ divide url into host and path two parts """ if 'https://' in url: host = url[8:].split('/')[0] path = url[8 + len(host):] elif 'http://' in url: host = url[7:].split('/')[0] path = url[7 + len(host):] else: host = url.split('/')[0] path = url[len(host):] return host, path
[ "def", "divide_url", "(", "self", ",", "url", ")", ":", "if", "'https://'", "in", "url", ":", "host", "=", "url", "[", "8", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", "path", "=", "url", "[", "8", "+", "len", "(", "host", ")", ...
29.857143
7
def format(self, format_str): """Returns a formatted version of format_str. The only named replacement fields supported by this method and their corresponding API calls are: * {num} group_num * {name} group_name * {symbol} group_symbol * {variant} group_variant * {current_data} group_data * {nums} groups_nums * {names} groups_names * {symbols} groups_symbols * {variants} groups_variants * {all_data} groups_data Passing other replacement fields will result in raising exceptions. :param format_str: a new style format string :rtype: str """ return format_str.format(**{ "num": self.group_num, "name": self.group_name, "symbol": self.group_symbol, "variant": self.group_variant, "current_data": self.group_data, "count": self.groups_count, "names": self.groups_names, "symbols": self.groups_symbols, "variants": self.groups_variants, "all_data": self.groups_data})
[ "def", "format", "(", "self", ",", "format_str", ")", ":", "return", "format_str", ".", "format", "(", "*", "*", "{", "\"num\"", ":", "self", ".", "group_num", ",", "\"name\"", ":", "self", ".", "group_name", ",", "\"symbol\"", ":", "self", ".", "group...
36.5
9
def restore_expanded_state(self): """Restore all items expanded state""" if self.__expanded_state is None: return for item in self.get_items()+self.get_top_level_items(): user_text = get_item_user_text(item) is_expanded = self.__expanded_state.get(hash(user_text)) if is_expanded is not None: item.setExpanded(is_expanded)
[ "def", "restore_expanded_state", "(", "self", ")", ":", "if", "self", ".", "__expanded_state", "is", "None", ":", "return", "for", "item", "in", "self", ".", "get_items", "(", ")", "+", "self", ".", "get_top_level_items", "(", ")", ":", "user_text", "=", ...
45.555556
10.777778
def update(gandi, resource, name, size, quantity, password, sshkey, upgrade, console, snapshotprofile, reset_mysql_password, background, delete_snapshotprofile): """Update a PaaS instance. Resource can be a Hostname or an ID """ if snapshotprofile and delete_snapshotprofile: raise UsageError('You must not set snapshotprofile and ' 'delete-snapshotprofile.') pwd = None if password: pwd = click.prompt('password', hide_input=True, confirmation_prompt=True) if delete_snapshotprofile: snapshotprofile = '' result = gandi.paas.update(resource, name, size, quantity, pwd, sshkey, upgrade, console, snapshotprofile, reset_mysql_password, background) if background: gandi.pretty_echo(result) return result
[ "def", "update", "(", "gandi", ",", "resource", ",", "name", ",", "size", ",", "quantity", ",", "password", ",", "sshkey", ",", "upgrade", ",", "console", ",", "snapshotprofile", ",", "reset_mysql_password", ",", "background", ",", "delete_snapshotprofile", ")...
32.925926
21.555556
def to_dict(self): """Converts this object to an (ordered) dictionary of field-value pairs. >>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<']).to_dict() >>> assert m['type'] == 'ID' and m['country'] == 'AUT' and m['number'] == '10000999<' >>> assert m['valid_number'] and m['valid_date_of_birth'] and m['valid_expiration_date'] and not m['valid_composite'] """ result = OrderedDict() result['mrz_type'] = self.mrz_type result['valid_score'] = self.valid_score if self.mrz_type is not None: result['type'] = self.type result['country'] = self.country result['number'] = self.number result['date_of_birth'] = self.date_of_birth result['expiration_date'] = self.expiration_date result['nationality'] = self.nationality result['sex'] = self.sex result['names'] = self.names result['surname'] = self.surname if self.mrz_type == 'TD1': result['optional1'] = self.optional1 result['optional2'] = self.optional2 elif self.mrz_type in ['TD2', 'MRVA', 'MRVB']: result['optional1'] = self.optional1 else: result['personal_number'] = self.personal_number result['check_number'] = self.check_number result['check_date_of_birth'] = self.check_date_of_birth result['check_expiration_date'] = self.check_expiration_date if self.mrz_type not in ['MRVA', 'MRVB']: result['check_composite'] = self.check_composite if self.mrz_type == 'TD3': result['check_personal_number'] = self.check_personal_number result['valid_number'] = self.valid_check_digits[0] result['valid_date_of_birth'] = self.valid_check_digits[1] result['valid_expiration_date'] = self.valid_check_digits[2] if self.mrz_type not in ['MRVA', 'MRVB']: result['valid_composite'] = self.valid_check_digits[3] if self.mrz_type == 'TD3': result['valid_personal_number'] = self.valid_check_digits[4] if 'method' in self.aux: result['method'] = self.aux['method'] return result
[ "def", "to_dict", "(", "self", ")", ":", "result", "=", "OrderedDict", "(", ")", "result", "[", "'mrz_type'", "]", "=", "self", ".", "mrz_type", "result", "[", "'valid_score'", "]", "=", "self", ".", "valid_score", "if", "self", ".", "mrz_type", "is", ...
51.977778
19.422222
def save_var(name, value): """ Save a variable to the table specified by _State.vars_table_name. Key is the name of the variable, and value is the value. """ connection = _State.connection() _State.reflect_metadata() vars_table = sqlalchemy.Table( _State.vars_table_name, _State.metadata, sqlalchemy.Column('name', sqlalchemy.types.Text, primary_key=True), sqlalchemy.Column('value_blob', sqlalchemy.types.LargeBinary), sqlalchemy.Column('type', sqlalchemy.types.Text), keep_existing=True ) vars_table.create(bind=connection, checkfirst=True) column_type = get_column_type(value) if column_type == sqlalchemy.types.LargeBinary: value_blob = value else: value_blob = unicode(value).encode('utf-8') values = dict(name=name, value_blob=value_blob, # value_blob=Blob(value), type=column_type.__visit_name__.lower()) vars_table.insert(prefixes=['OR REPLACE']).values(**values).execute()
[ "def", "save_var", "(", "name", ",", "value", ")", ":", "connection", "=", "_State", ".", "connection", "(", ")", "_State", ".", "reflect_metadata", "(", ")", "vars_table", "=", "sqlalchemy", ".", "Table", "(", "_State", ".", "vars_table_name", ",", "_Stat...
33
19.709677
def versions_available(self): """ Query PyPI for a particular version or all versions of a package @returns: 0 if version(s) found or 1 if none found """ if self.version: spec = "%s==%s" % (self.project_name, self.version) else: spec = self.project_name if self.all_versions and self.version in self.all_versions: print_pkg_versions(self.project_name, [self.version]) elif not self.version and self.all_versions: print_pkg_versions(self.project_name, self.all_versions) else: if self.version: self.logger.error("No pacakge found for version %s" \ % self.version) else: self.logger.error("No pacakge found for %s" % self.project_name) return 1 return 0
[ "def", "versions_available", "(", "self", ")", ":", "if", "self", ".", "version", ":", "spec", "=", "\"%s==%s\"", "%", "(", "self", ".", "project_name", ",", "self", ".", "version", ")", "else", ":", "spec", "=", "self", ".", "project_name", "if", "sel...
35.458333
21.625
def onMessage(self, payload, isBinary): """ Send the payload onto the {slack.[payload['type]'} channel. The message is transalated from IDs to human-readable identifiers. Note: The slack API only sends JSON, isBinary will always be false. """ msg = self.translate(unpack(payload)) if 'type' in msg: channel_name = 'slack.{}'.format(msg['type']) print('Sending on {}'.format(channel_name)) channels.Channel(channel_name).send({'text': pack(msg)})
[ "def", "onMessage", "(", "self", ",", "payload", ",", "isBinary", ")", ":", "msg", "=", "self", ".", "translate", "(", "unpack", "(", "payload", ")", ")", "if", "'type'", "in", "msg", ":", "channel_name", "=", "'slack.{}'", ".", "format", "(", "msg", ...
43.916667
18.083333
def add_ini_opts(self, cp, sec): """Add job-specific options from configuration file. Parameters ----------- cp : ConfigParser object The ConfigParser object holding the workflow configuration settings sec : string The section containing options for this job. """ for opt in cp.options(sec): value = string.strip(cp.get(sec, opt)) opt = '--%s' %(opt,) if opt in self.file_input_options: # This now expects the option to be a file # Check is we have a list of files values = [path for path in value.split(' ') if path] self.common_raw_options.append(opt) self.common_raw_options.append(' ') # Get LFN and PFN for path in values: # Here I decide if the path is URL or # IFO:/path/to/file or IFO:url://path/to/file # That's somewhat tricksy as we used : as delimiter split_path = path.split(':', 1) if len(split_path) == 1: ifo = None path = path else: # Have I split a URL or not? if split_path[1].startswith('//'): # URL ifo = None path = path else: #IFO:path or IFO:URL ifo = split_path[0] path = split_path[1] curr_lfn = os.path.basename(path) # If the file exists make sure to use the # fill path as a file:// URL if os.path.isfile(path): curr_pfn = urlparse.urljoin('file:', urllib.pathname2url( os.path.abspath(path))) else: curr_pfn = path if curr_lfn in file_input_from_config_dict.keys(): file_pfn = file_input_from_config_dict[curr_lfn][2] assert(file_pfn == curr_pfn) curr_file = file_input_from_config_dict[curr_lfn][1] else: local_file_path = resolve_url(curr_pfn) curr_file = File.from_path(local_file_path) tuple_val = (local_file_path, curr_file, curr_pfn) file_input_from_config_dict[curr_lfn] = tuple_val self.common_input_files.append(curr_file) if ifo: self.common_raw_options.append(ifo + ':') self.common_raw_options.append(curr_file.dax_repr) else: self.common_raw_options.append(curr_file.dax_repr) self.common_raw_options.append(' ') else: self.common_options += [opt, value]
[ "def", "add_ini_opts", "(", "self", ",", "cp", ",", "sec", ")", ":", "for", "opt", "in", "cp", ".", "options", "(", "sec", ")", ":", "value", "=", "string", ".", "strip", "(", "cp", ".", "get", "(", "sec", ",", "opt", ")", ")", "opt", "=", "'...
43.985714
17.628571
def check_version(): """ Tells you if you have an old version of intern. """ import requests r = requests.get('https://pypi.python.org/pypi/intern/json').json() r = r['info']['version'] if r != __version__: print("You are using version {}. A newer version of intern is available: {} ".format(__version__, r) + "\n\n'pip install -U intern' to update.") return r
[ "def", "check_version", "(", ")", ":", "import", "requests", "r", "=", "requests", ".", "get", "(", "'https://pypi.python.org/pypi/intern/json'", ")", ".", "json", "(", ")", "r", "=", "r", "[", "'info'", "]", "[", "'version'", "]", "if", "r", "!=", "__ve...
36.727273
20.363636
def make_defaults_and_annotations(make_function_instr, builders): """ Get the AST expressions corresponding to the defaults, kwonly defaults, and annotations for a function created by `make_function_instr`. """ # Integer counts. n_defaults, n_kwonlydefaults, n_annotations = unpack_make_function_arg( make_function_instr.arg ) if n_annotations: # TOS should be a tuple of annotation names. load_annotation_names = builders.pop() annotations = dict(zip( reversed(load_annotation_names.arg), (make_expr(builders) for _ in range(n_annotations - 1)) )) else: annotations = {} kwonlys = {} while n_kwonlydefaults: default_expr = make_expr(builders) key_instr = builders.pop() if not isinstance(key_instr, instrs.LOAD_CONST): raise DecompilationError( "kwonlydefault key is not a LOAD_CONST: %s" % key_instr ) if not isinstance(key_instr.arg, str): raise DecompilationError( "kwonlydefault key builder is not a " "'LOAD_CONST of a string: %s" % key_instr ) kwonlys[key_instr.arg] = default_expr n_kwonlydefaults -= 1 defaults = make_exprs(builders, n_defaults) return defaults, kwonlys, annotations
[ "def", "make_defaults_and_annotations", "(", "make_function_instr", ",", "builders", ")", ":", "# Integer counts.", "n_defaults", ",", "n_kwonlydefaults", ",", "n_annotations", "=", "unpack_make_function_arg", "(", "make_function_instr", ".", "arg", ")", "if", "n_annotati...
34.894737
17.789474
def _maybe_dt_data(data, feature_names, feature_types): """ Validate feature names and types if data table """ if not isinstance(data, DataTable): return data, feature_names, feature_types data_types_names = tuple(lt.name for lt in data.ltypes) bad_fields = [data.names[i] for i, type_name in enumerate(data_types_names) if type_name not in DT_TYPE_MAPPER] if bad_fields: msg = """DataFrame.types for data must be int, float or bool. Did not expect the data types in fields """ raise ValueError(msg + ', '.join(bad_fields)) if feature_names is None: feature_names = data.names # always return stypes for dt ingestion if feature_types is not None: raise ValueError('DataTable has own feature types, cannot pass them in') feature_types = np.vectorize(DT_TYPE_MAPPER2.get)(data_types_names) return data, feature_names, feature_types
[ "def", "_maybe_dt_data", "(", "data", ",", "feature_names", ",", "feature_types", ")", ":", "if", "not", "isinstance", "(", "data", ",", "DataTable", ")", ":", "return", "data", ",", "feature_names", ",", "feature_types", "data_types_names", "=", "tuple", "(",...
38.6
16.28
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version=''): '''Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json ''' x = batch(symbols, fields, range_, last, token, version) ret = {} if isinstance(symbols, str): for field in x.keys(): ret[field] = _MAPPING[field](x[field]) else: for symbol in x.keys(): for field in x[symbol].keys(): if field not in ret: ret[field] = pd.DataFrame() dat = x[symbol][field] dat = _MAPPING[field](dat) dat['symbol'] = symbol ret[field] = pd.concat([ret[field], dat], sort=True) return ret
[ "def", "batchDF", "(", "symbols", ",", "fields", "=", "None", ",", "range_", "=", "'1m'", ",", "last", "=", "10", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "x", "=", "batch", "(", "symbols", ",", "fields", ",", "range_", ",", ...
28.5
19.944444
def get_display_name(self, role): """get the display name of a role""" if role not in self.flatten: raise MissingRole(role) return self.flatten[role]['display_name']
[ "def", "get_display_name", "(", "self", ",", "role", ")", ":", "if", "role", "not", "in", "self", ".", "flatten", ":", "raise", "MissingRole", "(", "role", ")", "return", "self", ".", "flatten", "[", "role", "]", "[", "'display_name'", "]" ]
39.4
5
def get(self, timeout=None): """Return the next available item from the tube. Blocks if tube is empty, until a producer for the tube puts an item on it.""" if timeout: try: result = self._queue.get(True, timeout) except multiprocessing.Queue.Empty: return(False, None) return(True, result) return self._queue.get()
[ "def", "get", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", ":", "try", ":", "result", "=", "self", ".", "_queue", ".", "get", "(", "True", ",", "timeout", ")", "except", "multiprocessing", ".", "Queue", ".", "Empty", ":", "r...
36.818182
12.727273
def maximize(self, bAsync = True): """ Maximize the window. @see: L{minimize}, L{restore} @type bAsync: bool @param bAsync: Perform the request asynchronously. @raise WindowsError: An error occured while processing this request. """ if bAsync: win32.ShowWindowAsync( self.get_handle(), win32.SW_MAXIMIZE ) else: win32.ShowWindow( self.get_handle(), win32.SW_MAXIMIZE )
[ "def", "maximize", "(", "self", ",", "bAsync", "=", "True", ")", ":", "if", "bAsync", ":", "win32", ".", "ShowWindowAsync", "(", "self", ".", "get_handle", "(", ")", ",", "win32", ".", "SW_MAXIMIZE", ")", "else", ":", "win32", ".", "ShowWindow", "(", ...
30.266667
21.2
def _get_renamed_deleted_sourcesapp(self): """ Get renamed and deleted sources lists from receiver . Internal method which queries device via HTTP to get names of renamed input sources. In this method AppCommand.xml is used. """ # renamed_sources and deleted_sources are dicts with "source" as key # and "renamed_source" or deletion flag as value. renamed_sources = {} deleted_sources = {} # Collect tags for AppCommand.xml call tags = ["GetRenameSource", "GetDeletedSource"] # Execute call root = self.exec_appcommand_post(tags) # Check result if root is None: _LOGGER.error("Getting renamed and deleted sources failed.") return (renamed_sources, deleted_sources, False) # Detect "Document Error: Data follows" title if URL does not exist document_error = root.find("./head/title") if document_error is not None: if document_error.text == "Document Error: Data follows": return (renamed_sources, deleted_sources, False) for child in root.findall("./cmd/functionrename/list"): try: renamed_sources[child.find("name").text.strip()] = ( child.find("rename").text.strip()) except AttributeError: continue for child in root.findall("./cmd/functiondelete/list"): try: deleted_sources[child.find("FuncName").text.strip( )] = "DEL" if ( child.find("use").text.strip() == "0") else None except AttributeError: continue return (renamed_sources, deleted_sources, True)
[ "def", "_get_renamed_deleted_sourcesapp", "(", "self", ")", ":", "# renamed_sources and deleted_sources are dicts with \"source\" as key", "# and \"renamed_source\" or deletion flag as value.", "renamed_sources", "=", "{", "}", "deleted_sources", "=", "{", "}", "# Collect tags for Ap...
39.767442
20.604651
def disable_paging(self, command="terminal length 999", delay_factor=1): """Disable paging default to a Cisco CLI method.""" delay_factor = self.select_delay_factor(delay_factor) time.sleep(delay_factor * 0.1) self.clear_buffer() command = self.normalize_cmd(command) log.debug("In disable_paging") log.debug("Command: {0}".format(command)) self.write_channel(command) output = self.read_until_prompt() if self.ansi_escape_codes: output = self.strip_ansi_escape_codes(output) log.debug("{0}".format(output)) log.debug("Exiting disable_paging") return output
[ "def", "disable_paging", "(", "self", ",", "command", "=", "\"terminal length 999\"", ",", "delay_factor", "=", "1", ")", ":", "delay_factor", "=", "self", ".", "select_delay_factor", "(", "delay_factor", ")", "time", ".", "sleep", "(", "delay_factor", "*", "0...
43.933333
9.066667
def tree_to_stream(entries, write): """Write the give list of entries into a stream using its write method :param entries: **sorted** list of tuples with (binsha, mode, name) :param write: write method which takes a data string""" ord_zero = ord('0') bit_mask = 7 # 3 bits set for binsha, mode, name in entries: mode_str = b'' for i in xrange(6): mode_str = bchr(((mode >> (i * 3)) & bit_mask) + ord_zero) + mode_str # END for each 8 octal value # git slices away the first octal if its zero if byte_ord(mode_str[0]) == ord_zero: mode_str = mode_str[1:] # END save a byte # here it comes: if the name is actually unicode, the replacement below # will not work as the binsha is not part of the ascii unicode encoding - # hence we must convert to an utf8 string for it to work properly. # According to my tests, this is exactly what git does, that is it just # takes the input literally, which appears to be utf8 on linux. if isinstance(name, text_type): name = name.encode(defenc) write(b''.join((mode_str, b' ', name, b'\0', binsha)))
[ "def", "tree_to_stream", "(", "entries", ",", "write", ")", ":", "ord_zero", "=", "ord", "(", "'0'", ")", "bit_mask", "=", "7", "# 3 bits set", "for", "binsha", ",", "mode", ",", "name", "in", "entries", ":", "mode_str", "=", "b''", "for", "i", "in", ...
45.692308
19.192308
def time_window_cutoff(sw_time, time_cutoff): """ Allows for cutting the declustering time window at a specific time, outside of which an event of any magnitude is no longer identified as a cluster """ sw_time = np.array( [(time_cutoff / DAYS) if x > (time_cutoff / DAYS) else x for x in sw_time]) return(sw_time)
[ "def", "time_window_cutoff", "(", "sw_time", ",", "time_cutoff", ")", ":", "sw_time", "=", "np", ".", "array", "(", "[", "(", "time_cutoff", "/", "DAYS", ")", "if", "x", ">", "(", "time_cutoff", "/", "DAYS", ")", "else", "x", "for", "x", "in", "sw_ti...
38.777778
15.222222
def bias_correct(params, data, acf=None): """ Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias` """ bias = RB_bias(data, params, acf=acf) i = 0 for p in params: if 'theta' in p: continue if params[p].vary: params[p].value -= bias[i] i += 1 return
[ "def", "bias_correct", "(", "params", ",", "data", ",", "acf", "=", "None", ")", ":", "bias", "=", "RB_bias", "(", "data", ",", "params", ",", "acf", "=", "acf", ")", "i", "=", "0", "for", "p", "in", "params", ":", "if", "'theta'", "in", "p", "...
19.878788
21.151515
def set_objective(self, expression): """Set objective of problem.""" if isinstance(expression, numbers.Number): # Allow expressions with no variables as objective, # represented as a number expression = Expression(offset=expression) # Clear previous objective for i in range(swiglpk.glp_get_num_cols(self._p)): swiglpk.glp_set_obj_coef(self._p, 1 + i, 0) for variable, value in expression.values(): var_index = self._variables[variable] swiglpk.glp_set_obj_coef(self._p, var_index, float(value)) swiglpk.glp_set_obj_coef(self._p, 0, float(expression.offset))
[ "def", "set_objective", "(", "self", ",", "expression", ")", ":", "if", "isinstance", "(", "expression", ",", "numbers", ".", "Number", ")", ":", "# Allow expressions with no variables as objective,", "# represented as a number", "expression", "=", "Expression", "(", ...
39.176471
19.588235
def user_exists(username): """Check if a user exists""" try: pwd.getpwnam(username) user_exists = True except KeyError: user_exists = False return user_exists
[ "def", "user_exists", "(", "username", ")", ":", "try", ":", "pwd", ".", "getpwnam", "(", "username", ")", "user_exists", "=", "True", "except", "KeyError", ":", "user_exists", "=", "False", "return", "user_exists" ]
23.875
15.125
def convert_msg(self, msg): """ Takes one POEntry object and converts it (adds a dummy translation to it) msg is an instance of polib.POEntry """ source = msg.msgid if not source: # don't translate empty string return plural = msg.msgid_plural if plural: # translate singular and plural foreign_single = self.convert(source) foreign_plural = self.convert(plural) plural = { '0': self.final_newline(source, foreign_single), '1': self.final_newline(plural, foreign_plural), } msg.msgstr_plural = plural else: foreign = self.convert(source) msg.msgstr = self.final_newline(source, foreign)
[ "def", "convert_msg", "(", "self", ",", "msg", ")", ":", "source", "=", "msg", ".", "msgid", "if", "not", "source", ":", "# don't translate empty string", "return", "plural", "=", "msg", ".", "msgid_plural", "if", "plural", ":", "# translate singular and plural"...
34.304348
15.086957
def Run(self): """The main run method of the client.""" for thread in itervalues(self._threads): thread.start() logging.info(START_STRING) while True: dead_threads = [ tn for (tn, t) in iteritems(self._threads) if not t.isAlive() ] if dead_threads: raise FatalError( "These threads are dead: %r. Shutting down..." % dead_threads) time.sleep(10)
[ "def", "Run", "(", "self", ")", ":", "for", "thread", "in", "itervalues", "(", "self", ".", "_threads", ")", ":", "thread", ".", "start", "(", ")", "logging", ".", "info", "(", "START_STRING", ")", "while", "True", ":", "dead_threads", "=", "[", "tn"...
29.142857
21
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the Get request payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. """ super(GetRequestPayload, self).read( input_stream, kmip_version=kmip_version ) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString( tag=enums.Tags.UNIQUE_IDENTIFIER ) self._unique_identifier.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.KEY_FORMAT_TYPE, local_stream): self._key_format_type = primitives.Enumeration( enum=enums.KeyFormatType, tag=enums.Tags.KEY_FORMAT_TYPE ) self._key_format_type.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.KEY_COMPRESSION_TYPE, local_stream): self._key_compression_type = primitives.Enumeration( enum=enums.KeyCompressionType, tag=enums.Tags.KEY_COMPRESSION_TYPE ) self._key_compression_type.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next( enums.Tags.KEY_WRAPPING_SPECIFICATION, local_stream ): self._key_wrapping_specification = \ objects.KeyWrappingSpecification() self._key_wrapping_specification.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream)
[ "def", "read", "(", "self", ",", "input_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "GetRequestPayload", ",", "self", ")", ".", "read", "(", "input_stream", ",", "kmip_version", "=", "kmip_version"...
36.4
18.666667
def _GetMergeTaskStorageFilePath(self, task): """Retrieves the path of a task storage file in the merge directory. Args: task (Task): task. Returns: str: path of a task storage file file in the merge directory. """ filename = '{0:s}.plaso'.format(task.identifier) return os.path.join(self._merge_task_storage_path, filename)
[ "def", "_GetMergeTaskStorageFilePath", "(", "self", ",", "task", ")", ":", "filename", "=", "'{0:s}.plaso'", ".", "format", "(", "task", ".", "identifier", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "_merge_task_storage_path", ",", "fil...
32
20.272727
def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1): """Apply all-reduce algorithm over specified gradient tensors.""" with tf.name_scope("allreduce"): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) scaled_grads = [g for g, _ in grad_and_vars] if alg == "nccl": from tensorflow.python.ops import nccl_ops summed_grads = nccl_ops.all_sum(scaled_grads) elif alg == "simple": summed_grads = build_reduce_sum(scaled_grads) elif alg == "trivial": summed_grads = build_trivial_sum(scaled_grads) elif alg == "xring": summed_grads = all_reduce.build_ring_all_reduce( scaled_grads, num_workers, num_shards, gpu_indices, tf.add) elif alg == "nccl/xring": summed_grads = all_reduce.build_nccl_then_ring( scaled_grads, num_shards, tf.add) elif alg == "nccl/rechd": summed_grads = all_reduce.build_nccl_then_recursive_hd( scaled_grads, tf.add) elif alg == "nccl/pscpu": summed_grads = all_reduce.build_nccl_then_shuffle( scaled_grads, aux_devices, tf.add, tf.add_n) elif alg == "pscpu/pscpu": summed_grads = all_reduce.build_shuffle_then_shuffle( scaled_grads, aux_devices, # TODO(tucker): devise a way of better specifying the device # for the second level. [aux_devices[0]], tf.add_n) elif alg in ["pscpu", "psgpu"]: summed_grads = all_reduce.build_shuffle_all_reduce( scaled_grads, aux_devices, tf.add_n) else: raise ValueError("unsupported all_reduce alg: ", alg) result = [] for (_, v), g in zip(grad_and_vars, summed_grads): result.append([g, v]) return result
[ "def", "sum_grad_and_var_all_reduce", "(", "grad_and_vars", ",", "num_workers", ",", "alg", ",", "gpu_indices", ",", "aux_devices", "=", "None", ",", "num_shards", "=", "1", ")", ":", "with", "tf", ".", "name_scope", "(", "\"allreduce\"", ")", ":", "# Note tha...
44.833333
14.520833
def _open(self): """Bind, use tls""" try: self.ldap.start_tls_s() #pylint: disable=no-member except ldap.CONNECT_ERROR: #pylint: enable=no-member logging.error('Unable to establish a connection to the LDAP server, ' + \ 'please check the connection string ' + \ 'and ensure the remote certificate is signed by a trusted authority.') raise self.ldap.simple_bind_s(self.user, self.password)
[ "def", "_open", "(", "self", ")", ":", "try", ":", "self", ".", "ldap", ".", "start_tls_s", "(", ")", "#pylint: disable=no-member", "except", "ldap", ".", "CONNECT_ERROR", ":", "#pylint: enable=no-member", "logging", ".", "error", "(", "'Unable to establish a conn...
39.461538
21.846154
def get_pretty_format(self, max_name_length=0): """Returns a nicely formatted string describing the result. Parameters ---------- max_name_length: int [0] The maximum length of the gene set name (in characters). If the gene set name is longer than this number, it will be truncated and "..." will be appended to it, so that the final string exactly meets the length requirement. If 0 (default), no truncation is performed. If not 0, must be at least 3. Returns ------- str The formatted string. Raises ------ ValueError If an invalid length value is specified. """ assert isinstance(max_name_length, (int, np.integer)) if max_name_length < 0 or (1 <= max_name_length <= 2): raise ValueError('max_name_length must be 0 or >= 3.') gs_name = self.gene_set._name if max_name_length > 0 and len(gs_name) > max_name_length: assert max_name_length >= 3 gs_name = gs_name[:(max_name_length - 3)] + '...' param_str = '(%d/%d @ %d/%d, pval=%.1e, fe=%.1fx)' \ % (self.k, self.K, self.n, self.N, self.pval, self.fold_enrichment) return '%s %s' % (gs_name, param_str)
[ "def", "get_pretty_format", "(", "self", ",", "max_name_length", "=", "0", ")", ":", "assert", "isinstance", "(", "max_name_length", ",", "(", "int", ",", "np", ".", "integer", ")", ")", "if", "max_name_length", "<", "0", "or", "(", "1", "<=", "max_name_...
34.631579
22.894737
def _get_result_paths(self, data): """Gets the results for a run of bwa index. bwa index outputs 5 files when the index is created. The filename prefix will be the same as the input fasta, unless overridden with the -p option, and the 5 extensions are listed below: .amb .ann .bwt .pac .sa and these extentions (including the period) are the keys to the dictionary that is returned. """ # determine the names of the files. The name will be the same as the # input fasta file unless overridden with the -p option if self.Parameters['-p'].isOn(): prefix = self.Parameters['-p'].Value else: prefix = data['fasta_in'] # the 5 output file suffixes suffixes = ['.amb', '.ann', '.bwt', '.pac', '.sa'] out_files = {} for suffix in suffixes: out_files[suffix] = ResultPath(prefix + suffix, IsWritten=True) return out_files
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# determine the names of the files. The name will be the same as the", "# input fasta file unless overridden with the -p option", "if", "self", ".", "Parameters", "[", "'-p'", "]", ".", "isOn", "(", ")", ":", ...
32.032258
22.806452
def prepare_for_reraise(error, exc_info=None): """Prepares the exception for re-raising with reraise method. This method attaches type and traceback info to the error object so that reraise can properly reraise it using this info. """ if not hasattr(error, "_type_"): if exc_info is None: exc_info = sys.exc_info() error._type_ = exc_info[0] error._traceback = exc_info[2] return error
[ "def", "prepare_for_reraise", "(", "error", ",", "exc_info", "=", "None", ")", ":", "if", "not", "hasattr", "(", "error", ",", "\"_type_\"", ")", ":", "if", "exc_info", "is", "None", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "error", ".",...
33.461538
14.230769
def napus(args): """ %prog napus napus.bed brapa.boleracea.i1.blocks diploid.napus.fractionation Extract napus gene loss vs diploid ancestors. We are looking specifically for anything that has the pattern: BR - BO or BR - BO | | AN CN Step 1: extract BR - BO syntenic pairs Step 2: get diploid gene retention patterns from BR or BO as query Step 3: look for if AN or CN is NS(non-syntenic) or NF(not found) and specifically with NS, the NS location is actually the homeologous site. Step 4: categorize gene losses into singleton, or segmental (defined as consecutive losses with a maximum skip of 1 """ from jcvi.utils.cbook import SummaryStats p = OptionParser(napus.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) napusbed, brbo, dpnp = args retention = {} fp = open(dpnp) for row in fp: seqid, query, hit = row.split() retention[query] = hit order = Bed(napusbed).order quartetsfile = "quartets" fp = open(brbo) fw = open(quartetsfile, "w") AL = "AN LOST" CL = "CN LOST" for row in fp: br, bo = row.split() if '.' in (br, bo): continue an, cn = retention[br], retention[bo] row = "\t".join((br, bo, an, cn)) if '.' 
in (an, cn): #print row continue # label loss candidates antag, anrange = get_tag(an, order) cntag, cnrange = get_tag(cn, order) if range_overlap(anrange, cnrange): if (antag, cntag) == ("NS", None): row = row + "\t{0}|{1}".format(AL, br) if (antag, cntag) == (None, "NS"): row = row + "\t{0}|{1}".format(CL, bo) print(row, file=fw) fw.close() logging.debug("Quartets and gene losses written to `{0}`.".\ format(quartetsfile)) # Parse the quartets file to extract singletons vs.segmental losses fp = open(quartetsfile) fw = open(quartetsfile + ".summary", "w") data = [x.rstrip().split("\t") for x in fp] skip = 1 # max distance between losses g = Grouper() losses = [(len(x) == 5) for x in data] for i, d in enumerate(losses): if not d: continue g.join(i, i) itag = data[i][-1].split("|")[0] for j in xrange(i + 1, i + skip + 1): jtag = data[j][-1].split("|")[0] if j < len(losses) and losses[j] and itag == jtag: g.join(i, j) losses = list(g) singletons = [x for x in losses if len(x) == 1] segments = [x for x in losses if len(x) > 1] ns, nm = len(singletons), len(segments) assert len(losses) == ns + nm grab_tag = lambda pool, tag: \ [x for x in pool if all(data[z][-1].startswith(tag) for z in x)] an_loss_singletons = grab_tag(singletons, AL) cn_loss_singletons = grab_tag(singletons, CL) als, cls = len(an_loss_singletons), len(cn_loss_singletons) an_loss_segments = grab_tag(segments, AL) cn_loss_segments = grab_tag(segments, CL) alm, clm = len(an_loss_segments), len(cn_loss_segments) mixed = len(segments) - alm - clm assert mixed == 0 logging.debug("Singletons: {0} (AN LOSS: {1}, CN LOSS: {2})".\ format(ns, als, cls)) logging.debug("Segments: {0} (AN LOSS: {1}, CN LOSS: {2})".\ format(nm, alm, clm)) print(SummaryStats([len(x) for x in losses]), file=sys.stderr) for x in singletons + segments: print("### LENGTH =", len(x), file=fw) for i in x: print("\t".join(data[i]), file=fw) fw.close()
[ "def", "napus", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "SummaryStats", "p", "=", "OptionParser", "(", "napus", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len",...
32.061404
18.026316
def setAutoRangeOff(self): """ Turns off the auto range checkbox. Calls _refreshNodeFromTarget, not _updateTargetFromNode, because setting auto range off does not require a redraw of the target. """ # TODO: catch exceptions. How? # /argos/hdf-eos/DeepBlue-SeaWiFS-1.0_L3_20100101_v002-20110527T191319Z.h5/aerosol_optical_thickness_stddev_ocean if self.getRefreshBlocked(): logger.debug("setAutoRangeOff blocked for {}".format(self.nodeName)) return if self.autoRangeCti: self.autoRangeCti.data = False self._forceRefreshAutoRange()
[ "def", "setAutoRangeOff", "(", "self", ")", ":", "# TODO: catch exceptions. How?", "# /argos/hdf-eos/DeepBlue-SeaWiFS-1.0_L3_20100101_v002-20110527T191319Z.h5/aerosol_optical_thickness_stddev_ocean", "if", "self", ".", "getRefreshBlocked", "(", ")", ":", "logger", ".", "debug", "...
42.333333
22
def findattr(self,attr,connector='parent'): """Returns the attribute named {attr}, from either the self or the self's parents (recursively).""" if (not hasattr(self,attr)): if (not hasattr(self,connector)): return None else: con=getattr(self,connector) if not con: return None if type(con)==type([]): return [x.findattr(attr,connector) for x in con] else: return con.findattr(attr,connector) else: return getattr(self,attr)
[ "def", "findattr", "(", "self", ",", "attr", ",", "connector", "=", "'parent'", ")", ":", "if", "(", "not", "hasattr", "(", "self", ",", "attr", ")", ")", ":", "if", "(", "not", "hasattr", "(", "self", ",", "connector", ")", ")", ":", "return", "...
30.6
15.6
def mpfr_mod(rop, x, y, rnd): """ Given two MPRF numbers x and y, compute x - floor(x / y) * y, rounded if necessary using the given rounding mode. The result is placed in 'rop'. This is the 'remainder' operation, with sign convention compatible with Python's % operator (where x % y has the same sign as y). """ # There are various cases: # # 0. If either argument is a NaN, the result is NaN. # # 1. If x is infinite or y is zero, the result is NaN. # # 2. If y is infinite, return 0 with the sign of y if x is zero, x if x has # the same sign as y, and infinity with the sign of y if it has the # opposite sign. # # 3. If none of the above cases apply then both x and y are finite, # and y is nonzero. If x and y have the same sign, simply # return the result of fmod(x, y). # # 4. Now both x and y are finite, y is nonzero, and x and y have # differing signs. Compute r = fmod(x, y) with sufficient precision # to get an exact result. If r == 0, return 0 with the sign of y # (which will be the opposite of the sign of x). If r != 0, # return r + y, rounded appropriately. if not mpfr.mpfr_number_p(x) or mpfr.mpfr_nan_p(y) or mpfr.mpfr_zero_p(y): return mpfr.mpfr_fmod(rop, x, y, rnd) elif mpfr.mpfr_inf_p(y): x_negative = mpfr.mpfr_signbit(x) y_negative = mpfr.mpfr_signbit(y) if mpfr.mpfr_zero_p(x): mpfr.mpfr_set_zero(rop, -y_negative) return 0 elif x_negative == y_negative: return mpfr.mpfr_set(rop, x, rnd) else: mpfr.mpfr_set_inf(rop, -y_negative) return 0 x_negative = mpfr.mpfr_signbit(x) y_negative = mpfr.mpfr_signbit(y) if x_negative == y_negative: return mpfr.mpfr_fmod(rop, x, y, rnd) else: p = max(mpfr.mpfr_get_prec(x), mpfr.mpfr_get_prec(y)) z = mpfr.Mpfr_t() mpfr.mpfr_init2(z, p) # Doesn't matter what rounding mode we use here; the result # should be exact. ternary = mpfr.mpfr_fmod(z, x, y, rnd) assert ternary == 0 if mpfr.mpfr_zero_p(z): mpfr.mpfr_set_zero(rop, -y_negative) return 0 else: return mpfr.mpfr_add(rop, y, z, rnd)
[ "def", "mpfr_mod", "(", "rop", ",", "x", ",", "y", ",", "rnd", ")", ":", "# There are various cases:", "#", "# 0. If either argument is a NaN, the result is NaN.", "#", "# 1. If x is infinite or y is zero, the result is NaN.", "#", "# 2. If y is infinite, return 0 with the sign o...
37.409836
17.934426
def list_traces( self, project_id, view=None, page_size=None, start_time=None, end_time=None, filter_=None, order_by=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Returns of a list of traces that match the specified filter conditions. Example: >>> from google.cloud import trace_v1 >>> >>> client = trace_v1.TraceServiceClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # Iterate over all results >>> for element in client.list_traces(project_id): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_traces(project_id).pages: ... for element in page: ... # process element ... pass Args: project_id (str): ID of the Cloud project where the trace data is stored. view (~google.cloud.trace_v1.types.ViewType): Type of data returned for traces in the list. Optional. Default is ``MINIMAL``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. start_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): Start of the time interval (inclusive) during which the trace data was collected from the application. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Timestamp` end_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): End of the time interval (inclusive) during which the trace data was collected from the application. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Timestamp` filter_ (str): An optional filter against labels for the request. By default, searches use prefix matching. 
To specify exact match, prepend a plus symbol (``+``) to the search term. Multiple terms are ANDed. Syntax: - ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces where any root span starts with ``NAME_PREFIX``. - ``+root:NAME`` or ``+NAME``: Return traces where any root span's name is exactly ``NAME``. - ``span:NAME_PREFIX``: Return traces where any span starts with ``NAME_PREFIX``. - ``+span:NAME``: Return traces where any span's name is exactly ``NAME``. - ``latency:DURATION``: Return traces whose overall latency is greater or equal to than ``DURATION``. Accepted units are nanoseconds (``ns``), milliseconds (``ms``), and seconds (``s``). Default is ``ms``. For example, ``latency:24ms`` returns traces whose overall latency is greater than or equal to 24 milliseconds. - ``label:LABEL_KEY``: Return all traces containing the specified label key (exact match, case-sensitive) regardless of the key:value pair's value (including empty values). - ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing the specified label key (exact match, case-sensitive) whose value starts with ``VALUE_PREFIX``. Both a key and a value must be specified. - ``+LABEL_KEY:VALUE``: Return all traces containing a key:value pair exactly matching the specified text. Both a key and a value must be specified. - ``method:VALUE``: Equivalent to ``/http/method:VALUE``. - ``url:VALUE``: Equivalent to ``/http/url:VALUE``. order_by (str): Field used to sort the returned traces. Optional. Can be one of the following: - ``trace_id`` - ``name`` (``name`` field of root span in the trace) - ``duration`` (difference between ``end_time`` and ``start_time`` fields of the root span) - ``start`` (``start_time`` field of the root span) Descending order can be specified by appending ``desc`` to the sort field (for example, ``name desc``). Only one sort field is permitted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.trace_v1.types.Trace` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_traces" not in self._inner_api_calls: self._inner_api_calls[ "list_traces" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_traces, default_retry=self._method_configs["ListTraces"].retry, default_timeout=self._method_configs["ListTraces"].timeout, client_info=self._client_info, ) request = trace_pb2.ListTracesRequest( project_id=project_id, view=view, page_size=page_size, start_time=start_time, end_time=end_time, filter=filter_, order_by=order_by, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("project_id", project_id)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_traces"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="traces", request_token_field="page_token", response_token_field="next_page_token", ) return iterator
[ "def", "list_traces", "(", "self", ",", "project_id", ",", "view", "=", "None", ",", "page_size", "=", "None", ",", "start_time", "=", "None", ",", "end_time", "=", "None", ",", "filter_", "=", "None", ",", "order_by", "=", "None", ",", "retry", "=", ...
46.16568
25.254438
def bar(h1: Histogram1D, ax: Axes, *, errors: bool = False, **kwargs): """Bar plot of 1D histograms.""" show_stats = kwargs.pop("show_stats", False) show_values = kwargs.pop("show_values", False) value_format = kwargs.pop("value_format", None) density = kwargs.pop("density", False) cumulative = kwargs.pop("cumulative", False) label = kwargs.pop("label", h1.name) lw = kwargs.pop("linewidth", kwargs.pop("lw", 0.5)) text_kwargs = pop_kwargs_with_prefix("text_", kwargs) data = get_data(h1, cumulative=cumulative, density=density) if "cmap" in kwargs: cmap = _get_cmap(kwargs) _, cmap_data = _get_cmap_data(data, kwargs) colors = cmap(cmap_data) else: colors = kwargs.pop("color", None) _apply_xy_lims(ax, h1, data, kwargs) _add_ticks(ax, h1, kwargs) if errors: err_data = get_err_data(h1, cumulative=cumulative, density=density) kwargs["yerr"] = err_data if "ecolor" not in kwargs: kwargs["ecolor"] = "black" _add_labels(ax, h1, kwargs) ax.bar(h1.bin_left_edges, data, h1.bin_widths, align="edge", label=label, color=colors, linewidth=lw, **kwargs) if show_values: _add_values(ax, h1, data, value_format=value_format, **text_kwargs) if show_stats: _add_stats_box(h1, ax, stats=show_stats)
[ "def", "bar", "(", "h1", ":", "Histogram1D", ",", "ax", ":", "Axes", ",", "*", ",", "errors", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", ":", "show_stats", "=", "kwargs", ".", "pop", "(", "\"show_stats\"", ",", "False", ")", "show_val...
35.945946
18
def named_entity_spans(self): """The spans of named entities.""" if not self.is_tagged(NAMED_ENTITIES): self.tag_named_entities() return self.spans(NAMED_ENTITIES)
[ "def", "named_entity_spans", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "NAMED_ENTITIES", ")", ":", "self", ".", "tag_named_entities", "(", ")", "return", "self", ".", "spans", "(", "NAMED_ENTITIES", ")" ]
39
4.2
def generate(self, request, **kwargs): """ proxy for the tileset.generate method """ # method check to avoid bad requests self.method_check(request, allowed=['get']) # create a basic bundle object for self.get_cached_obj_get. basic_bundle = self.build_bundle(request=request) # using the primary key defined in the url, obtain the tileset tileset = self.cached_obj_get( bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) # Return what the method output, tastypie will handle the serialization return self.create_response(request, tileset.generate())
[ "def", "generate", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "# method check to avoid bad requests", "self", ".", "method_check", "(", "request", ",", "allowed", "=", "[", "'get'", "]", ")", "# create a basic bundle object for self.get_cached_o...
40.375
21.0625
def find_other_sources(self, edge_lim = 0.015, min_val = 5000, ntargets = 250, extend_region_size=3, remove_excess=4, plot_flag = False, plot_window=15): """ Identify apertures for all sources on the postcard, both for the target and potential reference stars Args: edge_lim: The initial limit for the creation of apertures. The aperture will be a region of contiguous pixels with flux values larger than the product of ``edge_lim`` and the brightest pixel value for this star, as long as that product is larger than ``min_val`` (default: ``0.015``) min_val: Threshold for the minimum flux value in the ``integrated_postcard`` for a pixel to be included in the default apertures (default: ``5000``) ntargets: The maximum number of potential reference stars to be included in the analysis (default: ``250``) extend_region_size: After the initial apertures are generated, they will be optionally extended an additional number of pixels following this flag. Safe practice for reasonable apertures is to leave ``min_val`` at a value well above the noise and then extend apertures via this flag until they are of suitable size (default: ``3``) remove_excess: Stars with apertures that touch will be combined into a single aperture. This is done by iterating through the starlist; this flag represents the number of times the list will be iterated through to delete redundant apertures (default: ``4``) plot_flag: If true, a series of diagnostic plots will appear while this function runs to observe apertures for the target star and other stars. 
(default: ``False``) plot_window: If ``plot_flag`` is ``True``, the size of the region to be plotted around the target star to show the drawn aperture for visualization purposes only (default: ``15``) """ j,i = self.center region = self.integrated_postcard + 0.0 if plot_flag == True: ff = plt.imshow(self.integrated_postcard, interpolation='nearest', cmap='gray', vmax = np.percentile(region, 99.6)) plt.colorbar(ff) plt.show() targets = np.zeros_like(self.integrated_postcard) sizeimg = np.shape(targets)[0] jj = j + 0 ii = i + 0 edge = edge_lim lim = max(min_val, self.integrated_postcard[int(j), int(i)]*edge) maxpt = np.percentile(self.integrated_postcard, 94) bin_img = (region > lim) lab_img, n_features = label(bin_img) key_targ = (lab_img == (lab_img[int(j), int(i)])) tot = np.sum(key_targ) targets[key_targ] = 1 region[key_targ] = 0.0 lim = np.zeros(ntargets) for peaks in range(1,ntargets): k = np.argmax(region) j,i = np.unravel_index(k, region.shape) lim[peaks] = max(maxpt, edge*region[j,i]) bin_img = (region >= lim[peaks]) lab_img, n_features = label(bin_img) key_targ = (lab_img == (lab_img[j,i])) targets[key_targ] = peaks + 1 region[key_targ] = 0.0 lab_img, n_features = label(targets) for i in range(1, ntargets+1): for j in range(extend_region_size): border= mh.labeled.border(targets, 0, i) targets[border*(region < (10)*lim[peaks])] = i for i in range(2, ntargets+1): for j in range(2, ntargets+1): if i != j: border = mh.labeled.border(targets, i, j) if np.sum(border) != 0: targets[targets == j] = i targets = mh.labeled.remove_bordering(targets) for k in range(remove_excess): for i in range(ntargets): if np.sum(self.integrated_postcard[targets == i]) < 0.01: targets[targets > i] -= 1 self.targets = targets if plot_flag == True: plt.imshow(self.targets, interpolation='nearest') plt.show() plt.imshow(((targets == 1)*self.integrated_postcard + (targets == 1)*100000) [jj-plot_window:jj+plot_window,ii-plot_window:ii+plot_window], interpolation='nearest', cmap='gray', 
vmax=np.percentile(self.integrated_postcard, 99.6)) plt.show() plt.imshow((np.ceil(targets/100.0)*self.integrated_postcard+np.ceil(targets/500.0)*3500000), interpolation='nearest', cmap='gray', vmax=np.percentile(self.integrated_postcard, 99.99)) plt.show()
[ "def", "find_other_sources", "(", "self", ",", "edge_lim", "=", "0.015", ",", "min_val", "=", "5000", ",", "ntargets", "=", "250", ",", "extend_region_size", "=", "3", ",", "remove_excess", "=", "4", ",", "plot_flag", "=", "False", ",", "plot_window", "=",...
45.981132
27.584906
def validate_participation(self): """Ensure participation is of a certain type.""" if self.participation not in self._participation_valid_values: raise ValueError("participation should be one of: {valid}".format( valid=", ".join(self._participation_valid_values) ))
[ "def", "validate_participation", "(", "self", ")", ":", "if", "self", ".", "participation", "not", "in", "self", ".", "_participation_valid_values", ":", "raise", "ValueError", "(", "\"participation should be one of: {valid}\"", ".", "format", "(", "valid", "=", "\"...
52.666667
21
def get_unfrozen_copy(values): """Recursively convert `value`'s tuple values into lists, and frozendicts into dicts. Args: values (frozendict/tuple): the frozendict/tuple. Returns: values (dict/list): the unfrozen copy. """ if isinstance(values, (frozendict, dict)): return {key: get_unfrozen_copy(value) for key, value in values.items()} elif isinstance(values, (list, tuple)): return [get_unfrozen_copy(value) for value in values] # Nothing to unfreeze. return values
[ "def", "get_unfrozen_copy", "(", "values", ")", ":", "if", "isinstance", "(", "values", ",", "(", "frozendict", ",", "dict", ")", ")", ":", "return", "{", "key", ":", "get_unfrozen_copy", "(", "value", ")", "for", "key", ",", "value", "in", "values", "...
30.647059
21
def get_characters(self, *args, **kwargs): """Fetches lists of comic characters with optional filters. get /v1/public/characters/{characterId} :returns: CharacterDataWrapper >>> m = Marvel(public_key, private_key) >>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15") >>> print cdw.data.count 1401 >>> for result in cdw.data.results: ... print result.name Aginar Air-Walker (Gabriel Lan) Ajak Ajaxis Akemi """ #pass url string and params string to _call response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text) return CharacterDataWrapper(self, response, kwargs)
[ "def", "get_characters", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pass url string and params string to _call", "response", "=", "json", ".", "loads", "(", "self", ".", "_call", "(", "Character", ".", "resource_url", "(", ")", ",", ...
32.782609
19.913043
def get_user_by_userid(self, userid): ''' get user by user id ''' response, status_code = self.__pod__.Users.get_v2_user( sessionToken=self.__session__, uid=userid ).result() self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
[ "def", "get_user_by_userid", "(", "self", ",", "userid", ")", ":", "response", ",", "status_code", "=", "self", ".", "__pod__", ".", "Users", ".", "get_v2_user", "(", "sessionToken", "=", "self", ".", "__session__", ",", "uid", "=", "userid", ")", ".", "...
39.25
12.25
async def on_shutdown(self, app): """ Graceful shutdown handler See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown """ for ws in self.clients.copy(): await ws.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown') self.shutdown()
[ "async", "def", "on_shutdown", "(", "self", ",", "app", ")", ":", "for", "ws", "in", "self", ".", "clients", ".", "copy", "(", ")", ":", "await", "ws", ".", "close", "(", "code", "=", "WSCloseCode", ".", "GOING_AWAY", ",", "message", "=", "'Server sh...
33
13.4
def noaa(D="", path="", wds_url="", lpd_url="", version=""): """ Convert between NOAA and LiPD files | Example: LiPD to NOAA converter | 1: L = lipd.readLipd() | 2: lipd.noaa(L, "/Users/someuser/Desktop", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/noaa-templates/data-version-1.0.0", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/data-version-1.0.0", "v1-1.0.0") | Example: NOAA to LiPD converter | 1: lipd.readNoaa() | 2: lipd.noaa() :param dict D: Metadata :param str path: Path where output files will be written to :param str wds_url: WDSPaleoUrl, where NOAA template file will be stored on NOAA's FTP server :param str lpd_url: URL where LiPD file will be stored on NOAA's FTP server :param str version: Version of the dataset :return none: """ global files, cwd # When going from NOAA to LPD, use the global "files" variable. # When going from LPD to NOAA, use the data from the LiPD Library. # Choose the mode _mode = noaa_prompt() start = clock() # LiPD mode: Convert LiPD files to NOAA files if _mode == "1": # _project, _version = noaa_prompt_1() if not version or not lpd_url: print("Missing parameters: Please try again and provide all parameters.") return if not D: print("Error: LiPD data must be provided for LiPD -> NOAA conversions") else: if "paleoData" in D: _d = copy.deepcopy(D) D = lpd_to_noaa(_d, wds_url, lpd_url, version, path) else: # For each LiPD file in the LiPD Library for dsn, dat in D.items(): _d = copy.deepcopy(dat) # Process this data through the converter _d = lpd_to_noaa(_d, wds_url, lpd_url, version, path) # Overwrite the data in the LiPD object with our new data. D[dsn] = _d # If no wds url is provided, then remove instances from jsonld metadata if not wds_url: D = rm_wds_url(D) # Write out the new LiPD files, since they now contain the new NOAA URL data if(path): writeLipd(D, path) else: print("Path not provided. 
Writing to CWD...") writeLipd(D, cwd) # NOAA mode: Convert NOAA files to LiPD files elif _mode == "2": # Pass through the global files list. Use NOAA files directly on disk. noaa_to_lpd(files) else: print("Invalid input. Try again.") end = clock() logger_benchmark.info(log_benchmark("noaa", start, end)) return
[ "def", "noaa", "(", "D", "=", "\"\"", ",", "path", "=", "\"\"", ",", "wds_url", "=", "\"\"", ",", "lpd_url", "=", "\"\"", ",", "version", "=", "\"\"", ")", ":", "global", "files", ",", "cwd", "# When going from NOAA to LPD, use the global \"files\" variable.",...
40.590909
23.80303
def maintainer(self): """ >>> package = yarg.get('yarg') >>> package.maintainer Maintainer(name=u'Kura', email=u'kura@kura.io') """ maintainer = namedtuple('Maintainer', 'name email') return maintainer(name=self._package['maintainer'], email=self._package['maintainer_email'])
[ "def", "maintainer", "(", "self", ")", ":", "maintainer", "=", "namedtuple", "(", "'Maintainer'", ",", "'name email'", ")", "return", "maintainer", "(", "name", "=", "self", ".", "_package", "[", "'maintainer'", "]", ",", "email", "=", "self", ".", "_packa...
40.222222
12.222222
def get_objects(self): """Returns a list of objects coming from the "uids" request parameter """ # Create a mapping of source ARs for copy uids = self.request.form.get("uids", "") if not uids: # check for the `items` parammeter uids = self.request.form.get("items", "") if isinstance(uids, basestring): uids = uids.split(",") unique_uids = OrderedDict().fromkeys(uids).keys() return filter(None, map(self.get_object_by_uid, unique_uids))
[ "def", "get_objects", "(", "self", ")", ":", "# Create a mapping of source ARs for copy", "uids", "=", "self", ".", "request", ".", "form", ".", "get", "(", "\"uids\"", ",", "\"\"", ")", "if", "not", "uids", ":", "# check for the `items` parammeter", "uids", "="...
43.833333
10.5
def run_once(cls, the_callable, userdata=None, delay_until=None): """Class method to run a one-shot task, immediately.""" cls.run_iterations(the_callable, userdata=userdata, run_immediately=True, delay_until=delay_until)
[ "def", "run_once", "(", "cls", ",", "the_callable", ",", "userdata", "=", "None", ",", "delay_until", "=", "None", ")", ":", "cls", ".", "run_iterations", "(", "the_callable", ",", "userdata", "=", "userdata", ",", "run_immediately", "=", "True", ",", "del...
78
30.333333
def path(project, credentials): """Get the path to the project (static method)""" user, oauth_access_token = parsecredentials(credentials) #pylint: disable=unused-variable return settings.ROOT + "projects/" + user + '/' + project + "/"
[ "def", "path", "(", "project", ",", "credentials", ")", ":", "user", ",", "oauth_access_token", "=", "parsecredentials", "(", "credentials", ")", "#pylint: disable=unused-variable", "return", "settings", ".", "ROOT", "+", "\"projects/\"", "+", "user", "+", "'/'", ...
64
24.25
def export_mv_grid(self, session, mv_grid_districts): """ Exports MV grids to database for visualization purposes Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_grid_districts : List of MV grid_districts (instances of MVGridDistrictDing0 class) whose MV grids are exported. Returns ------- int Description #TODO """ # check arguments if not all(isinstance(_, int) for _ in mv_grid_districts): raise TypeError('`mv_grid_districts` has to be a list of integers.') srid = str(int(cfg_ding0.get('geo', 'srid'))) # delete all existing datasets # db_int.sqla_mv_grid_viz.__table__.create(conn) # create if not exist # change_owner_to(conn, # db_int.sqla_mv_grid_viz.__table_args__['schema'], # db_int.sqla_mv_grid_viz.__tablename__, # 'oeuser') session.query(db_int.sqla_mv_grid_viz).delete() session.commit() # build data array from MV grids (nodes and branches) for grid_district in self.mv_grid_districts(): grid_id = grid_district.mv_grid.id_db # init arrays for nodes mv_stations = [] mv_cable_distributors = [] mv_circuit_breakers = [] lv_load_area_centres = [] lv_stations = [] mv_generators = [] lines = [] # get nodes from grid's graph and append to corresponding array for node in grid_district.mv_grid._graph.nodes(): if isinstance(node, LVLoadAreaCentreDing0): lv_load_area_centres.append((node.geo_data.x, node.geo_data.y)) elif isinstance(node, MVCableDistributorDing0): mv_cable_distributors.append((node.geo_data.x, node.geo_data.y)) elif isinstance(node, MVStationDing0): mv_stations.append((node.geo_data.x, node.geo_data.y)) elif isinstance(node, CircuitBreakerDing0): mv_circuit_breakers.append((node.geo_data.x, node.geo_data.y)) elif isinstance(node, GeneratorDing0): mv_generators.append((node.geo_data.x, node.geo_data.y)) # create shapely obj from stations and convert to # geoalchemy2.types.WKBElement # set to None if no objects found (otherwise SQLAlchemy will throw an error). 
if lv_load_area_centres: lv_load_area_centres_wkb = from_shape(MultiPoint(lv_load_area_centres), srid=srid) else: lv_load_area_centres_wkb = None if mv_cable_distributors: mv_cable_distributors_wkb = from_shape(MultiPoint(mv_cable_distributors), srid=srid) else: mv_cable_distributors_wkb = None if mv_circuit_breakers: mv_circuit_breakers_wkb = from_shape(MultiPoint(mv_circuit_breakers), srid=srid) else: mv_circuit_breakers_wkb = None if mv_stations: mv_stations_wkb = from_shape(Point(mv_stations), srid=srid) else: mv_stations_wkb = None if mv_generators: mv_generators_wkb = from_shape(MultiPoint(mv_generators), srid=srid) else: mv_generators_wkb = None # get edges (lines) from grid's graph and append to corresponding array for branch in grid_district.mv_grid.graph_edges(): line = branch['adj_nodes'] lines.append(((line[0].geo_data.x, line[0].geo_data.y), (line[1].geo_data.x, line[1].geo_data.y))) # create shapely obj from lines and convert to # geoalchemy2.types.WKBElement mv_lines_wkb = from_shape(MultiLineString(lines), srid=srid) # get nodes from lv grid districts and append to corresponding array for lv_load_area in grid_district.lv_load_areas(): for lv_grid_district in lv_load_area.lv_grid_districts(): station = lv_grid_district.lv_grid.station() if station not in grid_district.mv_grid.graph_isolated_nodes(): lv_stations.append((station.geo_data.x, station.geo_data.y)) lv_stations_wkb = from_shape(MultiPoint(lv_stations), srid=srid) # add dataset to session dataset = db_int.sqla_mv_grid_viz( grid_id=grid_id, geom_mv_station=mv_stations_wkb, geom_mv_cable_dists=mv_cable_distributors_wkb, geom_mv_circuit_breakers=mv_circuit_breakers_wkb, geom_lv_load_area_centres=lv_load_area_centres_wkb, geom_lv_stations=lv_stations_wkb, geom_mv_generators=mv_generators_wkb, geom_mv_lines=mv_lines_wkb) session.add(dataset) # commit changes to db session.commit() # logger.info('=====> MV Grids exported') logger.info('MV Grids exported')
[ "def", "export_mv_grid", "(", "self", ",", "session", ",", "mv_grid_districts", ")", ":", "# check arguments", "if", "not", "all", "(", "isinstance", "(", "_", ",", "int", ")", "for", "_", "in", "mv_grid_districts", ")", ":", "raise", "TypeError", "(", "'`...
42.162602
22.634146
def add_cable_dist(self, lv_cable_dist):
    """Register a LV cable distributor with this grid.

    The distributor is appended to the internal list and inserted into
    the grid graph, but only if it is a ``LVCableDistributorDing0``
    instance that has not been registered before.

    Parameters
    ----------
    lv_cable_dist : LVCableDistributorDing0
        Cable distributor to add. #TODO
    """
    is_new = lv_cable_dist not in self._cable_distributors
    is_valid_type = isinstance(lv_cable_dist, LVCableDistributorDing0)
    if not (is_new and is_valid_type):
        return
    self._cable_distributors.append(lv_cable_dist)
    self.graph_add_node(lv_cable_dist)
[ "def", "add_cable_dist", "(", "self", ",", "lv_cable_dist", ")", ":", "if", "lv_cable_dist", "not", "in", "self", ".", "_cable_distributors", "and", "isinstance", "(", "lv_cable_dist", ",", "LVCableDistributorDing0", ")", ":", "self", ".", "_cable_distributors", "...
43.416667
19.166667
async def xtrim(self, name: str, max_len: int, approximate=True) -> int:
    """
    [NOTICE] Not officially released yet

    Trim the stream down to (roughly) ``max_len`` entries.  XTRIM is
    designed to accept different trimming strategies, even if currently
    only MAXLEN is implemented.

    :param name: name of the stream
    :param max_len: max length of the stream after being trimmed
    :param approximate: when True, redis trims lazily and may leave a few
        tens of entries more than ``max_len`` (but never fewer than 1000
        items less); when False the limit is applied exactly
    :return: number of entries trimmed
    """
    args = ['MAXLEN']
    if approximate:
        args.append('~')
    args.append(max_len)
    return await self.execute_command('XTRIM', name, *args)
[ "async", "def", "xtrim", "(", "self", ",", "name", ":", "str", ",", "max_len", ":", "int", ",", "approximate", "=", "True", ")", "->", "int", ":", "pieces", "=", "[", "'MAXLEN'", "]", "if", "approximate", ":", "pieces", ".", "append", "(", "'~'", "...
37.428571
16.571429
def fetch_aggregation_results(self):
    """
    Attach the queued aggregations to the Search object in creation
    order, execute the query and return the elasticsearch response.

    Side effects: resets existing aggregations first, increments the
    parent aggregation counter per bucket, forces ``size=0`` (no hits,
    only aggregations) and flushes the queued aggregations afterwards.

    :returns: a dictionary containing the response from elasticsearch
    """
    self.reset_aggregations()

    for aggregation in self.aggregations.values():
        self.search.aggs.bucket(self.parent_agg_counter, aggregation)
        self.parent_agg_counter += 1

    self.search = self.search.extra(size=0)
    result = self.search.execute()
    self.flush_aggregations()
    return result.to_dict()
[ "def", "fetch_aggregation_results", "(", "self", ")", ":", "self", ".", "reset_aggregations", "(", ")", "for", "key", ",", "val", "in", "self", ".", "aggregations", ".", "items", "(", ")", ":", "self", ".", "search", ".", "aggs", ".", "bucket", "(", "s...
35.210526
18.789474
def _add_annots(self, layout, annots):
    """Add PDF annotation elements to the layout object.

    :param layout: layout element that receives one ``Annot`` child per
        annotation; it is also returned for convenience.
    :param annots: pdfminer object (possibly an indirect reference)
        holding the annotation list; falsy values leave ``layout``
        unchanged.
    :return: the (mutated) ``layout`` object.
    """
    if annots:
        for annot in resolve1(annots):
            # Resolve indirect PDF object references to plain dicts.
            annot = resolve1(annot)
            if annot.get('Rect') is not None:
                annot['bbox'] = annot.pop('Rect') # Rename key
                annot = self._set_hwxy_attrs(annot)
            try:
                # The URI, when present, lives under the 'A' (action) dict.
                annot['URI'] = resolve1(annot['A'])['URI']
            except KeyError:
                pass
            for k, v in six.iteritems(annot):
                # XML attribute values must be strings; stringify the rest.
                if not isinstance(v, six.string_types):
                    annot[k] = obj_to_string(v)
            elem = parser.makeelement('Annot', annot)
            layout.add(elem)
    return layout
[ "def", "_add_annots", "(", "self", ",", "layout", ",", "annots", ")", ":", "if", "annots", ":", "for", "annot", "in", "resolve1", "(", "annots", ")", ":", "annot", "=", "resolve1", "(", "annot", ")", "if", "annot", ".", "get", "(", "'Rect'", ")", "...
41.684211
12.052632
def diff(old, new):
    """
    Compute the differences between two network topologies.

    Returns an ``OrderedDict`` with keys ``added``, ``removed`` and
    ``changed``, each holding a NetJSON NetworkGraph compatible dict or
    ``None`` when nothing changed in that category.
    """
    protocol, version = new.protocol, new.version
    revision, metric = new.revision, new.metric

    # nodes/links present in both graphs
    in_both = _find_unchanged(old.graph, new.graph)
    added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
    removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
    changed_edges = _find_changed(old.graph, new.graph, in_both)

    def _to_netjson(nodes, edges):
        # shorthand: every category shares protocol/version/revision/metric
        return _netjson_networkgraph(protocol, version, revision, metric,
                                     nodes, edges, dict=True)

    added = None
    if added_nodes.nodes() or added_edges.edges():
        added = _to_netjson(added_nodes.nodes(data=True),
                            added_edges.edges(data=True))
    removed = None
    if removed_nodes.nodes() or removed_edges.edges():
        removed = _to_netjson(removed_nodes.nodes(data=True),
                              removed_edges.edges(data=True))
    changed = _to_netjson([], changed_edges) if changed_edges else None

    return OrderedDict((
        ('added', added),
        ('removed', removed),
        ('changed', changed)
    ))
[ "def", "diff", "(", "old", ",", "new", ")", ":", "protocol", "=", "new", ".", "protocol", "version", "=", "new", ".", "version", "revision", "=", "new", ".", "revision", "metric", "=", "new", ".", "metric", "# calculate differences", "in_both", "=", "_fi...
39.714286
18.714286
def __splitAttrs(self, strArgs):
    '''
    Splits the C{View} attributes in C{strArgs} and optionally adds the view id to the
    C{viewsById} list.

    Unique Ids
    ==========
    It is very common to find C{View}s having B{NO_ID} as the Id. This turns very difficult to
    use L{self.findViewById()}. To help in this situation this method assigns B{unique Ids}.

    The B{unique Ids} are generated using the pattern C{id/no_id/<number>} with C{<number>}
    starting at 1.

    NOTE(review): this is Python 2 code (C{print >>} statements).

    @type strArgs: str
    @param strArgs: the string containing the raw list of attributes and values
    @return: Returns the attributes map (always contains an C{uniqueId} key).
    '''
    if self.useUiAutomator:
        raise RuntimeError("This method is not compatible with UIAutomator")
    # replace the spaces in text:mText to preserve them in later split
    # they are translated back after the attribute matches
    textRE = re.compile('%s=%s,' % (self.textProperty, _nd('len')))
    m = textRE.search(strArgs)
    if m:
        # The declared length tells us exactly where the text value ends,
        # so spaces inside it can be masked before the whitespace split.
        __textStart = m.end()
        __textLen = int(m.group('len'))
        __textEnd = m.end() + __textLen
        s1 = strArgs[__textStart:__textEnd]
        s2 = s1.replace(' ', WS)
        strArgs = strArgs.replace(s1, s2, 1)

    # Patterns for the three token shapes: view id, attr=value, class@oid.
    idRE = re.compile("(?P<viewId>id/\S+)")
    attrRE = re.compile('%s(?P<parens>\(\))?=%s,(?P<val>[^ ]*)' % (_ns('attr'), _nd('len')), flags=re.DOTALL)
    hashRE = re.compile('%s@%s' % (_ns('class'), _nh('oid')))

    attrs = {}
    viewId = None
    m = idRE.search(strArgs)
    if m:
        viewId = m.group('viewId')
        if DEBUG:
            print >>sys.stderr, "found view with id=%s" % viewId

    for attr in strArgs.split():
        m = attrRE.match(attr)
        if m:
            __attr = m.group('attr')
            __parens = '()' if m.group('parens') else ''
            __len = int(m.group('len'))
            __val = m.group('val')
            if WARNINGS and __len != len(__val):
                warnings.warn("Invalid len: expected: %d found: %d s=%s e=%s" % (__len, len(__val), __val[:50], __val[-50:]))
            if __attr == self.textProperty:
                # restore spaces that have been replaced
                __val = __val.replace(WS, ' ')
            attrs[__attr + __parens] = __val
        else:
            m = hashRE.match(attr)
            if m:
                attrs['class'] = m.group('class')
                attrs['oid'] = m.group('oid')
            else:
                if DEBUG:
                    print >>sys.stderr, attr, "doesn't match"

    if True:  # was assignViewById
        if not viewId:
            # If the view has NO_ID we are assigning a default id here (id/no_id) which is
            # immediately incremented if another view with no id was found before to generate
            # a unique id
            viewId = "id/no_id/1"
        if viewId in self.viewsById:
            # sometimes the view ids are not unique, so let's generate a unique id here
            i = 1
            while True:
                newId = re.sub('/\d+$', '', viewId) + '/%d' % i
                if not newId in self.viewsById:
                    break
                i += 1
            viewId = newId
        if DEBUG:
            print >>sys.stderr, "adding viewById %s" % viewId
        # We are assigning a new attribute to keep the original id preserved, which could have
        # been NO_ID repeated multiple times
        attrs['uniqueId'] = viewId

    return attrs
[ "def", "__splitAttrs", "(", "self", ",", "strArgs", ")", ":", "if", "self", ".", "useUiAutomator", ":", "raise", "RuntimeError", "(", "\"This method is not compatible with UIAutomator\"", ")", "# replace the spaces in text:mText to preserve them in later split", "# they are tra...
41.670455
23.147727
def messages(self):
    """Return how many messages remain before the rate limit is hit.

    Computed as the unused fraction of the limit's unit value scaled by
    the limit's message count, rounded down.
    """
    remaining_fraction = (self.limit.unit_value - self.level) / self.limit.unit_value
    return int(math.floor(remaining_fraction * self.limit.value))
[ "def", "messages", "(", "self", ")", ":", "return", "int", "(", "math", ".", "floor", "(", "(", "(", "self", ".", "limit", ".", "unit_value", "-", "self", ".", "level", ")", "/", "self", ".", "limit", ".", "unit_value", ")", "*", "self", ".", "li...
43.6
24.8
def namedb_create(path, genesis_block):
    """
    Create a sqlite3 db at the given path, build all tables and indexes
    from the schema script, and seed the genesis block.

    Raises if the database file already exists.
    Returns the open connection.
    """
    global BLOCKSTACK_DB_SCRIPT

    if os.path.exists(path):
        raise Exception("Database '%s' already exists" % path)

    # autocommit mode; huge timeout so concurrent access never gives up early
    con = sqlite3.connect(path, isolation_level=None, timeout=2**30)
    for statement in BLOCKSTACK_DB_SCRIPT.split(";"):
        db_query_execute(con, statement + ";", ())

    con.row_factory = namedb_row_factory

    # create genesis block
    namedb_create_token_genesis(con, genesis_block['rows'], genesis_block['history'])
    return con
[ "def", "namedb_create", "(", "path", ",", "genesis_block", ")", ":", "global", "BLOCKSTACK_DB_SCRIPT", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "Exception", "(", "\"Database '%s' already exists\"", "%", "path", ")", "lines", "=",...
28.272727
20.272727
def Zigrang_Sylvester_1(Re, eD):
    r'''Darcy friction factor via the explicit Zigrang-Sylvester (1982)
    correlation, as presented in Winning & Coole (2013).

    .. math::
        \frac{1}{\sqrt{f_f}} = -4\log\left[\frac{\epsilon}{3.7D}
        - \frac{5.02}{Re}\log A_5\right]

        A_5 = \frac{\epsilon}{3.7D} + \frac{13}{Re}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    Stated valid range: 4E3 <= Re <= 1E8; 4E-5 <= eD <= 5E-2.

    Examples
    --------
    >>> Zigrang_Sylvester_1(1E5, 1E-4)
    0.018646892425980794

    References
    ----------
    .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
       Computational Efficiency for Turbulent Flow in Pipes." Flow,
       Turbulence and Combustion 90, no. 1 (2013): 1-27.
    .. [2] Zigrang, D. J., and N. D. Sylvester. "Explicit Approximations to
       the Solution of Colebrook's Friction Factor Equation." AIChE Journal
       28, no. 3 (1982): 514-15.
    '''
    rel_rough = eD/3.7
    A5 = rel_rough + 13/Re
    # Fanning friction factor from the correlation, then convert to Darcy
    inv_sqrt_ff = -4*log10(rel_rough - 5.02/Re*log10(A5))
    return 4/inv_sqrt_ff**2
[ "def", "Zigrang_Sylvester_1", "(", "Re", ",", "eD", ")", ":", "A5", "=", "eD", "/", "3.7", "+", "13", "/", "Re", "ff", "=", "(", "-", "4", "*", "log10", "(", "eD", "/", "3.7", "-", "5.02", "/", "Re", "*", "log10", "(", "A5", ")", ")", ")", ...
28.181818
24.454545
def get_as_nullable_float(self, key):
    """
    Converts map element into a float or returns None if conversion is not possible.

    :param key: an index of element to get.

    :return: float value of the element or None if conversion is not supported.
    """
    return FloatConverter.to_nullable_float(self.get(key))
[ "def", "get_as_nullable_float", "(", "self", ",", "key", ")", ":", "value", "=", "self", ".", "get", "(", "key", ")", "return", "FloatConverter", ".", "to_nullable_float", "(", "value", ")" ]
36
20.6
def get_template(self, template_id):
    """
    Get the template for a given template id.

    :param template_id: id of the template, str
    :return: ``AgreementTemplate`` built from the on-chain tuple, or
        ``None`` when the template is missing or malformed.
    """
    raw = self.contract_concise.getTemplate(template_id)
    # a valid template is a 4-tuple; anything else is treated as absent
    if not raw or len(raw) != 4:
        return None
    return AgreementTemplate(*raw)
[ "def", "get_template", "(", "self", ",", "template_id", ")", ":", "template", "=", "self", ".", "contract_concise", ".", "getTemplate", "(", "template_id", ")", "if", "template", "and", "len", "(", "template", ")", "==", "4", ":", "return", "AgreementTemplat...
29
15.333333
def write_contents(self, table, reader):
    """Stream the rows of `table` into the target database via COPY.

    :Parameters:
      - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table`
        object that represents the table to read/write.
      - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader`
        object that allows reading from the data source.

    Returns None
    """
    faker = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
    quoted_columns = ['"%s"' % c['name'] for c in table.columns]
    self.copy_from(faker, '"%s"' % table.name, quoted_columns)
[ "def", "write_contents", "(", "self", ",", "table", ",", "reader", ")", ":", "f", "=", "self", ".", "FileObjFaker", "(", "table", ",", "reader", ".", "read", "(", "table", ")", ",", "self", ".", "process_row", ",", "self", ".", "verbose", ")", "self"...
54
38.454545
def read (self, files):
    """Read settings from given config files.

    @param files: list of configuration file names to read
    @raises: LinkCheckerError on syntax errors in the config file(s)
    """
    assert isinstance(files, list), "Invalid file list %r" % files
    try:
        # read_ok holds the subset of files that parsed successfully
        # (ConfigParser.read() semantics).
        self.read_ok = super(LCConfigParser, self).read(files)
        if len(self.read_ok) < len(files):
            failed_files = set(files) - set(self.read_ok)
            log.warn(LOG_CHECK, "Could not read configuration files %s.", failed_files)
        # Read all the configuration parameters from the given files.
        self.read_checking_config()
        self.read_authentication_config()
        self.read_filtering_config()
        self.read_output_config()
        self.read_plugin_config()
    except Exception as msg:
        # Wrap any parsing/processing problem in the project's own
        # exception type so callers only need to catch LinkCheckerError.
        raise LinkCheckerError(
            _("Error parsing configuration: %s") % unicode(msg))
[ "def", "read", "(", "self", ",", "files", ")", ":", "assert", "isinstance", "(", "files", ",", "list", ")", ",", "\"Invalid file list %r\"", "%", "files", "try", ":", "self", ".", "read_ok", "=", "super", "(", "LCConfigParser", ",", "self", ")", ".", "...
45.25
16.75
def get_json_response_object(self, datatable):
    """
    Build the JSON-compatible dict serialized for a dataTables.js AJAX
    response.

    Keys follow the dataTables server-side protocol: ``draw`` echoes the
    (URI-escaped) request counter, ``recordsTotal``/``recordsFiltered``
    carry the unfiltered/filtered counts, and ``data`` holds one dict per
    row with the pk and extra data moved into the ``DT_RowId`` /
    ``DT_RowData`` keys that dataTables expects.
    """
    # Force the record set to be computed so the two count attributes
    # read below are populated.
    datatable.populate_records()

    draw = getattr(self.request, self.request.method).get('draw', None)
    if draw is not None:
        draw = escape_uri_path(draw)

    rows = []
    for record in datatable.get_records():
        rows.append(dict(record, **{
            'DT_RowId': record.pop('pk'),
            'DT_RowData': record.pop('_extra_data'),
        }))

    return {
        'draw': draw,
        'recordsFiltered': datatable.unpaged_record_count,
        'recordsTotal': datatable.total_initial_record_count,
        'data': rows,
    }
[ "def", "get_json_response_object", "(", "self", ",", "datatable", ")", ":", "# Ensure the object list is calculated.", "# Calling get_records() will do this implicitly, but we want simultaneous access to the", "# 'total_initial_record_count', and 'unpaged_record_count' values.", "datatable", ...
46.071429
23.285714
def resolve_invite(invite):
    """
    Resolves an invite from a :class:`Invite`, URL or ID

    Parameters
    -----------
    invite: Union[:class:`Invite`, :class:`Object`, :class:`str`]
        The invite.

    Returns
    --------
    :class:`str`
        The invite code.
    """
    from .invite import Invite  # circular import
    if isinstance(invite, (Invite, Object)):
        return invite.id
    rx = r'(?:https?\:\/\/)?discord(?:\.gg|app\.com\/invite)\/(.+)'
    m = re.match(rx, invite)
    # fall back to the raw string when it does not look like an invite URL
    return m.group(1) if m else invite
[ "def", "resolve_invite", "(", "invite", ")", ":", "from", ".", "invite", "import", "Invite", "# circular import", "if", "isinstance", "(", "invite", ",", "Invite", ")", "or", "isinstance", "(", "invite", ",", "Object", ")", ":", "return", "invite", ".", "i...
25.26087
21
def name(self, gender: Optional[Gender] = None) -> str:
    """Generate a random name.

    :param gender: Gender's enum object.
    :return: Name.

    :Example:
        John.
    """
    gender_key = self._validate_enum(gender, Gender)
    candidates = self._data['names'].get(gender_key)
    return self.random.choice(candidates)
[ "def", "name", "(", "self", ",", "gender", ":", "Optional", "[", "Gender", "]", "=", "None", ")", "->", "str", ":", "key", "=", "self", ".", "_validate_enum", "(", "gender", ",", "Gender", ")", "names", "=", "self", ".", "_data", "[", "'names'", "]...
27.75
14.666667
def password_change_done(self, request, extra_context=None):
    """
    Displays the "success" page after a password change.

    Uses ``self.password_change_done_template`` when one is configured
    (an explicit ``None`` means "use the CMS default template").
    """
    from django.contrib.auth.views import password_change_done
    if self.password_change_done_template is not None:
        template = self.password_change_done_template
    else:
        template = 'cms/password_change_done.html'
    return password_change_done(
        request,
        extra_context=extra_context or {},
        template_name=template,
    )
[ "def", "password_change_done", "(", "self", ",", "request", ",", "extra_context", "=", "None", ")", ":", "from", "django", ".", "contrib", ".", "auth", ".", "views", "import", "password_change_done", "defaults", "=", "{", "'extra_context'", ":", "extra_context",...
44.583333
17.916667
def write_ply(self, output_file):
    """Export ``PointCloud`` to PLY file for viewing in MeshLab."""
    # one row per vertex: x y z followed by r g b
    vertex_data = np.hstack([self.coordinates, self.colors])
    with open(output_file, 'w') as ply:
        ply.write(self.ply_header.format(vertex_count=len(self.coordinates)))
        np.savetxt(ply, vertex_data, '%f %f %f %d %d %d')
[ "def", "write_ply", "(", "self", ",", "output_file", ")", ":", "points", "=", "np", ".", "hstack", "(", "[", "self", ".", "coordinates", ",", "self", ".", "colors", "]", ")", "with", "open", "(", "output_file", ",", "'w'", ")", "as", "outfile", ":", ...
57
14.571429
def flag_dipthongs(self, syllables: List[str]) -> List[int]:
    """
    Return a list of syllable indices that contain a dipthong.

    Syllables starting with "qu" are never flagged.  A syllable
    containing several dipthongs is flagged once per match, mirroring
    the original scan order.

    :param syllables:
    :return:
    """
    flagged = []
    for position, syllable in enumerate(syllables):
        for dipthong in self.constants.DIPTHONGS:
            if dipthong not in syllable:
                continue
            if not string_utils.starts_with_qu(syllable):
                flagged.append(position)
    return flagged
[ "def", "flag_dipthongs", "(", "self", ",", "syllables", ":", "List", "[", "str", "]", ")", "->", "List", "[", "int", "]", ":", "long_positions", "=", "[", "]", "for", "idx", ",", "syl", "in", "enumerate", "(", "syllables", ")", ":", "for", "dipthong"...
35.857143
14.714286
def route_create_or_update(name, address_prefix, next_hop_type, route_table, resource_group,
                           next_hop_ip_address=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Create or update a route within a specified route table.

    :param name: The name of the route to create.

    :param address_prefix: The destination CIDR to which the route applies.

    :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values
        are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address: Optional IP address to which packets should be forwarded. Next hop
        values are only allowed in routes where the next_hop_type is 'VirtualAppliance'.

    :param route_table: The name of the route table containing the route.

    :param resource_group: The resource group name assigned to the route table.

    :return: dict of the created/updated route's attributes, or a dict with an
        'error' key when the model could not be built/parsed or the API call failed.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.route_create_or_update test-rt '10.0.0.0/8' test-rt-table testgroup

    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)

    try:
        # Build the SDK's Route model object from the given parameters.
        rt_model = __utils__['azurearm.create_object_model'](
            'network',
            'Route',
            address_prefix=address_prefix,
            next_hop_type=next_hop_type,
            next_hop_ip_address=next_hop_ip_address,
            **kwargs
        )
    except TypeError as exc:
        result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
        return result

    try:
        # create_or_update returns a poller; wait() blocks until the
        # long-running Azure operation completes.
        route = netconn.routes.create_or_update(
            resource_group_name=resource_group,
            route_table_name=route_table,
            route_name=name,
            route_parameters=rt_model
        )
        route.wait()
        rt_result = route.result()
        result = rt_result.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    except SerializationError as exc:
        result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}

    return result
[ "def", "route_create_or_update", "(", "name", ",", "address_prefix", ",", "next_hop_type", ",", "route_table", ",", "resource_group", ",", "next_hop_ip_address", "=", "None", ",", "*", "*", "kwargs", ")", ":", "netconn", "=", "__utils__", "[", "'azurearm.get_clien...
34.901639
27.590164
def rtt_read(self, buffer_index, num_bytes):
    """Reads and consumes data from the given RTT buffer.

    At most ``num_bytes`` bytes are read; when fewer bytes are waiting,
    the entire contents of the RTT buffer are returned.  The returned
    data is automatically removed from the RTT buffer.

    Args:
      self (JLink): the ``JLink`` instance
      buffer_index (int): the index of the RTT buffer to read from
      num_bytes (int): the maximum number of bytes to read

    Returns:
      A list of bytes read from RTT.

    Raises:
      JLinkRTTException if the underlying JLINK_RTTERMINAL_Read call fails.
    """
    read_buf = (ctypes.c_ubyte * num_bytes)()
    result = self._dll.JLINK_RTTERMINAL_Read(buffer_index, read_buf, num_bytes)
    if result < 0:
        raise errors.JLinkRTTException(result)
    # slicing a ctypes array yields a plain list of ints
    return list(read_buf[:result])
[ "def", "rtt_read", "(", "self", ",", "buffer_index", ",", "num_bytes", ")", ":", "buf", "=", "(", "ctypes", ".", "c_ubyte", "*", "num_bytes", ")", "(", ")", "bytes_read", "=", "self", ".", "_dll", ".", "JLINK_RTTERMINAL_Read", "(", "buffer_index", ",", "...
36.653846
23.307692
def est_gaba_conc(self):
    """
    Estimate gaba concentration based on equation adapted from Sanacora
    1999, p1045 (stores the result in ``self.gaba_conc_est``).

    Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
    F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
    gamma-aminobutyric acid levels in depressed patients determined by
    proton magnetic resonance spectroscopy. Archives of general
    psychiatry, 56(11), 1043.
    """
    # gaba_auc / creatine_auc are produced by fit_gaba(); run it lazily
    if not hasattr(self, 'gaba_params'):
        self.fit_gaba()
    # estimate [GABA] according to equation9
    self.gaba_conc_est = self.gaba_auc / self.creatine_auc * 1.5 * 9.0
[ "def", "est_gaba_conc", "(", "self", ")", ":", "# need gaba_auc and creatine_auc", "if", "not", "hasattr", "(", "self", ",", "'gaba_params'", ")", ":", "self", ".", "fit_gaba", "(", ")", "# estimate [GABA] according to equation9", "gaba_conc_est", "=", "self", ".", ...
37.25
22.25
def toNoUintArray(arr):
    '''
    Cast an unsigned-integer array to the next wider signed dtype so that
    every original value remains representable.

    Parameters
    ----------
    arr : numpy.ndarray
        Input array; returned unchanged unless its dtype kind is ``'u'``.

    Returns
    -------
    numpy.ndarray
        uint8 -> int16, uint16 -> int32, uint32 -> int64.  uint64 has no
        signed-integer superset, so it falls back to float64 (matching
        NumPy's own promotion, ``np.promote_types(np.uint64, np.int8)``);
        note float64 cannot represent every uint64 value exactly.
    '''
    d = arr.dtype
    if d.kind == 'u':
        # BUGFIX: 8-byte unsigned dtypes previously raised KeyError on the
        # hard-coded dict; they now fall back to float64.
        target = {1: np.int16, 2: np.int32, 4: np.int64}.get(d.itemsize, np.float64)
        arr = arr.astype(target)
    return arr
[ "def", "toNoUintArray", "(", "arr", ")", ":", "d", "=", "arr", ".", "dtype", "if", "d", ".", "kind", "==", "'u'", ":", "arr", "=", "arr", ".", "astype", "(", "{", "1", ":", "np", ".", "int16", ",", "2", ":", "np", ".", "int32", ",", "4", ":...
27.454545
16.181818
def do_async_recv(self, bufsize):
    """
    Receive any completed frames from the socket and dispatch them.

    This function should only be called after a read event on a file
    descriptor; a zero-length read means the peer closed the connection.
    """
    chunk = self.sock.recv(bufsize)
    if not chunk:
        raise socket.error('no data to receive')
    self.recvbuf += chunk

    # dispatch every complete frame accumulated so far
    while contains_frame(self.recvbuf):
        frame, self.recvbuf = pop_frame(self.recvbuf)
        frame = self.apply_recv_hooks(frame, False)
        if not self.recv_callback:
            raise ValueError('no callback installed for %s' % frame)
        self.recv_callback(frame)
[ "def", "do_async_recv", "(", "self", ",", "bufsize", ")", ":", "data", "=", "self", ".", "sock", ".", "recv", "(", "bufsize", ")", "if", "len", "(", "data", ")", "==", "0", ":", "raise", "socket", ".", "error", "(", "'no data to receive'", ")", "self...
31.9
18.8