def printClassTree(self, element=None, showids=False, labels=False, showtype=False):
    """
    Print the class tree of an ontology nicely to stdout.

    Note: indentation is sized so that ids up to 3 digits fit in, plus a space.
    [123]1--
    [1]123--
    [12]12--
    """
    TYPE_MARGIN = 11  # length for owl:class etc.
    if not element:  # first time
        for x in self.toplayer_classes:
            printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN)
    else:
        printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)

def _priority_key(pep8_result):
    """Key for sorting PEP8 results.

    Global fixes should be done first. This is important for things like
    indentation.
    """
    priority = [
        # Fix multiline colon-based before semicolon based.
        'e701',
        # Break multiline statements early.
        'e702',
        # Things that make lines longer.
        'e225', 'e231',
        # Remove extraneous whitespace before breaking lines.
        'e201',
        # Shorten whitespace in comment before resorting to wrapping.
        'e262'
    ]
    middle_index = 10000
    lowest_priority = [
        # We need to shorten lines last since the logical fixer can get in a
        # loop, which causes us to exit early.
        'e501'
    ]
    key = pep8_result['id'].lower()
    try:
        return priority.index(key)
    except ValueError:
        try:
            return middle_index + lowest_priority.index(key) + 1
        except ValueError:
            return middle_index

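A quick illustration of the ordering this key produces (hypothetical result dicts; only the 'id' field is read):

# E701 has priority 0, E225 priority 2, unknown codes land at middle_index
# (10000), and E501 is pushed to the very end (10001).
results = [{'id': 'E501'}, {'id': 'E225'}, {'id': 'E701'}, {'id': 'W291'}]
ordered = sorted(results, key=_priority_key)
assert [r['id'] for r in ordered] == ['E701', 'E225', 'W291', 'E501']
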
def create(self, unique_name=values.unset, friendly_name=values.unset,
           identity=values.unset, deployment_sid=values.unset,
           enabled=values.unset):
    """
    Create a new DeviceInstance

    :param unicode unique_name: A unique, addressable name of this Device.
    :param unicode friendly_name: A human readable description for this Device.
    :param unicode identity: An identifier of the Device user.
    :param unicode deployment_sid: The unique SID of the Deployment group.
    :param bool enabled: The enabled flag.

    :returns: Newly created DeviceInstance
    :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
    """
    data = values.of({
        'UniqueName': unique_name,
        'FriendlyName': friendly_name,
        'Identity': identity,
        'DeploymentSid': deployment_sid,
        'Enabled': enabled,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return DeviceInstance(self._version, payload,
                          fleet_sid=self._solution['fleet_sid'])

def load_key_bindings_for_prompt(**kw):
    """
    Create a ``Registry`` object with the default key bindings for an input
    prompt.

    This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
    incremental search and auto suggestions.

    (Not for full screen applications.)
    """
    kw.setdefault('enable_abort_and_exit_bindings', True)
    kw.setdefault('enable_search', True)
    kw.setdefault('enable_auto_suggest_bindings', True)
    return load_key_bindings(**kw)

def run_nested_groups():
    """Run the nested groups example.

    This example shows a PhaseGroup in a PhaseGroup. No phase is terminal,
    so all are run in this order:
        main_phase
        inner_main_phase
        inner_teardown_phase
        teardown_phase
    """
    test = htf.Test(
        htf.PhaseGroup(
            main=[
                main_phase,
                htf.PhaseGroup.with_teardown(inner_teardown_phase)(
                    inner_main_phase),
            ],
            teardown=[teardown_phase]
        )
    )
    test.execute()

def delete_webhook(self, scaling_group, policy, webhook):
    """
    Deletes the specified webhook from the specified policy.
    """
    uri = "/%s/%s/policies/%s/webhooks/%s" % (
        self.uri_base, utils.get_id(scaling_group), utils.get_id(policy),
        utils.get_id(webhook))
    resp, resp_body = self.api.method_delete(uri)
    return None

def validate_overwrite_different_input_output(opts):
    """
    Make sure that if overwrite is set to False, the input and output
    folders are not set to the same location.

    :param opts: a namespace containing the attributes 'overwrite',
        'input', and 'output'
    :raises ValidationException: if 'input' and 'output' point to the same
        directory and 'overwrite' is set to False
    :return: True if 'overwrite' is set to True, or 'input'/'output' are
        separate directories
    """
    if opts.overwrite or path.abspath(opts.input) != path.abspath(opts.output):
        return True
    else:
        raise ValidationException("Input and output directories are the same, "
                                  "but --overwrite / -X flag is not provided.\n"
                                  "Do you want to overwrite your input files? "
                                  "If so, use the following command:\n"
                                  "\tanchorhub -X " + opts.input)

def api_call(method, end_point, params=None, client_id=None, access_token=None):
    """Call given API end_point with API keys.

    :param method: HTTP method (e.g. 'get', 'delete').
    :param end_point: API endpoint (e.g. 'users/john/sets').
    :param params: Dictionary to be sent in the query string
        (e.g. {'myparam': 'myval'})
    :param client_id: Quizlet client ID as string.
    :param access_token: Quizlet access token as string.

    client_id and access_token are mutually exclusive but mandatory.
    """
    if bool(client_id) == bool(access_token):
        raise ValueError('Provide either client_id or access_token, but not both')
    url = 'https://api.quizlet.com/2.0/{}'.format(end_point)
    if not params:
        params = {}
    if client_id:
        params['client_id'] = client_id
    headers = ({'Authorization': 'Bearer {}'.format(access_token)}
               if access_token else None)
    response = requests.request(method, url, params=params, headers=headers)
    if int(response.status_code / 100) != 2:
        error_title = ''
        try:
            error_title += ', ' + response.json()['error_title']
        except (ValueError, KeyError):
            pass
        raise ValueError(
            '{} returned {}{}'.format(url, response.status_code, error_title)
        )
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        pass

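A usage sketch; the endpoint and credential values below are placeholders, not real Quizlet keys:

# Fetch a user's sets with a client ID (placeholder value):
sets = api_call('get', 'users/john/sets', client_id='YOUR_CLIENT_ID')
# Or with an OAuth access token instead (the two are mutually exclusive):
# sets = api_call('get', 'users/john/sets', access_token='YOUR_ACCESS_TOKEN')
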
def pformat(self, prefix=()):
    '''
    Makes a pretty ASCII format of the data, suitable for
    displaying in a console or saving to a text file.
    Returns a list of lines.
    '''
    nan = float("nan")

    def sformat(segment, stat):
        FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}"
        p = stat.get_percentiles()
        p50, p95 = p.get(0.50, nan), p.get(0.95, nan)
        stat_str = FMT.format(stat.n, stat.mean, p50, p95, stat.max)
        return '{0}: {1}'.format(segment, stat_str)

    lines = []
    for path in sorted(self.path_stats.keys()):
        lines.append('=====================')
        for seg, stat in zip(path, self.path_stats[path]):
            lines.append(sformat(seg, stat))
    return lines

def connect_to_region(region_name):
    """
    Establish connection to AWS API.
    """
    logging.debug("Connecting to AWS region '%s'" % region_name)
    con = boto.vpc.connect_to_region(region_name)
    if not con:
        raise VpcRouteSetError("Could not establish connection to "
                               "region '%s'." % region_name)
    return con

async def post_heartbeat(self, msg, _context):
    """Update the status of a service."""
    name = msg.get('name')
    await self.service_manager.send_heartbeat(name)

def get_published_courses_in_account(self, account_id, params=None):
    """
    Return a list of published courses for the passed account ID.
    """
    # Avoid a mutable default argument; the shared dict would otherwise be
    # mutated across calls.
    if params is None:
        params = {}
    params["published"] = True
    return self.get_courses_in_account(account_id, params)

def log(self, msg, level=INFO):
    """Record a line of log in logger

    :param str msg: content of the message
    :param level: logging level
    :return: None
    """
    logger.log(level, '<{}> - '.format(self._name) + msg)

def noisy_layer(self, prefix, action_in, out_size, sigma0, non_linear=True):
    r"""
    A common dense layer: y = w^{T}x + b
    A noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x + (b+\epsilon_b*\sigma_b),
    where \epsilon are random variables sampled from factorized normal
    distributions and \sigma are trainable variables which are expected to
    vanish along the training procedure.
    """
    in_size = int(action_in.shape[1])

    epsilon_in = tf.random_normal(shape=[in_size])
    epsilon_out = tf.random_normal(shape=[out_size])
    epsilon_in = self.f_epsilon(epsilon_in)
    epsilon_out = self.f_epsilon(epsilon_out)
    epsilon_w = tf.matmul(
        a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
    epsilon_b = epsilon_out

    sigma_w = tf.get_variable(
        name=prefix + "_sigma_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(
            minval=-1.0 / np.sqrt(float(in_size)),
            maxval=1.0 / np.sqrt(float(in_size))))
    # TF noise generation can be unreliable on GPU.
    # If generating the noise on the CPU, lowering sigma0 to 0.1 may be helpful.
    sigma_b = tf.get_variable(
        name=prefix + "_sigma_b",
        shape=[out_size],
        dtype=tf.float32,
        # 0.5~GPU, 0.1~CPU
        initializer=tf.constant_initializer(sigma0 / np.sqrt(float(in_size))))

    w = tf.get_variable(
        name=prefix + "_fc_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=layers.xavier_initializer())
    b = tf.get_variable(
        name=prefix + "_fc_b",
        shape=[out_size],
        dtype=tf.float32,
        initializer=tf.zeros_initializer())

    action_activation = tf.nn.xw_plus_b(
        action_in, w + sigma_w * epsilon_w, b + sigma_b * epsilon_b)

    if not non_linear:
        return action_activation
    return tf.nn.relu(action_activation)

def printhtml(csvdiffs):
    """print the html"""
    soup = BeautifulSoup()
    html = Tag(soup, name="html")
    para1 = Tag(soup, name="p")
    para1.append(csvdiffs[0][0])
    para2 = Tag(soup, name="p")
    para2.append(csvdiffs[1][0])
    table = Tag(soup, name="table")
    table.attrs.update(dict(border="1"))

    soup.append(html)
    html.append(para1)
    html.append(para2)
    html.append(table)
    heading2table(soup, table, csvdiffs[3])
    for row in csvdiffs[4:]:
        row = [str(cell) for cell in row]
        row2table(soup, table, row)
    # print(soup.prettify())
    print(soup)

def entropy(string):
    """Calculate the entropy of a string, byte-wise over its UTF-8 encoding."""
    data = string.encode('utf-8')
    entropy = 0
    for number in range(256):
        # bytes.count() takes an int in Python 3; the original passed
        # chr(number), which raises a TypeError there.
        result = float(data.count(number)) / len(data)
        if result != 0:
            entropy = entropy - result * math.log(result, 2)
    return entropy

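A worked check of the fixed byte-wise implementation: a string whose UTF-8 encoding uses two byte values with equal frequency has exactly one bit of entropy per byte, and a constant string has none.

assert entropy('aabb') == 1.0   # two symbols, each with probability 0.5
assert entropy('aaaa') == 0.0   # a single repeated symbol carries no entropy
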
def quic_graph_lasso_cv(X, metric):
    """Run QuicGraphicalLassoCV on data with metric of choice.

    Compare results with GridSearchCV + quic_graph_lasso. The number of
    lambdas tested should be much lower with similar final lam_ selected.
    """
    print("QuicGraphicalLassoCV with:")
    print("   metric: {}".format(metric))
    model = QuicGraphicalLassoCV(
        cv=2,  # can't deal with more folds at small size
        n_refinements=6,
        n_jobs=1,
        init_method="cov",
        score_metric=metric,
    )
    model.fit(X)
    print("   len(cv_lams): {}".format(len(model.cv_lams_)))
    print("   lam_scale_: {}".format(model.lam_scale_))
    print("   lam_: {}".format(model.lam_))
    return model.covariance_, model.precision_, model.lam_

def param_errors(self, pnames=None):
    """Return an array with the parameter errors

    Parameters
    ----------
    pnames : list of str or None
        If a list of strings, get the Parameter objects with those names.
        If None, get all the Parameter objects.

    Returns
    -------
    ~numpy.array of parameter errors

    Note that this is a N x 2 array.
    """
    params = self.get_params(pnames)
    v = [p.errors for p in params]
    return np.array(v)

def _get_error_context(input_, token):
    """
    Build a context string that defines where on the line the defined
    error occurs. This consists of the characters ^ at the position and
    for the length defined by the lexer position and token length.
    """
    try:
        line = input_[token.lexpos: input_.index('\n', token.lexpos)]
    except ValueError:
        line = input_[token.lexpos:]

    i = input_.rfind('\n', 0, token.lexpos)
    if i < 0:
        i = 0
    line = input_[i:token.lexpos] + line

    lines = [line.strip('\r\n')]
    col = token.lexpos - i
    while len(lines) < 5 and i > 0:
        end = i
        i = input_.rfind('\n', 0, i)
        if i < 0:
            i = 0
        lines.insert(0, input_[i:end].strip('\r\n'))

    pointer = ''
    for dummy_ch in str(token.value):
        pointer += '^'

    pointline = ''
    i = 0
    while i < col - 1:
        if lines[-1][i].isspace():
            # keep tabs; replacing them would complicate the alignment
            pointline += lines[-1][i]
        else:
            pointline += ' '
        i += 1
    lines.append(pointline + pointer)
    return lines

def get_worksheet_keys(data_dict, result_info_key):
    """Gets sorted keys from the dict, ignoring result_info_key and 'meta' key

    Args:
        data_dict: dict to pull keys from

    Returns:
        list of keys in the dict other than the result_info_key
    """
    keys = set(data_dict.keys())
    keys.remove(result_info_key)
    if 'meta' in keys:
        keys.remove('meta')
    return sorted(keys)

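A minimal sketch of the expected behavior with placeholder values:

data = {'result_info': None, 'meta': None, 'b_sheet': [], 'a_sheet': []}
assert get_worksheet_keys(data, 'result_info') == ['a_sheet', 'b_sheet']
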
def start():
    '''
    Start the server loop
    '''
    from . import app
    root, apiopts, conf = app.get_app(__opts__)

    if not apiopts.get('disable_ssl', False):
        if 'ssl_crt' not in apiopts or 'ssl_key' not in apiopts:
            logger.error("Not starting '%s'. Options 'ssl_crt' and "
                         "'ssl_key' are required if SSL is not disabled.",
                         __name__)
            return None

        verify_certs(apiopts['ssl_crt'], apiopts['ssl_key'])

        cherrypy.server.ssl_module = 'builtin'
        cherrypy.server.ssl_certificate = apiopts['ssl_crt']
        cherrypy.server.ssl_private_key = apiopts['ssl_key']
        if 'ssl_chain' in apiopts:
            cherrypy.server.ssl_certificate_chain = apiopts['ssl_chain']

    cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), conf)

def compute_group_count(self, pattern):
    """Compute the number of regexp match groups when the pattern is
    provided to the :func:`Cardinality.make_pattern()` method.

    :param pattern: Item regexp pattern (as string).
    :return: Number of regexp match groups in the cardinality pattern.
    """
    group_count = self.group_count
    pattern_repeated = 1
    if self.is_many():
        pattern_repeated = 2
    return group_count + pattern_repeated * pattern_group_count(pattern)

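A worked example of the arithmetic (assuming pattern_group_count counts the capture groups in the item pattern, which is not shown in this excerpt):

# For a cardinality with group_count == 1 that is_many(), an item pattern
# containing 2 capture groups yields 1 + 2 * 2 == 5 total groups, because a
# "many" cardinality repeats the item pattern twice.
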
def create_network(self, name, driver=None, options=None, ipam=None,
                   check_duplicate=None, internal=False, labels=None,
                   enable_ipv6=False, attachable=None, scope=None,
                   ingress=None):
    """
    Create a network. Similar to the ``docker network create`` command.

    Args:
        name (str): Name of the network
        driver (str): Name of the driver used to create the network
        options (dict): Driver options as a key-value dictionary
        ipam (IPAMConfig): Optional custom IP scheme for the network.
        check_duplicate (bool): Request daemon to check for networks with
            same name. Default: ``None``.
        internal (bool): Restrict external access to the network. Default
            ``False``.
        labels (dict): Map of labels to set on the network. Default
            ``None``.
        enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
        attachable (bool): If enabled, and the network is in the global
            scope, non-service containers on worker nodes will be able to
            connect to the network.
        scope (str): Specify the network's scope (``local``, ``global`` or
            ``swarm``)
        ingress (bool): If set, create an ingress network which provides
            the routing-mesh in swarm mode.

    Returns:
        (dict): The created network reference object

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:
        A network using the bridge driver:

            >>> client.create_network("network1", driver="bridge")

        You can also create more advanced networks with custom IPAM
        configurations. For example, setting the subnet to
        ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.

        .. code-block:: python

            >>> ipam_pool = docker.types.IPAMPool(
                subnet='192.168.52.0/24',
                gateway='192.168.52.254'
            )
            >>> ipam_config = docker.types.IPAMConfig(
                pool_configs=[ipam_pool]
            )
            >>> docker_client.create_network("network1", driver="bridge",
                                             ipam=ipam_config)
    """
    if options is not None and not isinstance(options, dict):
        raise TypeError('options must be a dictionary')

    data = {
        'Name': name,
        'Driver': driver,
        'Options': options,
        'IPAM': ipam,
        'CheckDuplicate': check_duplicate,
    }

    if labels is not None:
        if version_lt(self._version, '1.23'):
            raise InvalidVersion(
                'network labels were introduced in API 1.23'
            )
        if not isinstance(labels, dict):
            raise TypeError('labels must be a dictionary')
        data["Labels"] = labels

    if enable_ipv6:
        if version_lt(self._version, '1.23'):
            raise InvalidVersion(
                'enable_ipv6 was introduced in API 1.23'
            )
        data['EnableIPv6'] = True

    if internal:
        if version_lt(self._version, '1.22'):
            raise InvalidVersion('Internal networks are not '
                                 'supported in API version < 1.22')
        data['Internal'] = True

    if attachable is not None:
        if version_lt(self._version, '1.24'):
            raise InvalidVersion(
                'attachable is not supported in API version < 1.24'
            )
        data['Attachable'] = attachable

    if ingress is not None:
        if version_lt(self._version, '1.29'):
            raise InvalidVersion(
                'ingress is not supported in API version < 1.29'
            )
        data['Ingress'] = ingress

    if scope is not None:
        if version_lt(self._version, '1.30'):
            raise InvalidVersion(
                'scope is not supported in API version < 1.30'
            )
        data['Scope'] = scope

    url = self._url("/networks/create")
    res = self._post_json(url, data=data)
    return self._result(res, json=True)

def starting_expression(source_code, offset):
    """Return the expression to complete"""
    word_finder = worder.Worder(source_code, True)
    expression, starting, starting_offset = \
        word_finder.get_splitted_primary_before(offset)
    if expression:
        return expression + '.' + starting
    return starting

def get_avg_price_stat(self) -> Decimal:
    """Calculates the statistical average price for the security, by
    averaging only the prices paid. Very simple first implementation.
    """
    avg_price = Decimal(0)

    price_total = Decimal(0)
    price_count = 0
    for account in self.security.accounts:
        # Ignore trading accounts.
        if account.type == AccountType.TRADING.name:
            continue

        for split in account.splits:
            # Don't count the non-transactions.
            if split.quantity == 0:
                continue
            price = split.value / split.quantity
            price_count += 1
            price_total += price

    if price_count:
        avg_price = price_total / price_count
    return avg_price

def get_current_instruction(self) -> Dict:
    """Gets the current instruction for this GlobalState.

    :return: The instruction at the current program counter.
    """
    instructions = self.environment.code.instruction_list
    return instructions[self.mstate.pc]

def insert_contribution_entries(database, entries):
    """Insert a set of records of a contribution report in the provided database.

    Insert a set of new records into the provided database without checking
    for conflicting entries.

    @param database: The MongoDB database to operate on. The contributions
        collection will be used from this database.
    @type database: pymongo.database.Database
    @param entries: The entries to insert into the database.
    @type entries: dict
    """
    # Materialize the map so a lazy iterator is not handed to pymongo (Py3).
    entries = list(map(clean_entry, entries))
    database.contributions.insert(entries, continue_on_error=True)

def read_rle(file_obj, header, bit_width, debug_logging):
    """Read a run-length encoded run from the given file object with the
    given header and bit_width.

    The count is determined from the header and the width is used to grab
    the value that's repeated. Yields the value repeated count times.
    """
    count = header >> 1
    zero_data = b"\x00\x00\x00\x00"
    width = (bit_width + 7) // 8
    data = file_obj.read(width)
    data = data + zero_data[len(data):]
    value = struct.unpack(b"<i", data)[0]
    if debug_logging:
        logger.debug("Read RLE group with value %s of byte-width %s and count %s",
                     value, width, count)
    for _ in range(count):
        yield value

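A small self-contained check of the decoding; the header's low bit is the RLE flag, so a header of 6 encodes a count of 3:

import io

# header 6 -> count = 6 >> 1 = 3; bit_width 8 -> one byte per value.
assert list(read_rle(io.BytesIO(b"\x05"), 6, 8, False)) == [5, 5, 5]
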
def read(self):
    """Reads the cache file as pickle file."""

    def warn(msg, elapsed_time, current_time):
        desc = self._cache_id_desc()
        self._warnings("{0} {1}: {2}s < {3}s",
                       msg, desc, elapsed_time, current_time)

    file_time = get_time()
    out = self._out
    if out is None:
        if self.verbose:
            self._warnings("reading {0} from disk", self._cache_id_desc())
        with open(self._cache_file, 'rb') as f_in:
            out = None
            while True:
                t_out = f_in.read(CHUNK_SIZE)
                if not len(t_out):
                    break
                if out is not None:
                    out += t_out
                else:
                    out = t_out
        self._out = out

    (cache_id_obj, elapsed_time, res) = self._read(out)
    self.ensure_cache_id(cache_id_obj)

    real_time = get_time() - file_time
    if elapsed_time is not None and real_time > elapsed_time:
        warn("reading cache from disk takes longer than computing!",
             elapsed_time, real_time)
    elif self._start_time is not None and elapsed_time is not None:
        current_time = get_time() - self._start_time
        if elapsed_time < current_time:
            warn("reading cache takes longer than computing!",
                 elapsed_time, current_time)

    self._last_access = get_time()
    return res

def discard_observer(self, observer):
    """Un-register an observer.

    Args:
        observer: The observer to un-register.

    Returns:
        True if an observer was removed, otherwise False.
    """
    discarded = False
    key = self.make_key(observer)
    if key in self.observers:
        del self.observers[key]
        discarded = True
    return discarded

def visit_Call(self, nodeCall):
    """
    Invoked when visiting a node of a function call.

    @param nodeCall: currently visiting node
    """
    super(PatternFinder, self).generic_visit(nodeCall)

    # Capture assignment like 'f = getattr(...)'.
    if hasattr(nodeCall.func, "func"):
        # In this case, the statement should be 'f = getattr(...)()'.
        nodeCall = nodeCall.func

    # Make sure the function's name is 'getattr'.
    if not hasattr(nodeCall.func, "id"):
        return
    if nodeCall.func.id != "getattr":
        return

    # Capture 'f = getattr(foo, "bar_%s" % baz )' or
    # 'f = getattr(foo, "bar_" + baz )'.
    nodeArgument = nodeCall.args[1]
    if not isinstance(nodeArgument, ast.BinOp):
        return
    operation = nodeArgument.op
    if type(operation) not in [ast.Mod, ast.Add]:
        return
    nodePattern = nodeArgument.left
    if not isinstance(nodePattern, ast.Str):
        return
    pattern = nodePattern.s
    if not ((type(operation) == ast.Add and pattern.endswith("_")) or
            (pattern.count("%s") == 1 and pattern.endswith("_%s"))):
        return
    pattern = pattern.replace("%s", "")
    if pattern[:1].isalpha() and not pattern[:1].islower():
        self.patternsClass.add(pattern)
    else:
        self.patternsFunc.add(pattern)

def clear(self):
    ''' Remove all content from the document but do not reset title.

    Returns:
        None

    '''
    self._push_all_models_freeze()
    try:
        while len(self._roots) > 0:
            r = next(iter(self._roots))
            self.remove_root(r)
    finally:
        self._pop_all_models_freeze()

def get_creators(self, *args, **kwargs):
    """
    Returns a full CreatorDataWrapper object for this story.

    /stories/{storyId}/creators

    :returns: CreatorDataWrapper -- A new request to API. Contains full
        results set.
    """
    from .creator import Creator, CreatorDataWrapper
    return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)

def get_matlab_value(val):
    """
    Extract a value from a Matlab file

    From the oct2py project, see
    https://pythonhosted.org/oct2py/conversions.html
    """
    import numpy as np

    # Extract each item of a list.
    if isinstance(val, list):
        return [get_matlab_value(v) for v in val]

    # Ignore leaf objects.
    if not isinstance(val, np.ndarray):
        return val

    # Convert user defined classes.
    if hasattr(val, 'classname'):
        out = dict()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        cls = type(val.classname, (object,), out)
        return cls()

    # Extract struct data.
    elif val.dtype.names:
        out = MatlabStruct()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        val = out

    # Extract cells.
    elif val.dtype.kind == 'O':
        val = val.squeeze().tolist()
        if not isinstance(val, list):
            val = [val]
        val = get_matlab_value(val)

    # Compress singleton values.
    elif val.size == 1:
        val = val.item()

    # Compress empty values.
    elif val.size == 0:
        if val.dtype.kind in 'US':
            val = ''
        else:
            val = []

    return val

def weld_variance(array, weld_type):
    """Returns the variance of the array.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input array.
    weld_type : WeldType
        Type of each element in the input array.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    weld_obj_mean = weld_mean(array, weld_type)

    obj_id, weld_obj = create_weld_object(array)
    weld_obj_mean_id = get_weld_obj_id(weld_obj, weld_obj_mean)

    weld_template = _weld_variance_code
    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              type=weld_type,
                                              mean=weld_obj_mean_id)

    return weld_obj

def new_output_file_opt(self, opt, name):
    """
    Add an option and return a new file handle
    """
    fil = File(name)
    self.add_output_opt(opt, fil)
    return fil

def walk_directory_directories_relative_path(self, relativePath=""):
    """
    Walk a certain directory in repository and yield all found
    directories relative path.

    :parameters:
        #. relativePath (str): The relative path of the directory.
    """
    # get directory info dict
    relativePath = os.path.normpath(relativePath)
    dirInfoDict, errorMessage = self.get_directory_info(relativePath)
    assert dirInfoDict is not None, errorMessage
    for dname in dict.__getitem__(dirInfoDict, "directories"):
        yield os.path.join(relativePath, dname)

def bow(self, tokens, remove_oov=False):
    """
    Create a bow representation of a list of tokens.

    Parameters
    ----------
    tokens : list
        The list of items to change into a bag of words representation.
    remove_oov : bool
        Whether to remove OOV items from the input.
        If this is True, the length of the returned BOW representation
        might not be the length of the original representation.

    Returns
    -------
    bow : generator
        A BOW representation of the list of items.
    """
    if remove_oov:
        tokens = [x for x in tokens if x in self.items]

    for t in tokens:
        try:
            yield self.items[t]
        except KeyError:
            if self.unk_index is None:
                raise ValueError("You supplied OOV items but didn't "
                                 "provide the index of the replacement "
                                 "glyph. Either set remove_oov to True, "
                                 "or set unk_index to the index of the "
                                 "item which replaces any OOV items.")
            yield self.unk_index

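A usage sketch under stated assumptions: `vocab` is an instance whose `items` maps tokens to indices; the enclosing class is not shown in this excerpt.

# vocab.items == {'the': 0, 'cat': 1, 'sat': 2}, vocab.unk_index is None
# list(vocab.bow(['the', 'cat', 'xyzzy'], remove_oov=True))  -> [0, 1]
# list(vocab.bow(['the', 'cat', 'xyzzy']))                   -> raises ValueError
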
def _set_properties(self):
    """Setup title and label"""
    self.SetTitle(_("About pyspread"))
    label = _("pyspread {version}\nCopyright Martin Manns")
    label = label.format(version=VERSION)
    self.about_label.SetLabel(label)

def draw_no_data(self):
    """Write the no data text to the svg"""
    no_data = self.node(
        self.graph.nodes['text_overlay'],
        'text',
        x=self.graph.view.width / 2,
        y=self.graph.view.height / 2,
        class_='no_data'
    )
    no_data.text = self.graph.no_data_text

def Validate(self, problems=default_problem_reporter):
    """Validate attribute values and this object's internal consistency.

    Returns:
        True iff all validation checks passed.
    """
    found_problem = False
    found_problem = ((not util.ValidateRequiredFieldsAreNotEmpty(
        self, self._REQUIRED_FIELD_NAMES, problems)) or found_problem)
    found_problem = self.ValidateAgencyUrl(problems) or found_problem
    found_problem = self.ValidateAgencyLang(problems) or found_problem
    found_problem = self.ValidateAgencyTimezone(problems) or found_problem
    found_problem = self.ValidateAgencyFareUrl(problems) or found_problem
    found_problem = self.ValidateAgencyEmail(problems) or found_problem

    return not found_problem

def get_child_by_name(self, childname):
    """Get a child node of the current instance by its name.

    :param childname: the name of the required child node.
    :type childname: str
    :returns: the first child node found with name `childname`.
    :rtype: Node or None
    """
    _childs = [_child for _child in self.childs if _child.name == childname]
    if len(_childs) > 1:
        logger.warning("%s.get_child_by_name: node:«%s» has more than 1 "
                       "childnode with name=«%s»."
                       % (self.__class__.__name__, self.name, childname))
    if len(_childs) == 0:
        _childnode = None
    else:
        _childnode = _childs[0]
    return _childnode

def get_output_margin(self, status=None):
    """Get the output margin (number of rows for the prompt, footer and
    timing message)."""
    margin = (self.get_reserved_space()
              + self.get_prompt(self.prompt).count('\n') + 1)
    if special.is_timing_enabled():
        margin += 1
    if status:
        margin += 1 + status.count('\n')

    return margin

def _growth_curve_pooling_group(self, distr='glo', as_rural=False):
    """
    Return flood growth curve function based on `amax_records` from a
    pooling group.

    :param as_rural: assume catchment is fully rural. Default: false.
    :type as_rural: bool
    :return: Inverse cumulative distribution function with one parameter
        `aep` (annual exceedance probability)
    :type: :class:`.GrowthCurve`
    """
    if not self.donor_catchments:
        self.find_donor_catchments()
    gc = GrowthCurve(distr, *self._var_and_skew(self.donor_catchments))

    # Record intermediate results
    self.results_log['distr_name'] = distr.upper()
    self.results_log['distr_params'] = gc.params
    return gc

def _get_connection(self, handle, expect_state=None):
    """Get a connection object, logging an error if it's in an
    unexpected state.
    """
    conndata = self._connections.get(handle)

    if (conndata and expect_state is not None
            and conndata['state'] != expect_state):
        self._logger.error("Connection in unexpected state, wanted=%s, got=%s",
                           expect_state, conndata['state'])
    return conndata

def _get_rescale_factors(self, reference_shape, meta_info):
    """Compute the resampling factor for height and width of the input array

    :param reference_shape: Tuple specifying height and width in pixels of
        high-resolution array
    :type reference_shape: tuple of ints
    :param meta_info: Meta-info dictionary of input eopatch. Defines OGC
        request and parameters used to create the eopatch
    :return: Rescale factor for rows and columns
    :rtype: tuple of floats
    """
    # Figure out resampling size
    height, width = reference_shape

    service_type = ServiceType(meta_info['service_type'])
    rescale = None
    if service_type == ServiceType.WMS:
        if (self.cm_size_x is None) and (self.cm_size_y is not None):
            rescale = (self.cm_size_y / height, self.cm_size_y / height)
        elif (self.cm_size_x is not None) and (self.cm_size_y is None):
            rescale = (self.cm_size_x / width, self.cm_size_x / width)
        else:
            rescale = (self.cm_size_y / height, self.cm_size_x / width)
    elif service_type == ServiceType.WCS:
        # Case where only one resolution for cloud masks is specified in WCS
        if self.cm_size_y is None:
            self.cm_size_y = self.cm_size_x
        elif self.cm_size_x is None:
            self.cm_size_x = self.cm_size_y

        hr_res_x = int(meta_info['size_x'].strip('m'))
        hr_res_y = int(meta_info['size_y'].strip('m'))
        lr_res_x = int(self.cm_size_x.strip('m'))
        lr_res_y = int(self.cm_size_y.strip('m'))
        rescale = (hr_res_y / lr_res_y, hr_res_x / lr_res_x)

    return rescale

def logprob(self, actions, action_logits):
    """Logarithm of probability of given sample"""
    neg_log_prob = F.nll_loss(action_logits, actions, reduction='none')
    return -neg_log_prob

def create_token(key, payload):
    """Auth token generator

    payload should be a json encodable data structure
    """
    # Python 3: hmac needs a bytes key, an explicit digestmod (md5 was the
    # implicit Python 2 default), and bytes input. Requires `import hashlib`.
    token = hmac.new(key, digestmod=hashlib.md5)
    token.update(json.dumps(payload).encode('utf-8'))
    return token.hexdigest()

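A hypothetical usage, with a placeholder key (bytes, as hmac requires):

token = create_token(b'server-secret', {'user': 'alice', 'scope': 'read'})
# -> 32-char hex digest the server can recompute to verify the payload
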
def setting(key, default=None, expected_type=None, qsettings=None):
    """Helper function to get a value from settings under InaSAFE scope.

    :param key: Unique key for setting.
    :type key: basestring

    :param default: The default value in case of the key is not found or
        there is an error.
    :type default: basestring, None, boolean, int, float

    :param expected_type: The type of object expected.
    :type expected_type: type

    :param qsettings: A custom QSettings to use. If it's not defined, it
        will use the default one.
    :type qsettings: qgis.PyQt.QtCore.QSettings

    :returns: The value of the key in the setting.
    :rtype: object
    """
    if default is None:
        default = inasafe_default_settings.get(key, None)

    full_key = '%s/%s' % (APPLICATION_NAME, key)
    return general_setting(full_key, default, expected_type, qsettings)

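A hedged usage sketch; 'organisation' is a hypothetical key, not necessarily one InaSAFE defines:

org = setting('organisation', default='UNKNOWN', expected_type=str)
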
def complete(self):
    """Task is complete if completion marker is set and all requirements
    are complete.
    """
    is_complete = super(ORMWrapperTask, self).complete()
    for req in self.requires():
        is_complete &= req.complete()
    return is_complete

def read_pid_constants(self):
    """Reads back the PID constants stored on the Grizzly."""
    p = self._read_as_int(Addr.PConstant, 4)
    i = self._read_as_int(Addr.IConstant, 4)
    d = self._read_as_int(Addr.DConstant, 4)
    # Dividing by 2**16 converts the stored fixed-point values to floats;
    # a list comprehension avoids Python 3's lazy map object.
    return [x / (2 ** 16) for x in (p, i, d)]

def get_game_logs(self):
    """Returns team game logs as a pandas DataFrame"""
    logs = self.response.json()['resultSets'][0]['rowSet']
    headers = self.response.json()['resultSets'][0]['headers']
    df = pd.DataFrame(logs, columns=headers)
    df.GAME_DATE = pd.to_datetime(df.GAME_DATE)
    return df

def read_azimuth_noise_array(elts):
    """Read the azimuth noise vectors.

    The azimuth noise is normalized per swath to account for gain
    differences between the swaths in EW mode.

    This is based on the following reference:
    J. Park, A. A. Korosov, M. Babiker, S. Sandven and J. Won,
    "Efficient Thermal Noise Removal for Sentinel-1 TOPSAR
    Cross-Polarization Channel," in IEEE Transactions on Geoscience and
    Remote Sensing, vol. 56, no. 3, pp. 1555-1565, March 2018.
    doi: 10.1109/TGRS.2017.2765248
    """
    y = []
    x = []
    data = []
    for elt in elts:
        first_pixel = int(elt.find('firstRangeSample').text)
        last_pixel = int(elt.find('lastRangeSample').text)
        lines = elt.find('line').text.split()
        lut = elt.find('noiseAzimuthLut').text.split()
        pixels = [first_pixel, last_pixel]
        swath = elt.find('swath').text
        corr = 1
        if swath == 'EW1':
            corr = 1.5
        if swath == 'EW4':
            corr = 1.2
        if swath == 'EW5':
            corr = 1.5
        for pixel in pixels:
            y += [int(val) for val in lines]
            x += [pixel] * len(lines)
            data += [float(val) * corr for val in lut]
    return np.asarray(data), (x, y)

def cookie_dump(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False):
    """
    :rtype: ``Cookie.SimpleCookie``
    """
    cookie = SimpleCookie()
    cookie[key] = value
    for attr in ('max_age', 'expires', 'path', 'domain',
                 'secure', 'httponly'):
        attr_key = attr.replace('_', '-')
        attr_value = locals()[attr]
        if attr_value:
            cookie[key][attr_key] = attr_value
    return cookie

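A usage sketch; the rendered attribute order may vary between Python 2's Cookie and Python 3's http.cookies:

c = cookie_dump('session', 'abc123', max_age=3600, httponly=True)
print(c.output())
# -> Set-Cookie: session=abc123; HttpOnly; Max-Age=3600
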
def tas2eas(Vtas, H):
    """True Airspeed to Equivalent Airspeed"""
    rho = density(H)
    Veas = Vtas * np.sqrt(rho / rho0)
    return Veas

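Sanity checks, assuming `density()` implements a standard atmosphere so that density(0) == rho0:

# At sea level EAS equals TAS; at altitude rho < rho0, so EAS is lower.
assert abs(tas2eas(100.0, 0.0) - 100.0) < 1e-9
assert tas2eas(100.0, 10000.0) < 100.0
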
def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
    """The basic LSTM seq2seq model with bidirectional encoder."""
    with tf.variable_scope("lstm_seq2seq_bid_encoder"):
        if inputs is not None:
            inputs_length = common_layers.length_from_embedding(inputs)
            # Flatten inputs.
            inputs = common_layers.flatten4d3d(inputs)
            # LSTM encoder.
            _, final_encoder_state = lstm_bid_encoder(
                inputs, inputs_length, hparams, train, "encoder")
        else:
            inputs_length = None
            final_encoder_state = None
        # LSTM decoder.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right
        targets_length = common_layers.length_from_embedding(shifted_targets) + 1
        hparams_decoder = copy.copy(hparams)
        hparams_decoder.hidden_size = 2 * hparams.hidden_size
        decoder_outputs, _ = lstm(
            common_layers.flatten4d3d(shifted_targets),
            targets_length,
            hparams_decoder,
            train,
            "decoder",
            initial_state=final_encoder_state)
        return tf.expand_dims(decoder_outputs, axis=2)

def fragment6(pkt, fragSize):
    """
    Performs fragmentation of an IPv6 packet. The provided packet ('pkt')
    must already contain an IPv6ExtHdrFragment() class. The 'fragSize'
    argument is the expected maximum size of fragments (MTU). The list of
    packets is returned. If the packet does not contain an
    IPv6ExtHdrFragment class, it is returned in the result list unchanged.
    """
    pkt = pkt.copy()

    if IPv6ExtHdrFragment not in pkt:
        # TODO : automatically add a fragment before upper Layer
        #        at the moment, we do nothing and return initial packet
        #        as single element of a list
        return [pkt]

    # If the payload is bigger than 65535, a Jumbo payload must be used, as
    # an IPv6 packet can't be bigger than 65535 bytes.
    if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
        warning("An IPv6 packet can't be bigger than 65535, "
                "please use a Jumbo payload.")
        return []

    s = raw(pkt)  # for instantiation to get upper layer checksum right
    if len(s) <= fragSize:
        return [pkt]

    # Fragmentable part : fake IPv6 for Fragmentable part length computation
    fragPart = pkt[IPv6ExtHdrFragment].payload
    tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
    fragPartLen = len(tmp) - 40  # basic IPv6 header length
    fragPartStr = s[-fragPartLen:]

    # Grab Next Header for use in Fragment Header
    nh = pkt[IPv6ExtHdrFragment].nh

    # Keep fragment header
    fragHeader = pkt[IPv6ExtHdrFragment]
    del fragHeader.payload  # detach payload

    # Unfragmentable Part
    unfragPartLen = len(s) - fragPartLen - 8
    unfragPart = pkt
    del pkt[IPv6ExtHdrFragment].underlayer.payload  # detach payload

    # Cut the fragmentable part to fit fragSize. Inner fragments have
    # a length that is an integer multiple of 8 octets. last Frag MTU
    # can be anything below MTU
    lastFragSize = fragSize - unfragPartLen - 8
    innerFragSize = lastFragSize - (lastFragSize % 8)

    if lastFragSize <= 0 or innerFragSize == 0:
        warning("Provided fragment size value is too low. " +
                "Should be more than %d" % (unfragPartLen + 8))
        return [unfragPart / fragHeader / fragPart]

    remain = fragPartStr
    res = []
    fragOffset = 0                           # offset, incremented during creation
    fragId = random.randint(0, 0xffffffff)   # random id ...
    if fragHeader.id is not None:            # ... except id provided by user
        fragId = fragHeader.id
    fragHeader.m = 1
    fragHeader.id = fragId
    fragHeader.nh = nh

    # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
    while True:
        if (len(remain) > lastFragSize):
            tmp = remain[:innerFragSize]
            remain = remain[innerFragSize:]
            fragHeader.offset = fragOffset      # update offset
            fragOffset += (innerFragSize // 8)  # compute new one
            if IPv6 in unfragPart:
                unfragPart[IPv6].plen = None
            tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
            res.append(tempo)
        else:
            fragHeader.offset = fragOffset  # update offset
            fragHeader.m = 0
            if IPv6 in unfragPart:
                unfragPart[IPv6].plen = None
            tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
            res.append(tempo)
            break
    return res

def prepare_files(self):
    """Get files from data dump."""
    # Prepare files
    files = {}
    for f in self.data['files']:
        k = f['full_name']
        if k not in files:
            files[k] = []
        files[k].append(f)
    # Sort versions
    for k in files.keys():
        files[k].sort(key=lambda x: x['version'])
    self.files = files

def get_task_param_string(task):
    """Get all parameters of a task as one string

    Returns:
        str: task parameter string
    """
    # get dict str -> str from luigi
    param_dict = task.to_str_params()

    # sort keys, serialize
    items = []
    for key in sorted(param_dict.keys()):
        items.append("'{:s}': '{:s}'".format(key, param_dict[key]))
    return "{" + ", ".join(items) + "}"

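A hedged sketch with a hypothetical luigi task, for illustration only:

# class Fit(luigi.Task):
#     model = luigi.Parameter()
#     seed = luigi.IntParameter(default=1)
#
# get_task_param_string(Fit(model='glm', seed=7))
# -> "{'model': 'glm', 'seed': '7'}"
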
def get_all_invoice_payments(self, params=None):
    """
    Get all invoice payments.

    This will iterate over all pages until it gets all elements, so if the
    rate limit is exceeded it will throw an Exception and you will get
    nothing.

    :param params: search params
    :return: list
    """
    if not params:
        params = {}

    return self._iterate_through_pages(
        get_function=self.get_invoice_payments_per_page,
        resource=INVOICE_PAYMENTS,
        **{'params': params}
    )

def xlsx_to_csv(self, infile, worksheet=0, delimiter=","):
    """Convert xlsx to easier format first, since we want to use the
    convenience of the CSV library
    """
    wb = load_workbook(self.getInputFile())
    sheet = wb.worksheets[worksheet]
    buffer = StringIO()

    # extract all rows (Python 2 code: print >> and types.StringTypes)
    for n, row in enumerate(sheet.rows):
        line = []
        for cell in row:
            value = cell.value
            if type(value) in types.StringTypes:
                value = value.encode("utf8")
            if value is None:
                value = ""
            line.append(str(value))
        print >>buffer, delimiter.join(line)
    buffer.seek(0)
    return buffer

def _set_callables(modules):
    '''
    Set all Ansible modules callables

    :return:
    '''
    def _set_function(cmd_name, doc):
        '''
        Create a Salt function for the Ansible module.
        '''
        def _cmd(*args, **kw):
            '''
            Call an Ansible module as a function from the Salt.
            '''
            kwargs = {}
            if kw.get('__pub_arg'):
                for _kw in kw.get('__pub_arg', []):
                    if isinstance(_kw, dict):
                        kwargs = _kw
                        break
            return _caller.call(cmd_name, *args, **kwargs)
        _cmd.__doc__ = doc
        return _cmd

    for mod in modules:
        setattr(sys.modules[__name__], mod, _set_function(mod, 'Available'))

def split_obj(obj, prefix=None):
    '''
    Split the object, returning a 3-tuple with the flat object, optionally
    followed by the key for the subobjects and a list of those subobjects.
    '''
    # copy the object, optionally add the prefix before each key
    new = obj.copy() if prefix is None else {
        '{}_{}'.format(prefix, k): v for k, v in obj.items()
    }

    # try to find the key holding the subobject or a list of subobjects
    for k, v in new.items():
        # list of subobjects
        if isinstance(v, list):
            # this is a list of strings or ints, which is a one-to-many;
            # can't deal with that here, but leave the data in case it's
            # useful downstream
            if not isinstance(v[0], dict):
                new[k] = ','.join(v)
                return new, None, None
            del new[k]
            return new, k, v
        # or just one subobject
        elif isinstance(v, dict):
            del new[k]
            return new, k, [v]
    return new, None, None

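Two worked cases showing the shapes this returns:

# One-subobject case: the list of dicts is split out with its (prefixed) key.
flat, key, subs = split_obj({'id': 7, 'tags': [{'name': 'a'}]}, prefix='doc')
assert flat == {'doc_id': 7}
assert key == 'doc_tags' and subs == [{'name': 'a'}]

# List-of-scalars case: joined into a string, no subobjects to recurse into.
flat, key, subs = split_obj({'id': 7, 'tags': ['a', 'b']})
assert flat == {'id': 7, 'tags': 'a,b'} and key is None and subs is None
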
def _sum_wrapper(fn):
    """Wrapper to perform row-wise aggregation of list arguments and pass
    them to a function. The return value of the function is summed over the
    argument groups. Non-list arguments will be automatically cast to a
    list.
    """
    def wrapper(*args, **kwargs):
        v = 0
        new_args = _cast_args_to_list(args)
        for arg in zip(*new_args):
            v += fn(*arg, **kwargs)
        return v
    return wrapper

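A sketch of the intended behavior, assuming _cast_args_to_list (not shown here) turns each non-list argument into a single-element list:

dot = _sum_wrapper(lambda a, b: a * b)
# dot([1, 2, 3], [4, 5, 6]) -> 1*4 + 2*5 + 3*6 == 32
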
def rank(self, method='ordinal', ascending=True, mask=NotSpecified,
         groupby=NotSpecified):
    """
    Construct a new Factor representing the sorted rank of each column
    within each row.

    Parameters
    ----------
    method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
        The method used to assign ranks to tied elements. See
        `scipy.stats.rankdata` for a full description of the semantics for
        each ranking method. Default is 'ordinal'.
    ascending : bool, optional
        Whether to return sorted rank in ascending or descending order.
        Default is True.
    mask : zipline.pipeline.Filter, optional
        A Filter representing assets to consider when computing ranks.
        If mask is supplied, ranks are computed ignoring any asset/date
        pairs for which `mask` produces a value of False.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to perform ranking.

    Returns
    -------
    ranks : zipline.pipeline.factors.Rank
        A new factor that will compute the ranking of the data produced by
        `self`.

    Notes
    -----
    The default value for `method` is different from the default for
    `scipy.stats.rankdata`. See that function's documentation for a full
    description of the valid inputs to `method`.

    Missing or non-existent data on a given day will cause an asset to be
    given a rank of NaN for that day.

    See Also
    --------
    :func:`scipy.stats.rankdata`
    :class:`zipline.pipeline.factors.factor.Rank`
    """
    if groupby is NotSpecified:
        return Rank(self, method=method, ascending=ascending, mask=mask)

    return GroupedRowTransform(
        transform=rankdata if ascending else rankdata_1d_descending,
        transform_args=(method,),
        factor=self,
        groupby=groupby,
        dtype=float64_dtype,
        missing_value=nan,
        mask=mask,
        window_safe=True,
    )

def get_defining_component(pe_pe):
    '''
    Get the BridgePoint component (C_C) that defines the packageable
    element *pe_pe*.
    '''
    if pe_pe is None:
        return None

    if type(pe_pe).__name__ != 'PE_PE':
        pe_pe = one(pe_pe).PE_PE[8001]()

    ep_pkg = one(pe_pe).EP_PKG[8000]()
    if ep_pkg:
        return get_defining_component(ep_pkg)

    return one(pe_pe).C_C[8003]()

def translate_config(self, profile, merge=None, replace=None):
    """
    Translate the object to native configuration.

    In this context, merge and replace mean the following:

    * **Merge** - Elements that exist in both ``self`` and ``merge`` will
      use by default the values in ``merge`` unless ``self`` specifies a
      new one. Elements that exist only in ``self`` will be translated as
      they are and elements present only in ``merge`` will be removed.
    * **Replace** - All the elements in ``replace`` will either be removed
      or replaced by elements in ``self``.

    You can specify one of ``merge``, ``replace`` or none of them. If none
    of them are set we will just translate configuration.

    Args:
        profile (list): Which profiles to use.
        merge (Root): Object we want to merge with.
        replace (Root): Object we want to replace.
    """
    result = []
    for k, v in self:
        other_merge = getattr(merge, k) if merge else None
        other_replace = getattr(replace, k) if replace else None
        translator = Translator(
            v, profile, merge=other_merge, replace=other_replace
        )
        result.append(translator.translate())
    return "\n".join(result)

def Logger(name, **kargs):
    """Create and return logger"""
    path_dirs = PathDirs(**kargs)
    logging.captureWarnings(True)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    handler = logging.handlers.WatchedFileHandler(
        os.path.join(path_dirs.meta_dir, 'vent.log'))
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)-4d - '
                                  '%(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    if not logger.handlers:
        logger.addHandler(handler)
    return logger

def avg(self):
    """return the mean value"""
    # XXX rename this method
    if len(self.values) > 0:
        return sum(self.values) / float(len(self.values))
    else:
        return None

def is_translocated(graph: BELGraph, node: BaseEntity) -> bool: """Return true if over any of the node's edges, it is translocated.""" return _node_has_modifier(graph, node, TRANSLOCATION)
Return true if over any of the node's edges, it is translocated.
def annual_reading_counts(kind='all'):
    """
    Returns a list of dicts, one per year of reading. In year order.
    Each dict is like this (if kind is 'all'):

        {'year': datetime.date(2003, 1, 1),
         'book': 12,          # only included if kind is 'all' or 'book'
         'periodical': 18,    # only included if kind is 'all' or 'periodical'
         'total': 30,         # only included if kind is 'all'
        }

    We use the end_date of a Reading to count when that thing was read.

    kind is one of 'book', 'periodical' or 'all' ('all' counts both).
    """
    if kind == 'all':
        kinds = ['book', 'periodical']
    else:
        kinds = [kind]

    # This will have keys of years (strings) and dicts of data:
    # {
    #   '2003': {'books': 12, 'periodicals': 18},
    # }
    counts = OrderedDict()

    for k in kinds:
        qs = Reading.objects.exclude(end_date__isnull=True) \
                            .filter(publication__kind=k) \
                            .annotate(year=TruncYear('end_date')) \
                            .values('year') \
                            .annotate(count=Count('id')) \
                            .order_by('year')
        for year_data in qs:
            year_str = year_data['year'].strftime('%Y')
            if year_str not in counts:
                counts[year_str] = {'year': year_data['year']}
            counts[year_str][k] = year_data['count']

    # Now translate counts into our final list, with totals, and 0s for
    # kinds when they have no Readings for that year.
    counts_list = []

    for year_str, data in counts.items():
        year_data = {'year': data['year']}
        if kind == 'all':
            year_data['total'] = 0
        for k in kinds:
            if k in data:
                year_data[k] = data[k]
                if kind == 'all':
                    year_data['total'] += data[k]
            else:
                year_data[k] = 0
        counts_list.append(year_data)

    return counts_list
Returns a list of dicts, one per year of reading. In year order. Each dict is like this (if kind is 'all'): {'year': datetime.date(2003, 1, 1), 'book': 12, # only included if kind is 'all' or 'book' 'periodical': 18, # only included if kind is 'all' or 'periodical' 'total': 30, # only included if kind is 'all' } We use the end_date of a Reading to count when that thing was read. kind is one of 'book', 'periodical' or 'all' ('all' counts both).
def get_embedded_tweet(tweet):
    """
    Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary

    Args:
        tweet (Tweet): A Tweet object (not simply a dict)

    Returns:
        dict (or None, if the Tweet is neither a quote Tweet nor a Retweet):
        a dictionary representing the quote Tweet or the Retweet
    """
    if tweet.retweeted_tweet is not None:
        return tweet.retweeted_tweet
    elif tweet.quoted_tweet is not None:
        return tweet.quoted_tweet
    else:
        return None
Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary Args: tweet (Tweet): A Tweet object (not simply a dict) Returns: dict (or None, if the Tweet is neither a quote Tweet nor a Retweet): a dictionary representing the quote Tweet or the Retweet
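A standalone sketch of the same branching logic; types.SimpleNamespace stands in for the real Tweet class here:

from types import SimpleNamespace

def get_embedded(tweet):
    if tweet.retweeted_tweet is not None:
        return tweet.retweeted_tweet
    if tweet.quoted_tweet is not None:
        return tweet.quoted_tweet
    return None

rt = SimpleNamespace(retweeted_tweet={'id': 1}, quoted_tweet=None)
plain = SimpleNamespace(retweeted_tweet=None, quoted_tweet=None)
print(get_embedded(rt))     # {'id': 1}
print(get_embedded(plain))  # None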
def get_initial_states(self, input_var, init_state=None): """ :type input_var: T.var :rtype: dict """ initial_states = {} for state in self.state_names: if state != "state" or not init_state: if self._input_type == 'sequence' and input_var.ndim == 2: init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size) else: init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size) initial_states[state] = init_state return initial_states
:type input_var: T.var :rtype: dict
def serialize_array(array, domain=(0, 1), fmt='png', quality=70): """Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ normalized = _normalize_array(array, domain=domain) return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)
Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
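The private helpers (_normalize_array, _serialize_normalized_array) are not shown, so here is a rough standalone sketch of the same serialization idea using Pillow; the function name and exact behavior are assumptions, not the library's real code:

import io

import numpy as np
from PIL import Image

def serialize_rgb(array, fmt='png', quality=70):
    # expects a rank-3 uint8 array; quality only matters for lossy formats
    buf = io.BytesIO()
    Image.fromarray(array).save(buf, format=fmt, quality=quality)
    return buf

buf = serialize_rgb(np.zeros((8, 8, 3), dtype=np.uint8), fmt='jpeg')
print(len(buf.getvalue()), 'bytes')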
def generate_strings(project_base_dir, localization_bundle_path, tmp_directory, exclude_dirs, include_strings_file, special_ui_components_prefix): """ Calls the builtin 'genstrings' command with JTLocalizedString as the string to search for, and adds strings extracted from UI elements internationalized with 'JTL' + removes duplications. """ localization_directory = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME) if not os.path.exists(localization_directory): os.makedirs(localization_directory) localization_file = os.path.join(localization_directory, LOCALIZATION_FILENAME) # Creating the same directory tree structure in the tmp directory tmp_localization_directory = os.path.join(tmp_directory, DEFAULT_LANGUAGE_DIRECTORY_NAME) tmp_localization_file = os.path.join(tmp_localization_directory, LOCALIZATION_FILENAME) if os.path.isdir(tmp_localization_directory): shutil.rmtree(tmp_localization_directory) os.mkdir(tmp_localization_directory) logging.info("Running genstrings") source_files = extract_source_files(project_base_dir, exclude_dirs) genstrings_cmd = 'genstrings -s JTLocalizedString -o %s %s' % (tmp_localization_directory, " ".join( ['"%s"' % (source_file,) for source_file in source_files])) genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True) genstrings_out, genstrings_err = genstrings_process.communicate() remove_empty_comments_from_file(tmp_localization_file) add_genstrings_comments_to_file(tmp_localization_file, genstrings_err) genstrings_rc = genstrings_process.returncode if genstrings_rc != 0: logging.fatal("genstrings returned %d, aborting run!", genstrings_rc) sys.exit(genstrings_rc) create_localized_strings_from_ib_files(project_base_dir, exclude_dirs, tmp_localization_file, special_ui_components_prefix) if include_strings_file: target = open_strings_file(tmp_localization_file, "a") source = open_strings_file(include_strings_file, "r") target.write(source.read()) source.close() target.close() handle_duplications(tmp_localization_file) if os.path.isfile(localization_file): logging.info("Merging old localizable with new one...") merge_strings_files(localization_file, tmp_localization_file) else: logging.info("No Localizable yet, moving the created file...") shutil.move(tmp_localization_file, localization_file)
Calls the builtin 'genstrings' command with JTLocalizedString as the string to search for, and adds strings extracted from UI elements internationalized with 'JTL' + removes duplications.
def get_satellites_list(self, sat_type): """Get a sorted satellite list: master then spare :param sat_type: type of the required satellites (arbiters, schedulers, ...) :type sat_type: str :return: sorted satellites list :rtype: list[alignak.objects.satellitelink.SatelliteLink] """ satellites_list = [] if sat_type in ['arbiters', 'schedulers', 'reactionners', 'brokers', 'receivers', 'pollers']: for satellite in getattr(self, sat_type): satellites_list.append(satellite) satellites_list = master_then_spare(satellites_list) return satellites_list
Get a sorted satellite list: master then spare :param sat_type: type of the required satellites (arbiters, schedulers, ...) :type sat_type: str :return: sorted satellites list :rtype: list[alignak.objects.satellitelink.SatelliteLink]
def field2parameter(self, field, name="body", default_in="body"): """Return an OpenAPI parameter as a `dict`, given a marshmallow :class:`Field <marshmallow.Field>`. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameterObject """ location = field.metadata.get("location", None) prop = self.field2property(field) return self.property2parameter( prop, name=name, required=field.required, multiple=isinstance(field, marshmallow.fields.List), location=location, default_in=default_in, )
Return an OpenAPI parameter as a `dict`, given a marshmallow :class:`Field <marshmallow.Field>`. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameterObject
def _evaluate(self, indices, norm_distances, out=None): """Evaluate nearest interpolation.""" idx_res = [] for i, yi in zip(indices, norm_distances): if self.variant == 'left': idx_res.append(np.where(yi <= .5, i, i + 1)) else: idx_res.append(np.where(yi < .5, i, i + 1)) idx_res = tuple(idx_res) if out is not None: out[:] = self.values[idx_res] return out else: return self.values[idx_res]
Evaluate nearest interpolation.
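A self-contained sketch of the 'left'-vs-default tie handling above: norm_distances holds each point's fractional offset inside its grid cell, and the 0.5 threshold decides whether to snap to the left or right neighbor:

import numpy as np

i = np.array([0, 1, 2])          # index of each point's left grid neighbor
yi = np.array([0.2, 0.5, 0.9])   # normalized distance from that neighbor

print(np.where(yi <= .5, i, i + 1))  # 'left' variant: [0 1 3], ties snap left
print(np.where(yi < .5, i, i + 1))   # default:        [0 2 3], ties snap right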
def get_argflag(argstr_, default=False, help_='', return_specified=None,
                need_prefix=True, return_was_specified=False, argv=None,
                debug=None, **kwargs):
    """
    Checks if the commandline has a flag or a corresponding noflag

    Args:
        argstr_ (str, list, or tuple): the flag to look for
        default (bool): don't use this (default = False)
        help_ (str): a help string (default = '')
        return_specified (bool): returns if flag was specified or not
            (default = False)

    Returns:
        tuple: (parsed_val, was_specified)

    TODO:
        deprecate return_was_specified

    CommandLine:
        python -m utool.util_arg --exec-get_argflag --noface --exec-mode
        python -m utool.util_arg --exec-get_argflag --foo --exec-mode
        python -m utool.util_arg --exec-get_argflag --no-foo --exec-mode
        python -m utool.util_arg --exec-get_argflag --foo=True --exec-mode
        python -m utool.util_arg --exec-get_argflag --foo=False --exec-mode

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> argstr_ = '--foo'
        >>> default = False
        >>> help_ = ''
        >>> return_specified = True
        >>> (parsed_val, was_specified) = get_argflag(argstr_, default, help_, return_specified)
        >>> result = ('(parsed_val, was_specified) = %s' % (str((parsed_val, was_specified)),))
        >>> print(result)
    """
    if argv is None:
        argv = sys.argv
    assert isinstance(default, bool), 'default must be boolean'
    argstr_list = meta_util_iter.ensure_iterable(argstr_)
    #if VERYVERBOSE:
    #    print('[util_arg] checking argstr_list=%r' % (argstr_list,))
    # arg registration
    _register_arg(argstr_list, bool, default, help_)
    parsed_val = default
    was_specified = False

    if debug is None:
        debug = DEBUG

    # Check environment variables for default as well as argv
    import os
    #"""
    #set UTOOL_NOCNN=True
    #export UTOOL_NOCNN True
    #"""
    #argv_orig = argv[:]
    # HACK: make this not happen every time you loop
    for key, val in os.environ.items():
        key = key.upper()
        sentinal = 'UTOOL_'
        if key.startswith(sentinal):
            flag = '--' + key[len(sentinal):].lower().replace('_', '-')
            if val.upper() in ['TRUE', 'ON']:
                pass
            elif val.upper() in ['FALSE', 'OFF']:
                continue
            else:
                continue
                #flag += '=False'
            new_argv = [flag]
            argv = argv[:] + new_argv
            if debug:
                print('ENV SPECIFIED COMMAND LINE')
                print('argv.extend(new_argv=%r)' % (new_argv,))

    for argstr in argstr_list:
        #if VERYVERBOSE:
        #    print('[util_arg] * checking argstr=%r' % (argstr,))
        if not (argstr.find('--') == 0 or (argstr.find('-') == 0 and len(argstr) == 2)):
            raise AssertionError('Invalid argstr: %r' % (argstr,))
        if not need_prefix:
            noprefix = argstr.replace('--', '')
            if noprefix in argv:
                parsed_val = True
                was_specified = True
                break
        #if argstr.find('--no') == 0:
        #    argstr = argstr.replace('--no', '--')
        noarg = argstr.replace('--', '--no')
        if argstr in argv:
            parsed_val = True
            was_specified = True
            #if VERYVERBOSE:
            #    print('[util_arg] * ...WAS_SPECIFIED. AND PARSED')
            break
        elif noarg in argv:
            parsed_val = False
            was_specified = True
            #if VERYVERBOSE:
            #    print('[util_arg] * ...WAS_SPECIFIED. AND NOT PARSED')
            break
        elif argstr + '=True' in argv:
            parsed_val = True
            was_specified = True
            break
        elif argstr + '=False' in argv:
            parsed_val = False
            was_specified = True
            break

    if return_specified is None:
        return_specified = return_was_specified
    if return_specified:
        return parsed_val, was_specified
    else:
        return parsed_val
Checks if the commandline has a flag or a corresponding noflag Args: argstr_ (str, list, or tuple): the flag to look for default (bool): don't use this (default = False) help_ (str): a help string (default = '') return_specified (bool): returns if flag was specified or not (default = False) Returns: tuple: (parsed_val, was_specified) TODO: deprecate return_was_specified CommandLine: python -m utool.util_arg --exec-get_argflag --noface --exec-mode python -m utool.util_arg --exec-get_argflag --foo --exec-mode python -m utool.util_arg --exec-get_argflag --no-foo --exec-mode python -m utool.util_arg --exec-get_argflag --foo=True --exec-mode python -m utool.util_arg --exec-get_argflag --foo=False --exec-mode Example: >>> # DISABLE_DOCTEST >>> from utool.util_arg import * # NOQA >>> argstr_ = '--foo' >>> default = False >>> help_ = '' >>> return_specified = True >>> (parsed_val, was_specified) = get_argflag(argstr_, default, help_, return_specified) >>> result = ('(parsed_val, was_specified) = %s' % (str((parsed_val, was_specified)),)) >>> print(result)
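A stripped-down standalone sketch of the flag / noflag resolution order implemented above (note the noflag form is '--nofoo', since the code replaces '--' with '--no'):

def parse_flag(argstr, argv, default=False):
    noarg = argstr.replace('--', '--no')  # '--foo' -> '--nofoo'
    for candidate, value in [(argstr, True), (noarg, False),
                             (argstr + '=True', True),
                             (argstr + '=False', False)]:
        if candidate in argv:
            return value, True
    return default, False

print(parse_flag('--foo', ['--foo']))     # (True, True)
print(parse_flag('--foo', ['--nofoo']))   # (False, True)
print(parse_flag('--foo', []))            # (False, False)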
def get_col_width(self, col, tab): """Returns column width""" try: return self.col_widths[(col, tab)] except KeyError: return config["default_col_width"]
Returns column width
def callRemote(self, objectPath, methodName, interface=None, destination=None, signature=None, body=None, expectReply=True, autoStart=True, timeout=None, returnSignature=_NO_CHECK_RETURN): """ Calls a method on a remote DBus object and returns a deferred to the result. @type objectPath: C{string} @param objectPath: Path of the remote object @type methodName: C{string} @param methodName: Name of the method to call @type interface: None or C{string} @param interface: If specified, this specifies the interface containing the desired method @type destination: None or C{string} @param destination: If specified, this specifies the bus name containing the remote object @type signature: None or C{string} @param signature: If specified, this specifies the DBus signature of the body of the DBus MethodCall message. This string must be a valid Signature string as defined by the DBus specification. If arguments are supplied to the method call, this parameter must be provided. @type body: C{list} @param body: A C{list} of Python objects to encode. The list content must match the content of the signature parameter @type expectReply: C{bool} @param expectReply: If True (defaults to True) the returned deferred will be called back with the eventual result of the remote call. If False, the deferred will be immediately called back with None. @type autoStart: C{bool} @param autoStart: If True (defaults to True) DBus will attempt to automatically start a service to handle the method call if a service matching the target object is registered but not yet started. @type timeout: None or C{float} @param timeout: If specified and the remote call does not return a value before the timeout expires, the returned Deferred will be errbacked with a L{error.TimeOut} instance. @type returnSignature: C{string} @param returnSignature: If specified, the return values will be validated against the signature string. If the returned values do not mactch, the returned Deferred witl be errbacked with a L{error.RemoteError} instance. @rtype: L{twisted.internet.defer.Deferred} @returns: a Deferred to the result. If expectReply is False, the deferred will be immediately called back with None. """ try: mcall = message.MethodCallMessage( objectPath, methodName, interface=interface, destination=destination, signature=signature, body=body, expectReply=expectReply, autoStart=autoStart, oobFDs=self._toBeSentFDs, ) d = self.callRemoteMessage(mcall, timeout) d.addCallback(self._cbCvtReply, returnSignature) return d except Exception: return defer.fail()
Calls a method on a remote DBus object and returns a deferred to the result. @type objectPath: C{string} @param objectPath: Path of the remote object @type methodName: C{string} @param methodName: Name of the method to call @type interface: None or C{string} @param interface: If specified, this specifies the interface containing the desired method @type destination: None or C{string} @param destination: If specified, this specifies the bus name containing the remote object @type signature: None or C{string} @param signature: If specified, this specifies the DBus signature of the body of the DBus MethodCall message. This string must be a valid Signature string as defined by the DBus specification. If arguments are supplied to the method call, this parameter must be provided. @type body: C{list} @param body: A C{list} of Python objects to encode. The list content must match the content of the signature parameter @type expectReply: C{bool} @param expectReply: If True (defaults to True) the returned deferred will be called back with the eventual result of the remote call. If False, the deferred will be immediately called back with None. @type autoStart: C{bool} @param autoStart: If True (defaults to True) DBus will attempt to automatically start a service to handle the method call if a service matching the target object is registered but not yet started. @type timeout: None or C{float} @param timeout: If specified and the remote call does not return a value before the timeout expires, the returned Deferred will be errbacked with a L{error.TimeOut} instance. @type returnSignature: C{string} @param returnSignature: If specified, the return values will be validated against the signature string. If the returned values do not mactch, the returned Deferred witl be errbacked with a L{error.RemoteError} instance. @rtype: L{twisted.internet.defer.Deferred} @returns: a Deferred to the result. If expectReply is False, the deferred will be immediately called back with None.
def load_delimited(filename, converters, delimiter=r'\s+'): r"""Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list. Examples -------- >>> # Load in a one-column list of event times (floats) >>> load_delimited('events.txt', [float]) >>> # Load in a list of labeled events, separated by commas >>> load_delimited('labeled_events.csv', [float, str], ',') Parameters ---------- filename : str Path to the annotation file converters : list of functions Each entry in column ``n`` of the file will be cast by the function ``converters[n]``. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file. """ # Initialize list of empty lists n_columns = len(converters) columns = tuple(list() for _ in range(n_columns)) # Create re object for splitting lines splitter = re.compile(delimiter) # Note: we do io manually here for two reasons. # 1. The csv module has difficulties with unicode, which may lead # to failures on certain annotation strings # # 2. numpy's text loader does not handle non-numeric data # with _open(filename, mode='r') as input_file: for row, line in enumerate(input_file, 1): # Split each line using the supplied delimiter data = splitter.split(line.strip(), n_columns - 1) # Throw a helpful error if we got an unexpected # of columns if n_columns != len(data): raise ValueError('Expected {} columns, got {} at ' '{}:{:d}:\n\t{}'.format(n_columns, len(data), filename, row, line)) for value, column, converter in zip(data, columns, converters): # Try converting the value, throw a helpful error on failure try: converted_value = converter(value) except: raise ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( value, converter.__name__, filename, row, line)) column.append(converted_value) # Sane output if n_columns == 1: return columns[0] else: return columns
r"""Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list. Examples -------- >>> # Load in a one-column list of event times (floats) >>> load_delimited('events.txt', [float]) >>> # Load in a list of labeled events, separated by commas >>> load_delimited('labeled_events.csv', [float, str], ',') Parameters ---------- filename : str Path to the annotation file converters : list of functions Each entry in column ``n`` of the file will be cast by the function ``converters[n]``. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file.
def install_documentation(path="./Litho1pt0-Notebooks"):
    """Install the example notebooks for litho1pt0 in the given location

    WARNING: If the path exists, the Notebook files will be written into the path
    and will overwrite any existing files with which they collide. The default
    path ("./Litho1pt0-Notebooks") is chosen to make collision less likely / problematic

    The documentation for litho1pt0 is in the form of jupyter notebooks.

    Some dependencies exist for the notebooks to be useful:

       - matplotlib: for some diagrams
       - cartopy: for plotting map examples

    litho1pt0 dependencies are explicitly imported into the notebooks including:

       - stripy (for interpolating on the sphere)
       - numpy
       - scipy (for k-d tree point location)
    """

    ## Question - overwrite or not? shutil.copytree fails if the directory exists.

    Notebooks_Path = _pkg_resources.resource_filename('litho1pt0', 'Notebooks')

    ct = _dir_util.copy_tree(Notebooks_Path, path, preserve_mode=1,
                             preserve_times=1, preserve_symlinks=1,
                             update=0, verbose=1, dry_run=0)

    return
Install the example notebooks for litho1pt0 in the given location WARNING: If the path exists, the Notebook files will be written into the path and will overwrite any existing files with which they collide. The default path ("./Litho1pt0-Notebooks") is chosen to make collision less likely / problematic The documentation for litho1pt0 is in the form of jupyter notebooks. Some dependencies exist for the notebooks to be useful: - matplotlib: for some diagrams - cartopy: for plotting map examples litho1pt0 dependencies are explicitly imported into the notebooks including: - stripy (for interpolating on the sphere) - numpy - scipy (for k-d tree point location)
def InsertFloatArg(self, string="", **_): """Inserts a Float argument.""" try: float_value = float(string) return self.InsertArg(float_value) except (TypeError, ValueError): raise ParseError("%s is not a valid float." % string)
Inserts a Float argument.
def _delete_json(self, instance, space=None, rel_path=None,
                 extra_params=None, id_field=None, append_to_path=None):
    """
    Base level method for removing data from the API
    """
    model = type(instance)

    # Only API.spaces and API.event may omit the `space` argument.
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._delete_json` should always '
            'be called with a `space` argument.'
        )

    if not extra_params:
        extra_params = {}

    if not id_field:
        id_field = 'number'
    if not instance.get(id_field, None):
        raise AttributeError(
            '%s does not have a value for the id field \'%s\'' % (
                instance.__class__.__name__,
                id_field
            )
        )

    # Generate the url to hit
    url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        append_to_path or '',
        urllib.urlencode(extra_params),
    )

    # Fetch the data
    response = requests.delete(
        url=url,
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )

    if response.status_code == 204:
        # OK
        return True
    else:
        # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code,
                url,
                response.text
            )
        )
Base level method for removing data from the API
def emit_reset(self): """Resets the device to a blank state.""" for name in self.layout.axes: params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS) self.write_event(ecodes.EV_ABS, name, int(sum(params[1:3]) / 2)) for name in self.layout.buttons: self.write_event(ecodes.EV_KEY, name, False) for name in self.layout.hats: self.write_event(ecodes.EV_ABS, name, 0) self.device.syn()
Resets the device to a blank state.
def setup(app) -> Dict[str, Any]: """ Sets up Sphinx extension. """ app.connect("doctree-read", on_doctree_read) app.connect("builder-inited", on_builder_inited) app.add_css_file("uqbar.css") app.add_node( nodes.classifier, override=True, html=(visit_classifier, depart_classifier) ) app.add_node( nodes.definition, override=True, html=(visit_definition, depart_definition) ) app.add_node(nodes.term, override=True, html=(visit_term, depart_term)) return { "version": uqbar.__version__, "parallel_read_safe": True, "parallel_write_safe": True, }
Sets up Sphinx extension.
def payments_for_address(self, address):
    "Return a list of (tx hash, total value received) pairs for the address."
    URL = self.api_domain + ("/address/%s?format=json" % address)
    d = urlopen(URL).read()
    json_response = json.loads(d.decode("utf8"))
    response = []
    for tx in json_response.get("txs", []):
        total_out = 0
        for tx_out in tx.get("out", []):
            if tx_out.get("addr") == address:
                total_out += tx_out.get("value", 0)
        if total_out > 0:
            response.append((tx.get("hash"), total_out))
    return response
Return a list of (tx hash, total value received) pairs for the address.
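An offline sketch of the per-transaction summing logic, using a canned response shaped like blockchain.info's address JSON (the hashes and values are made up):

canned = {
    "txs": [
        {"hash": "aa11", "out": [{"addr": "1Demo", "value": 5000},
                                 {"addr": "1Other", "value": 100}]},
        {"hash": "bb22", "out": [{"addr": "1Other", "value": 42}]},
    ]
}

def payments(json_response, address):
    result = []
    for tx in json_response.get("txs", []):
        total = sum(o.get("value", 0) for o in tx.get("out", [])
                    if o.get("addr") == address)
        if total > 0:
            result.append((tx.get("hash"), total))
    return result

print(payments(canned, "1Demo"))  # [('aa11', 5000)]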
def quadvgk(feval, fmin, fmax, tol1=1e-5, tol2=1e-5):
    """
    NumPy implementation that makes use of the code here:
    http://se.mathworks.com/matlabcentral/fileexchange/18801-quadvgk

    We use Gauss-Kronrod integration, as already used in GPstuff, for
    evaluating one-dimensional integrals. The quadrature is vectorised,
    which means that several functions can be evaluated at the same time
    over a grid of points.

    :param feval: vectorised integrand; feval(x) returns one row per function
    :param fmin: lower integration limit
    :param fmax: upper integration limit
    :param tol1: tolerance on the Gauss/Kronrod error estimate
    :param tol2: minimum width of a subinterval
    :return: array of integral values, one per function
    """
    # 15-point Kronrod nodes and weights on [-1, 1]
    XK = np.array([-0.991455371120813, -0.949107912342759, -0.864864423359769,
                   -0.741531185599394, -0.586087235467691, -0.405845151377397,
                   -0.207784955007898, 0., 0.207784955007898, 0.405845151377397,
                   0.586087235467691, 0.741531185599394, 0.864864423359769,
                   0.949107912342759, 0.991455371120813])
    WK = np.array([0.022935322010529, 0.063092092629979, 0.104790010322250,
                   0.140653259715525, 0.169004726639267, 0.190350578064785,
                   0.204432940075298, 0.209482141084728, 0.204432940075298,
                   0.190350578064785, 0.169004726639267, 0.140653259715525,
                   0.104790010322250, 0.063092092629979, 0.022935322010529])
    # 7-point Gaussian weightings (embedded at the odd Kronrod nodes)
    WG = np.array([0.129484966168870, 0.279705391489277, 0.381830050505119,
                   0.417959183673469, 0.381830050505119, 0.279705391489277,
                   0.129484966168870])
    NK = WK.size

    Subs = np.array([[fmin], [fmax]])
    # number of functions to evaluate in the feval vector of functions
    NF = feval(np.zeros(1)).size
    Q = np.zeros(NF)

    while Subs.size > 0:
        Subs = getSubs(Subs, XK)
        M = (Subs[1, :] - Subs[0, :]) / 2
        C = (Subs[1, :] + Subs[0, :]) / 2
        NM = M.size
        x = (XK[:, None] * M + C).flatten()
        FV = feval(x)

        # Kronrod (Q1) and embedded Gauss (Q2) estimates per subinterval
        Q1 = np.dot(FV.reshape(NF, NK, NM).swapaxes(2, 1), WK) * M
        Q2 = np.dot(FV.reshape(NF, NK, NM).swapaxes(2, 1)[:, :, 1::2], WG) * M

        # accept subintervals whose error estimate or width is small enough
        ind = np.nonzero(np.logical_or(np.max(np.abs(Q1 - Q2), 0) < tol1,
                                       (Subs[1, :] - Subs[0, :]) < tol2))[0]
        Q = Q + np.sum(Q1[:, ind], axis=1)
        Subs = np.delete(Subs, ind, axis=1)
    return Q
NumPy implementation that makes use of the code here: http://se.mathworks.com/matlabcentral/fileexchange/18801-quadvgk We use Gauss-Kronrod integration, as already used in GPstuff, for evaluating one-dimensional integrals. The quadrature is vectorised, which means that several functions can be evaluated at the same time over a grid of points. :param feval: vectorised integrand; feval(x) returns one row per function :param fmin: lower integration limit :param fmax: upper integration limit :param tol1: tolerance on the Gauss/Kronrod error estimate :param tol2: minimum width of a subinterval :return: array of integral values, one per function
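A single-interval sketch of the error-estimation idea behind quadvgk: the 15-point Kronrod estimate and its embedded 7-point Gauss estimate share nodes (the Gauss nodes are the odd-indexed Kronrod nodes), and their difference drives the subdivision test. The arrays are the same as above; the integrand is sin on [0, pi], whose integral is 2:

import numpy as np

XK = np.array([-0.991455371120813, -0.949107912342759, -0.864864423359769,
               -0.741531185599394, -0.586087235467691, -0.405845151377397,
               -0.207784955007898, 0., 0.207784955007898, 0.405845151377397,
               0.586087235467691, 0.741531185599394, 0.864864423359769,
               0.949107912342759, 0.991455371120813])
WK = np.array([0.022935322010529, 0.063092092629979, 0.104790010322250,
               0.140653259715525, 0.169004726639267, 0.190350578064785,
               0.204432940075298, 0.209482141084728, 0.204432940075298,
               0.190350578064785, 0.169004726639267, 0.140653259715525,
               0.104790010322250, 0.063092092629979, 0.022935322010529])
WG = np.array([0.129484966168870, 0.279705391489277, 0.381830050505119,
               0.417959183673469, 0.381830050505119, 0.279705391489277,
               0.129484966168870])

fmin, fmax = 0.0, np.pi
M, C = (fmax - fmin) / 2, (fmax + fmin) / 2
f = np.sin(XK * M + C)

Q1 = M * np.sum(WK * f)        # 15-point Kronrod estimate
Q2 = M * np.sum(WG * f[1::2])  # embedded 7-point Gauss estimate
print(Q1, abs(Q1 - Q2))        # ~2.0 and a tiny error estimate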
def get_source_url(obj): """Get the source url for a Trust object. Args: obj (ChainOfTrust or LinkOfTrust): the trust object to inspect Raises: CoTError: if repo and source are defined and don't match Returns: str: the source url. """ source_env_prefix = obj.context.config['source_env_prefix'] task = obj.task log.debug("Getting source url for {} {}...".format(obj.name, obj.task_id)) repo = get_repo(obj.task, source_env_prefix=source_env_prefix) source = task['metadata']['source'] if repo and not verify_repo_matches_url(repo, source): raise CoTError("{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!".format( name=obj.name, task_id=obj.task_id, source_env_prefix=source_env_prefix, repo=repo, source=source )) log.info("{} {}: found {}".format(obj.name, obj.task_id, source)) return source
Get the source url for a Trust object. Args: obj (ChainOfTrust or LinkOfTrust): the trust object to inspect Raises: CoTError: if repo and source are defined and don't match Returns: str: the source url.
def build(self, region=None, profile=None): """Get or create the provider for the given region and profile.""" with self.lock: # memoization lookup key derived from region + profile. key = "{}-{}".format(profile, region) try: # assume provider is in provider dictionary. provider = self.providers[key] except KeyError: msg = "Missed memoized lookup ({}), creating new AWS Provider." logger.debug(msg.format(key)) if not region: region = self.region # memoize the result for later. self.providers[key] = Provider( get_session(region=region, profile=profile), region=region, **self.kwargs ) provider = self.providers[key] return provider
Get or create the provider for the given region and profile.
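A generic standalone sketch of the lock-guarded memoization pattern used by build(): one cached object per (profile, region) key. object() stands in for the real Provider construction:

import threading

class ProviderFactory:
    def __init__(self):
        self.lock = threading.Lock()
        self.providers = {}

    def build(self, region=None, profile=None):
        key = "{}-{}".format(profile, region)
        with self.lock:
            if key not in self.providers:
                # a real factory would construct a Provider here
                self.providers[key] = object()
            return self.providers[key]

factory = ProviderFactory()
assert factory.build('us-east-1', 'dev') is factory.build('us-east-1', 'dev')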
def get_provider_name(driver): """ Return the provider name from the driver class :param driver: obj :return: str """ kls = driver.__class__.__name__ for d, prop in DRIVERS.items(): if prop[1] == kls: return d return None
Return the provider name from the driver class :param driver: obj :return: str
def arange_col(n, dtype=int):
    """
    Returns ``np.arange`` in a column form.

    :param n: Length of the array.
    :type n: int
    :param dtype: Type of the array.
    :type dtype: type

    :returns: ``np.arange`` in a column form.
    :rtype: ndarray
    """
    return np.reshape(np.arange(n, dtype=dtype), (n, 1))
Returns ``np.arange`` in a column form. :param n: Length of the array. :type n: int :param dtype: Type of the array. :type dtype: type :returns: ``np.arange`` in a column form. :rtype: ndarray
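A quick demonstration of the resulting column shape, using the same expression directly:

import numpy as np

col = np.reshape(np.arange(3), (3, 1))  # what arange_col(3) would return
print(col.shape)   # (3, 1)
print(col.T)       # [[0 1 2]] -- the same values laid out as a row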
def delete_priority_class(self, name, **kwargs): """ delete a PriorityClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_priority_class(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PriorityClass (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_priority_class_with_http_info(name, **kwargs) else: (data) = self.delete_priority_class_with_http_info(name, **kwargs) return data
delete a PriorityClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_priority_class(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PriorityClass (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread.
def shade_jar(self, shading_rules, jar_path):
    """Shades a jar using the given shading rules.

    This *overwrites* the existing jar file at ``jar_path``.

    :param shading_rules: predefined rules for shading
    :param jar_path: The filepath to the jar that should be shaded.
    """
    self.context.log.debug('Shading {}.'.format(jar_path))
    with temporary_dir() as tempdir:
        output_jar = os.path.join(tempdir, os.path.basename(jar_path))
        with self.shader.binary_shader_for_rules(output_jar, jar_path,
                                                 shading_rules) as shade_runner:
            result = execute_runner(shade_runner,
                                    workunit_factory=self.context.new_workunit,
                                    workunit_name='jarjar')
            if result != 0:
                raise TaskError('Shading tool failed to shade {0} '
                                '(error code {1})'.format(jar_path, result))
            if not os.path.exists(output_jar):
                raise TaskError('Shading tool returned success for {0}, but '
                                'the output jar was not found at {1}'.format(jar_path, output_jar))
            atomic_copy(output_jar, jar_path)
        return jar_path
Shades a jar using the given shading rules. This *overwrites* the existing jar file at ``jar_path``. :param shading_rules: predefined rules for shading :param jar_path: The filepath to the jar that should be shaded.
def get_outputs(sym, params, in_shape, in_label):
    """ Infer output shapes and return dictionary of output name to shape

    :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
    :param dict of (str, nd.NDArray) params:
    :param list of tuple(int, ...) in_shape: list of all input shapes
    :param in_label: name of label typically used in loss that may be left in graph. This name is
        removed from the list of inputs required by symbol
    :return: dictionary of output name to shape
    :rtype: dict of (str, tuple(int, ...))
    """
    # remove any input listed in params from sym.list_inputs() and bind them to the input shapes
    # provided by user. Also remove in_label, which is the name of the label symbol that may have
    # been used as the label for loss during training.
    inputs = {n: tuple(s) for n, s in
              zip([n for n in sym.list_inputs() if n not in params and n != in_label],
                  in_shape)}
    # Add params and their shape to list of inputs
    inputs.update({n: v.shape for n, v in params.items() if n in sym.list_inputs()})

    # Provide input data as well as input params to infer_shape()
    _, out_shapes, _ = sym.infer_shape(**inputs)

    out_names = list()
    for name in sym.list_outputs():
        if name.endswith('_output'):
            out_names.append(name[:-len('_output')])
        else:
            logging.info("output '%s' does not end with '_output'", name)
            out_names.append(name)

    assert len(out_shapes) == len(out_names)
    # bind output shapes with output names
    graph_outputs = {n: s for n, s in zip(out_names, out_shapes)}

    return graph_outputs
Infer output shapes and return dictionary of output name to shape :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on :param dict of (str, nd.NDArray) params: :param list of tuple(int, ...) in_shape: list of all input shapes :param in_label: name of label typically used in loss that may be left in graph. This name is removed from the list of inputs required by symbol :return: dictionary of output name to shape :rtype: dict of (str, tuple(int, ...))
def transfer(self, data): """Transfers data over SPI. Arguments: data: The data to transfer. Returns: The data returned by the SPI device. """ settings = self.transfer_settings settings.spi_tx_size = len(data) self.transfer_settings = settings response = '' for i in range(0, len(data), 60): response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data time.sleep(0.01) while len(response) < len(data): response += self.sendCommand(commands.SPITransferCommand('')).data return ''.join(response)
Transfers data over SPI. Arguments: data: The data to transfer. Returns: The data returned by the SPI device.
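A standalone sketch of the 60-byte chunking-and-polling loop, with a stub in place of the real sendCommand/SPITransferCommand machinery (EchoDevice and its exchange method are hypothetical stand-ins):

def chunked_transfer(device, data, chunk=60):
    response = ''
    for i in range(0, len(data), chunk):
        response += device.exchange(data[i:i + chunk])
    while len(response) < len(data):
        response += device.exchange('')  # keep polling for leftover bytes
    return response

class EchoDevice:
    def exchange(self, chunk):
        return chunk  # a real SPI slave returns the bytes it shifts out

print(len(chunked_transfer(EchoDevice(), 'x' * 130)))  # 130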
def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding
Given a document, tries to detect its XML encoding.
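A Python 3 sketch of just the BOM-sniffing step (the original is Python 2 and also falls back to the XML declaration and HTML meta charset):

import codecs

def sniff_bom(data):
    # check the 4-byte UTF-32 BOMs before UTF-16, since UTF-32-LE's BOM
    # starts with UTF-16-LE's
    for bom, enc in [(codecs.BOM_UTF8, 'utf-8'),
                     (codecs.BOM_UTF32_BE, 'utf-32-be'),
                     (codecs.BOM_UTF32_LE, 'utf-32-le'),
                     (codecs.BOM_UTF16_BE, 'utf-16-be'),
                     (codecs.BOM_UTF16_LE, 'utf-16-le')]:
        if data.startswith(bom):
            return enc, data[len(bom):]
    return 'ascii', data

print(sniff_bom(b'\xef\xbb\xbf<?xml version="1.0"?>'))
# ('utf-8', b'<?xml version="1.0"?>')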
def check_publication_state(publication_id): """Check the publication's current state.""" with db_connect() as db_conn: with db_conn.cursor() as cursor: cursor.execute("""\ SELECT "state", "state_messages" FROM publications WHERE id = %s""", (publication_id,)) publication_state, publication_messages = cursor.fetchone() return publication_state, publication_messages
Check the publication's current state.
def by_period(self, field=None, period=None, timezone=None, start=None, end=None):
    """
    Create a date histogram aggregation using the last added aggregation for the
    current object. Add this date_histogram aggregation into self.aggregations

    :param field: the index field to create the histogram from
    :param period: the interval which elasticsearch supports, ex: "month", "week" and such
    :param timezone: custom timezone
    :param start: custom start date for the date histogram, default: start date under range
    :param end: custom end date for the date histogram, default: end date under range
    :returns: self, which allows the method to be chainable with the other methods
    """
    hist_period = period if period else self.interval_
    time_zone = timezone if timezone else "UTC"

    start_ = start if start else self.start_date
    end_ = end if end else self.end_date
    bounds = self.get_bounds(start_, end_)

    date_field = field if field else "grimoire_creation_date"
    agg_key = "date_histogram_" + date_field
    if agg_key in self.aggregations:
        agg = self.aggregations[agg_key]
    else:
        agg = A("date_histogram", field=date_field, interval=hist_period,
                time_zone=time_zone, min_doc_count=0, **bounds)

    child_agg_counter = self.child_agg_counter_dict[agg_key]
    child_name, child_agg = self.aggregations.popitem()

    agg.metric(child_agg_counter, child_agg)
    self.aggregations[agg_key] = agg
    self.child_agg_counter_dict[agg_key] += 1

    return self
Create a date histogram aggregation using the last added aggregation for the current object. Add this date_histogram aggregation into self.aggregations :param field: the index field to create the histogram from :param period: the interval which elasticsearch supports, ex: "month", "week" and such :param timezone: custom timezone :param start: custom start date for the date histogram, default: start date under range :param end: custom end date for the date histogram, default: end date under range :returns: self, which allows the method to be chainable with the other methods
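A hedged usage sketch: constructing the same date_histogram aggregation directly with elasticsearch_dsl's A helper, using the field name and defaults from the code above (assumes elasticsearch_dsl is installed; bounds omitted):

from elasticsearch_dsl import A

agg = A("date_histogram", field="grimoire_creation_date",
        interval="month", time_zone="UTC", min_doc_count=0)
print(agg.to_dict())
# {'date_histogram': {'field': 'grimoire_creation_date', 'interval': 'month',
#                     'time_zone': 'UTC', 'min_doc_count': 0}}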