positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def recv_line(self, max_size=None, timeout='default', ending=None):
    """Receive data up to and including the next line ending.

    The terminator defaults to ``self.LINE_ENDING`` (normally "\\n") and can
    be changed by changing ``nc.LINE_ENDING``; it is returned as part of the
    string.

    Aliases: recvline, readline, read_line, readln, recvln
    """
    terminator = ending if ending is not None else self.LINE_ENDING
    return self.recv_until(terminator, max_size, timeout)
Receive until the next newline, default "\\n". The newline string can be changed by changing ``nc.LINE_ENDING``. The newline will be returned as part of the string. Aliases: recvline, readline, read_line, readln, recvln
def get_current_environment(self, note=None):
    """Return the environment id of the current shutit_pexpect_session.

    :param note: optional note passed to the note handlers
    """
    shutit_global.shutit_global_object.yield_to_draw()
    self.handle_note(note)
    environment_id = self.get_current_shutit_pexpect_session_environment().environment_id
    self.handle_note_after(note)
    return environment_id
Returns the current environment id from the current shutit_pexpect_session
def return_markers(self):
    """Return all the markers (also called triggers or events).

    Returns
    -------
    list of dict
        Each dict holds 'name' as str plus 'start' and 'end' as float in
        seconds from the start of the recording.  Sentinel entries whose
        sample equals the dtype's max value are dropped.

    Raises
    ------
    FileNotFoundError
        when it cannot read the events for some reason (don't use other
        exceptions).
    """
    triggers = self._triggers
    sentinel = iinfo(triggers.dtype['sample']).max
    valid = triggers[triggers['sample'] != sentinel]
    return [{'name': str(trig['code']),
             'start': trig['sample'] / self._s_freq,
             'end': trig['sample'] / self._s_freq,
             } for trig in valid]
Return all the markers (also called triggers or events). Returns ------- list of dict where each dict contains 'name' as str, 'start' and 'end' as float in seconds from the start of the recordings, and 'chan' as list of str with the channels involved (if not of relevance, it's None). Raises ------ FileNotFoundError when it cannot read the events for some reason (don't use other exceptions).
def show_system_monitor_output_switch_status_rbridge_id_out(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload show_system_monitor/output/switch-status/
    rbridge-id-out and passes it to the callback.
    """
    root = ET.Element("show_system_monitor")
    # NOTE: the generated original also created (and immediately discarded)
    # a "config" element; the callback receives the show_system_monitor node.
    status = ET.SubElement(ET.SubElement(root, "output"), "switch-status")
    ET.SubElement(status, "rbridge-id-out").text = kwargs.pop('rbridge_id_out')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
Auto Generated Code
def _get_filter_ids(cls, request): """Parses the `id` filter paramter from the url query. """ id_query = request.url.query.get('id', None) if id_query is None: return None filter_ids = id_query.split(',') for filter_id in filter_ids: cls._validate_id(filter_id) return filter_ids
Parses the `id` filter parameter from the url query.
def collective_diffusion_coefficient( self ):
    """Return the collective or "jump" diffusion coefficient, D_J.

    Args:
        None

    Returns:
        (Float): D_J, or None when the simulation has not been run.
    """
    if not self.has_run:
        return None
    return self.atoms.collective_dr_squared() / (6.0 * self.lattice.time)
Returns the collective or "jump" diffusion coefficient, D_J. Args: None Returns: (Float): The collective diffusion coefficient, D_J.
def set_volume(self, volume):
    """Set receiver volume via an HTTP GET command.

    Volume is sent in a format like -50.0; minimum is -80.0, maximum 18.0.

    :param volume: target volume in dB
    :returns: True when the command was sent successfully, False on a
        connection error
    :raises ValueError: when volume is outside [-80, 18]
    """
    if volume < -80 or volume > 18:
        raise ValueError("Invalid volume")
    try:
        return bool(self.send_get_command(
            self._urls.command_set_volume % volume))
    except requests.exceptions.RequestException:
        _LOGGER.error("Connection error: set volume command not sent.")
        return False
Set receiver volume via HTTP get command. Volume is sent in a format like -50.0. Minimum is -80.0, maximum at 18.0
def delete(customer, card):
    """Delete a card given its id (or object).

    :param customer: The customer id or object
    :type customer: string|Customer
    :param card: The card id or object
    :type card: string|Card
    """
    customer_id = customer.id if isinstance(customer, resources.Customer) else customer
    card_id = card.id if isinstance(card, resources.Card) else card
    HttpClient().delete(
        routes.url(routes.CARD_RESOURCE,
                   resource_id=card_id,
                   customer_id=customer_id))
Delete a card from its id. :param customer: The customer id or object :type customer: string|Customer :param card: The card id or object :type card: string|Card
def query_induction(self, nodes: List[Node]) -> List[Edge]:
    """Get all edges between any of the given nodes (minimum length of 2)."""
    if len(nodes) < 2:
        raise ValueError('not enough nodes given to induce over')
    query = self.session.query(Edge).filter(self._edge_both_nodes(nodes))
    return query.all()
Get all edges between any of the given nodes (minimum length of 2).
def _typed_from_items(items): """ Construct strongly typed attributes (properties) from a dictionary of name and :class:`~exa.typed.Typed` object pairs. See Also: :func:`~exa.typed.typed` """ dct = {} for name, attr in items: if isinstance(attr, Typed): dct[name] = attr(name) return dct
Construct strongly typed attributes (properties) from a dictionary of name and :class:`~exa.typed.Typed` object pairs. See Also: :func:`~exa.typed.typed`
def remove_namespace(attribute, namespace_splitter=NAMESPACE_SPLITTER, root_only=False):
    """
    Returns attribute with stripped foundations.namespace.

    Usage::

        >>> remove_namespace("grandParent|parent|child")
        u'child'
        >>> remove_namespace("grandParent|parent|child", root_only=True)
        u'parent|child'

    :param attribute: Attribute.
    :type attribute: unicode
    :param namespace_splitter: Namespace splitter character.
    :type namespace_splitter: unicode
    :param root_only: Remove only root foundations.namespace.
    :type root_only: bool
    :return: Attribute without foundations.namespace.
    :rtype: unicode
    """
    tokens = attribute.split(namespace_splitter)
    # When root_only, drop just the first token; an empty join result
    # (no splitter present) falls back to the last token, matching the
    # original and/or idiom exactly.
    stripped_attribute = namespace_splitter.join(tokens[1:]) if root_only else ""
    if not stripped_attribute:
        stripped_attribute = tokens[-1]
    LOGGER.debug("> Attribute: '{0}', stripped attribute: '{1}'.".format(attribute, stripped_attribute))
    return stripped_attribute
Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode
def rule(self):
    """
    Return finite differencing rule.

    The rule is for a nominal unit step size, and must be scaled later to
    reflect the local step size.

    Member methods used
    -------------------
    _fd_matrix

    Member variables used
    ---------------------
    n
    order
    method
    """
    step_ratio = self.step_ratio
    method = self.method
    # Multicomplex differentiation and the zeroth derivative need no
    # finite-difference weights; a unit rule suffices.
    if method in ('multicomplex', ) or self.n == 0:
        return np.ones((1,))
    order, method_order = self.n - 1, self._method_order
    parity = self._parity(method, order, method_order)
    step = self._richardson_step()
    num_terms, ix = (order + method_order) // step, order // step
    # FD_RULES caches the pseudo-inverse per (step_ratio, parity, num_terms)
    # so the expensive pinv is computed only once per configuration.
    fd_rules = FD_RULES.get((step_ratio, parity, num_terms))
    if fd_rules is None:
        fd_mat = self._fd_matrix(step_ratio, parity, num_terms)
        fd_rules = linalg.pinv(fd_mat)
        FD_RULES[(step_ratio, parity, num_terms)] = fd_rules
    # Some methods need the sign of the selected rule row flipped.
    if self._flip_fd_rule:
        return -fd_rules[ix]
    return fd_rules[ix]
Return finite differencing rule. The rule is for a nominal unit step size, and must be scaled later to reflect the local step size. Member methods used ------------------- _fd_matrix Member variables used --------------------- n order method
def standardize_role(role):
    """Convert role text into standardized form."""
    lowered = role.lower()
    product_markers = {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}
    if any(marker in lowered for marker in product_markers):
        return 'product'
    return lowered
Convert role text into standardized form.
async def on_error(self, event_method, *args, **kwargs):
    """|coro|

    The default error handler provided by the client.

    By default this prints to :data:`sys.stderr` however it could be
    overridden to have a different implementation.
    Check :func:`discord.on_error` for more details.
    """
    message = 'Ignoring exception in {}'.format(event_method)
    print(message, file=sys.stderr)
    traceback.print_exc()
|coro| The default error handler provided by the client. By default this prints to :data:`sys.stderr` however it could be overridden to have a different implementation. Check :func:`discord.on_error` for more details.
def grep(filename, tag, firstOccurrence=False):
    """Grep the tokens of the line matching `tag` from inside a file.

    NOTE(review): despite the original docstring, the match is a regex
    search anywhere in the line (re.search), not a "starts with" test.

    :param filename: path of the file to scan
    :param tag: regular expression searched on each line
    :param firstOccurrence: stop at the first matching line
    :returns: the second whitespace token of the matching line when it has
        exactly two tokens, otherwise all tokens after the first;
        None when no line matches
    """
    import re
    # Narrowed from a bare except: only failures to open are expected here;
    # the print-and-exit behaviour of the original is preserved.
    try:
        afile = open(filename, "r")
    except OSError:
        print("Error in utils.grep(): cannot open file", filename)
        exit()
    content = None
    with afile:  # guarantees the handle is closed on every path
        for line in afile:
            if re.search(tag, line):
                content = line.split()
                if firstOccurrence:
                    break
    if content:
        content = content[1] if len(content) == 2 else content[1:]
    return content
Greps the line that matches a specific `tag` pattern (regex search) from inside a file.
def _compute_layer_name(is_defined_within_template, arn): """ Computes a unique name based on the LayerVersion Arn Format: <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn> Parameters ---------- is_defined_within_template bool True if the resource is a Ref to a resource otherwise False arn str ARN of the Resource Returns ------- str A unique name that represents the LayerVersion """ # If the Layer is defined in the template, the arn will represent the LogicalId of the LayerVersion Resource, # which does not require creating a name based on the arn. if is_defined_within_template: return arn try: _, layer_name, layer_version = arn.rsplit(':', 2) except ValueError: raise InvalidLayerVersionArn(arn + " is an Invalid Layer Arn.") return LayerVersion.LAYER_NAME_DELIMETER.join([layer_name, layer_version, hashlib.sha256(arn.encode('utf-8')).hexdigest()[0:10]])
Computes a unique name based on the LayerVersion Arn Format: <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn> Parameters ---------- is_defined_within_template bool True if the resource is a Ref to a resource otherwise False arn str ARN of the Resource Returns ------- str A unique name that represents the LayerVersion
def center_of_mass(X, Y):
    """Get center of mass.

    Parameters
    ----------
    X: 1d array
        X values
    Y: 1d array
        Y values (used as weights)

    Returns
    -------
    res: number
        The position of the center of mass in X

    Notes
    -----
    Computes the Y-weighted average of X.  (The original note claimed
    "least squares", which was incorrect.)
    """
    X = np.asarray(X)
    Y = np.asarray(Y)
    return (X * Y).sum() / Y.sum()
Get center of mass Parameters ---------- X: 1d array X values Y: 1d array Y values Returns ------- res: number The position of the center of mass in X Notes ----- Uses least squares
def getNumberOfRegularSamples(self):
    """
    Returns the number of regular samples.

    :returns: number of regular samples
    :rtype: integer
    """
    # Each analysis points at its sample (request UID); a set collapses
    # any duplicate values.
    unique_samples = {a.getRequestUID() for a in self.getRegularAnalyses()}
    return len(unique_samples)
Returns the number of regular samples. :returns: number of regular samples :rtype: integer
def clear(self):
    """Clear the displayed image.

    Resets the cached image object, removes the image object from the
    canvas (if one is present) and redraws.
    """
    self._imgobj = None
    try:
        # See if there is an image on the canvas
        self.canvas.delete_object_by_tag(self._canvas_img_tag)
        self.redraw()
    except KeyError:
        # No object with that tag on the canvas -- nothing to remove.
        pass
Clear the displayed image.
def definition_name(cls):
    """Helper method for creating definition name.

    Names will be generated to include the classes package name, scope (if
    the class is nested in another definition) and class name.

    By default, the package name for a definition is derived from its module
    name.  However, this value can be overridden by placing a 'package'
    attribute in the module that contains the definition class.  For example:

      package = 'some.alternate.package'

      class MyMessage(Message):
        ...

      >>> MyMessage.definition_name()
      some.alternate.package.MyMessage

    Returns:
      Dot-separated fully qualified name of definition.
    """
    outer = cls.outer_definition_name()
    if outer is None:
        return six.text_type(cls.__name__)
    return u'%s.%s' % (outer, cls.__name__)
Helper method for creating definition name. Names will be generated to include the classes package name, scope (if the class is nested in another definition) and class name. By default, the package name for a definition is derived from its module name. However, this value can be overridden by placing a 'package' attribute in the module that contains the definition class. For example: package = 'some.alternate.package' class MyMessage(Message): ... >>> MyMessage.definition_name() some.alternate.package.MyMessage Returns: Dot-separated fully qualified name of definition.
def copy(self, invert=False):
    """Return a copy of the current instance.

    Parameters
    ----------
    invert: bool
        The copy will be inverted w.r.t. the original
    """
    inverted = (not self.inverted) if invert else self.inverted
    return PolygonFilter(axes=self.axes,
                         points=self.points,
                         name=self.name,
                         inverted=inverted)
Return a copy of the current instance Parameters ---------- invert: bool The copy will be inverted w.r.t. the original
def view_portfolio_losses(token, dstore):
    """
    The losses for the full portfolio, for each realization and loss type,
    extracted from the event loss table.
    """
    oq = dstore['oqparam']
    losses = portfolio_loss(dstore).view(oq.loss_dt())[:, 0]
    labels = numpy.array([str(r) for r in range(len(losses))])
    table = util.compose_arrays(labels, losses, 'rlz')
    # this is very sensitive to rounding errors, so I am using a low precision
    return rst_table(table, fmt='%.5E')
The losses for the full portfolio, for each realization and loss type, extracted from the event loss table.
def rebuildtable(cls):
    """Regenerate the entire closuretree."""
    # Drop all existing closure rows, then re-seed the depth-0 self-links.
    cls._closure_model.objects.all().delete()
    self_links = [
        cls._closure_model(parent_id=row['pk'], child_id=row['pk'], depth=0)
        for row in cls.objects.values("pk")
    ]
    cls._closure_model.objects.bulk_create(self_links)
    # Re-create the ancestor links node by node.
    for node in cls.objects.all():
        node._closure_createlink()
Regenerate the entire closuretree.
def min_cvar(self, s=10000, beta=0.95, random_state=None):
    """
    Find the portfolio weights that minimises the CVaR, via
    Monte Carlo sampling from the return distribution.

    :param s: number of bootstrap draws, defaults to 10000
    :type s: int, optional
    :param beta: "significance level" (i.e. 1 - q), defaults to 0.95
    :type beta: float, optional
    :param random_state: seed for random sampling, defaults to None
    :type random_state: int, optional
    :return: asset weights for the CVaR-minimising portfolio
    :rtype: dict
    """
    args = (self.returns, s, beta, random_state)
    # SPSA is a gradient-free stochastic optimiser; the sampled CVaR
    # objective is noisy, so a noise-tolerant method is used.
    result = noisyopt.minimizeSPSA(
        objective_functions.negative_cvar,
        args=args,
        bounds=self.bounds,
        x0=self.initial_guess,
        niter=1000,
        paired=False,
    )
    self.weights = self.normalize_weights(result["x"])
    return dict(zip(self.tickers, self.weights))
Find the portfolio weights that minimises the CVaR, via Monte Carlo sampling from the return distribution. :param s: number of bootstrap draws, defaults to 10000 :type s: int, optional :param beta: "significance level" (i.e. 1 - q), defaults to 0.95 :type beta: float, optional :param random_state: seed for random sampling, defaults to None :type random_state: int, optional :return: asset weights for the CVaR-minimising portfolio :rtype: dict
def maxId(self):
    """int: current max id of objects"""
    if not self.model.db:
        return 0
    return max(obj["id"] for obj in self.model.db)
int: current max id of objects
def __write_p4settings(self, config):
    """Write the perforce .p4settings file into the configured root path.

    Respects the 'overwrite_p4settings' target flag for existing files and
    appends the password when 'write_password_p4settings' is enabled.
    """
    self.logger.info("Writing p4settings...")
    root_dir = os.path.expanduser(config.get('root_path'))
    p4settings_path = os.path.join(root_dir, ".p4settings")
    if os.path.exists(p4settings_path):
        if self.target.get('overwrite_p4settings', False):
            self.logger.info("Overwriting existing p4settings...")
            os.remove(p4settings_path)
        else:
            # Existing file and no overwrite requested: leave it untouched.
            return
    with open(p4settings_path, "w+") as p4settings_file:
        p4settings_file.write(p4settings_template % config.to_dict())
        # NOTE(review): this is a truthiness check -- any non-empty value
        # (including the string 'no') enables writing the password; confirm
        # whether a yes/no comparison was intended.
        if config.get('write_password_p4settings', 'no'):
            p4settings_file.write("\nP4PASSWD=%s" % config['password'])
write perforce settings
async def unformat(self):
    """Unformat this partition, refreshing the cached handler data."""
    device = self.block_device
    self._data = await self._handler.unformat(
        system_id=device.node.system_id,
        device_id=device.id,
        id=self.id)
Unformat this partition.
def _product(*args, **kwds): """ Generates cartesian product of lists given as arguments From itertools.product documentation """ pools = map(tuple, args) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] return result
Generates cartesian product of lists given as arguments From itertools.product documentation
def default_spec(self, manager): """ Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.) """ # TODO: I feel like this + the surrounding bits in add_to_manager() # could be consolidated & simplified... specstr = "" # Make sure truly-default spec skips 0.x if prehistory was unstable. stable_families = manager.stable_families if manager.config.releases_unstable_prehistory and stable_families: specstr = ">={}".format(min(stable_families)) if self.is_featurelike: # TODO: if app->config-><releases_always_forwardport_features or # w/e if True: specstr = ">={}".format(max(manager.keys())) else: # Can only meaningfully limit to minor release buckets if they # actually exist yet. buckets = self.minor_releases(manager) if buckets: specstr = ">={}".format(max(buckets)) return Spec(specstr) if specstr else Spec()
Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.)
def error_response(self, code, content=''):
    """Construct and send error response.

    :param code: HTTP status code to send
    :param content: XML response body, written verbatim after the headers.
        NOTE(review): on Python 3 ``wfile.write`` expects bytes -- confirm
        callers pass bytes, or that this runs under Python 2.
    """
    self.send_response(code)
    self.send_header('Content-Type', 'text/xml')
    self.add_compliance_header()
    self.end_headers()
    self.wfile.write(content)
Construct and send error response.
def inflate(self, value):
    """
    Handles the marshalling from Neo4J POINT to NeomodelPoint

    :param value: Value returned from the database
    :type value: Neo4J POINT
    :return: NeomodelPoint
    :raises TypeError: when value is not a spatial Point
    :raises ValueError: on an unknown SRID or a CRS mismatch
    """
    if not isinstance(value, neo4j.types.spatial.Point):
        raise TypeError('Invalid datatype to inflate. Expected POINT datatype, received {}'.format(type(value)))
    try:
        value_point_crs = SRID_TO_CRS[value.srid]
    except KeyError:
        raise ValueError('Invalid SRID to inflate. '
                         'Expected one of {}, received {}'.format(SRID_TO_CRS.keys(), value.srid))
    if self._crs != value_point_crs:
        raise ValueError('Invalid CRS. '
                         'Expected POINT defined over {}, received {}'.format(self._crs, value_point_crs))
    # cartesian
    if value.srid == 7203:
        return NeomodelPoint(x=value.x, y=value.y)
    # cartesian-3d
    elif value.srid == 9157:
        return NeomodelPoint(x=value.x, y=value.y, z=value.z)
    # wgs-84
    elif value.srid == 4326:
        return NeomodelPoint(longitude=value.longitude, latitude=value.latitude)
    # wgs-84-3d (the original comment said "wgs-83-3d"; SRID 4979 is WGS 84 3D)
    elif value.srid == 4979:
        return NeomodelPoint(longitude=value.longitude, latitude=value.latitude, height=value.height)
Handles the marshalling from Neo4J POINT to NeomodelPoint :param value: Value returned from the database :type value: Neo4J POINT :return: NeomodelPoint
def get_cert_types(self):
    """
    Collect the certificate types that are available to the customer.

    :return: A list of dictionaries of certificate types
    :rtype: list
    """
    result = self.client.service.getCustomerCertTypes(authData=self.auth)
    if result.statusCode != 0:
        return self._create_error(result.statusCode)
    return jsend.success({'cert_types': result.types})
Collect the certificate types that are available to the customer. :return: A list of dictionaries of certificate types :rtype: list
def bump_repos_version(module_name, new_version, local_only):
    """
    Changes the pinned version number in the requirements files of all repos
    which have the specified Python module as a dependency.

    This script assumes that GITHUB_TOKEN is set for GitHub authentication.

    NOTE(review): this is Python 2 code (print statements); it will not run
    under Python 3 unmodified.
    """
    # Make the cloning directory and change directories into it.
    tmp_dir = tempfile.mkdtemp(dir=os.getcwd())
    # Iterate through each repository.
    for owner, repo_name in REPOS_TO_CHANGE:
        repo_url = REPO_URL_FORMAT.format(owner, repo_name)
        gh = GitHubApiUtils(owner, repo_name)
        os.chdir(tmp_dir)
        # Clone the repo.
        ret_code = subprocess.call(['git', 'clone', '{}.git'.format(repo_url)])
        if ret_code:
            logging.error('Failed to clone repo {}'.format(repo_url))
            continue
        # Change into the cloned repo dir.
        os.chdir(repo_name)
        # Create a branch, using the version number.
        branch_name = '{}/{}'.format(module_name, new_version)
        ret_code = subprocess.call(['git', 'checkout', '-b', branch_name])
        if ret_code:
            logging.error('Failed to create branch in repo {}'.format(repo_url))
            continue
        # Search through all TXT files to find all lines with the module name,
        # changing the pinned version.
        files_changed = False
        for root, _dirs, files in os.walk('.'):
            for file in files:
                if file.endswith('.txt') and (('requirements' in file) or ('requirements' in root)):
                    found = False
                    filepath = os.path.join(root, file)
                    with open(filepath) as f:
                        if '{}=='.format(module_name) in f.read():
                            found = True
                    if found:
                        files_changed = True
                        # Change the file in-place.
                        for line in fileinput.input(filepath, inplace=True):
                            if '{}=='.format(module_name) in line:
                                print '{}=={}'.format(module_name, new_version)
                            else:
                                print line,
        if not files_changed:
            # Module name wasn't found in the requirements files.
            logging.info("Module name '{}' not found in repo {} - skipping.".format(module_name, repo_url))
            continue
        # Add/commit the files.
        ret_code = subprocess.call(['git', 'commit', '-am', 'Updating {} requirement to version {}'.format(module_name, new_version)])
        if ret_code:
            logging.error("Failed to add and commit changed files to repo {}".format(repo_url))
            continue
        if local_only:
            # For local_only, don't push the branch to the remote and create
            # the PR - leave all changes local for review.
            continue
        # Push the branch.
        ret_code = subprocess.call(['git', 'push', '--set-upstream', 'origin', branch_name])
        if ret_code:
            logging.error("Failed to push branch {} upstream for repo {}".format(branch_name, repo_url))
            continue
        # Create a PR with an automated message.
        rollback_branch_push = False
        try:
            # The GitHub "mention" below does not work via the API - unfortunately...
            response = gh.create_pull(
                title='Change {} version.'.format(module_name),
                body='Change the required version of {} to {}.\n\n@edx-ops/pipeline-team Please review and tag appropriate parties.'.format(module_name, new_version),
                head=branch_name,
                base='master'
            )
        except:
            logging.error('Failed to create PR for repo {} - did you set GITHUB_TOKEN?'.format(repo_url))
            rollback_branch_push = True
        else:
            logging.info('Created PR #{} for repo {}: {}'.format(response.number, repo_url, response.html_url))
        if rollback_branch_push:
            # Since the PR creation failed, delete the branch in the remote repo as well.
            ret_code = subprocess.call(['git', 'push', 'origin', '--delete', branch_name])
            if ret_code:
                logging.error("ROLLBACK: Failed to delete upstream branch {} for repo {}".format(branch_name, repo_url))
    if not local_only:
        # Remove the temp directory containing all the cloned repos.
        shutil.rmtree(tmp_dir)
Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication.
def advance_shards(self):
    """Poll active shards for records and insert them into the buffer.

    Rotates exhausted shards.  Returns immediately if the buffer isn't empty.
    """
    # Don't poll shards when there are pending records.
    if self.buffer:
        return
    # Collect new records from all active shards.
    pairs = []
    for shard in self.active:
        batch = next(shard)
        if batch:
            pairs.extend((record, shard) for record in batch)
    self.buffer.push_all(pairs)
    self.migrate_closed_shards()
Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty.
def complete_automaton(self):
    """
    Adds missing transition states such that delta(q, u) is defined for
    every state q and any u in S, routing them to a fresh absorbing state.
    """
    # Fresh sentinel object acts as the absorbing terminal ("sink") state.
    self.term_state = object()
    self.Q.add(self.term_state)
    for tv in self.Q:
        for u in self.S:
            try:
                self.transition[tv][u]
            except KeyError:  # narrowed from a bare except: only a missing key is expected
                self.add_transition(tv, u, self.term_state)
    # The sink state loops back to itself on every symbol.
    for u in self.S:
        self.add_transition(self.term_state, u, self.term_state)
Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S
def connectionMade(self):
    """Send a HTTP POST command with the appropriate CIM over HTTP
    headers and payload."""
    self.factory.request_xml = str(self.factory.payload)
    self.sendCommand('POST', '/cimom')
    self.sendHeader('Host', '%s:%d' %
                    (self.transport.addr[0], self.transport.addr[1]))
    self.sendHeader('User-Agent', 'pywbem/twisted')
    self.sendHeader('Content-length', len(self.factory.payload))
    self.sendHeader('Content-type', 'application/xml')
    if self.factory.creds:
        # HTTP Basic auth built from the (user, password) credential pair.
        auth = base64.b64encode('%s:%s' % (self.factory.creds[0],
                                           self.factory.creds[1]))
        self.sendHeader('Authorization', 'Basic %s' % auth)
    # CIM operation headers per the CIM-over-HTTP specification.
    self.sendHeader('CIMOperation', str(self.factory.operation))
    self.sendHeader('CIMMethod', str(self.factory.method))
    self.sendHeader('CIMObject', str(self.factory.object))
    self.endHeaders()
    # TODO: Figure out why twisted doesn't support unicode. An
    # exception should be thrown by the str() call if the payload
    # can't be converted to the current codepage.
    self.transport.write(str(self.factory.payload))
Send a HTTP POST command with the appropriate CIM over HTTP headers and payload.
def generate_network(user=None, reset=False):
    """
    Assemble the network connections for a given user.

    :param user: login of the root user (defaults to the token's user)
    :param reset: regenerate the cached GML file even if one exists
    :returns: the username whose network was generated
    """
    token = collect_token()
    try:
        gh = login(token=token)
        root_user = gh.user(user)
    except Exception:
        # Failed to login using the token (github3.models.GitHubError).
        # The Python 2 original used "except Exception, e: raise e" --
        # invalid syntax on Python 3 and it also discarded the traceback;
        # a bare re-raise preserves it.
        raise
    graph_nodes = []
    graph_edges = []
    username = user if user is not None else root_user.login
    if not is_cached(username_to_file(username)) or reset:
        graph_nodes.append(username)
        # @TODO: take care of the 'rate limit exceeding' if imposed
        for person in gh.iter_following(username):
            graph_nodes.append(str(person))
            graph_edges.append((root_user.login, str(person)))
        for i in range(1, root_user.following):
            followed = gh.user(graph_nodes[i])
            graph_edges += [(followed.login, str(person))
                            for person in gh.iter_following(followed)
                            if str(person) in graph_nodes]
        generate_gml(username, graph_nodes, graph_edges, True)
    else:
        reuse_gml(username)
    return username
Assemble the network connections for a given user
def _readXputMaps(self, mapCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None):
    """
    GSSHA Project Read Map Files from File Method

    Reads every map file referenced by the project cards listed in
    ``mapCards``.  NOTE(review): spatialReferenceID defaults to 4236 -- if
    WGS84 was intended this should probably be 4326; confirm upstream.
    """
    if self.mapType in self.MAP_TYPES_SUPPORTED:
        for card in self.projectCards:
            if (card.name in mapCards) and self._noneOrNumValue(card.value):
                filename = card.value.strip('"')

                # Invoke read method on each map
                self._invokeRead(fileIO=RasterMapFile,
                                 directory=directory,
                                 filename=filename,
                                 session=session,
                                 spatial=spatial,
                                 spatialReferenceID=spatialReferenceID,
                                 replaceParamFile=replaceParamFile)
    else:
        for card in self.projectCards:
            if (card.name in mapCards) and self._noneOrNumValue(card.value):
                filename = card.value.strip('"')
                fileExtension = filename.split('.')[1]

                # Unsupported map type: only always-read/write maps are read.
                if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
                    # Invoke read method on each map
                    self._invokeRead(fileIO=RasterMapFile,
                                     directory=directory,
                                     filename=filename,
                                     session=session,
                                     spatial=spatial,
                                     spatialReferenceID=spatialReferenceID,
                                     replaceParamFile=replaceParamFile)

        # NOTE(review): placement inferred from the flattened source; the
        # warning fires once, after processing, when mapType is unsupported.
        log.warning('Could not read map files. '
                    'MAP_TYPE {0} not supported.'.format(self.mapType))
GSSHA Project Read Map Files from File Method
def get_class(schema_name):
    """
    Retrieve the message class associated with the schema name.

    If no match is found, the default schema is returned and a warning is
    logged.

    Args:
        schema_name (six.text_type): The name of the :class:`Message`
            sub-class; this is typically the Python path.

    Returns:
        Message: A sub-class of :class:`Message` to create the message from.
    """
    global _registry_loaded
    # Lazily populate the schema registry on first use.
    if not _registry_loaded:
        load_message_classes()
    try:
        return _schema_name_to_class[schema_name]
    except KeyError:
        _log.warning(
            'The schema "%s" is not in the schema registry! Either install '
            "the package with its schema definition or define a schema. "
            "Falling back to the default schema...",
            schema_name,
        )
        return Message
Retrieve the message class associated with the schema name. If no match is found, the default schema is returned and a warning is logged. Args: schema_name (six.text_type): The name of the :class:`Message` sub-class; this is typically the Python path. Returns: Message: A sub-class of :class:`Message` to create the message from.
def load(cls, fpath):
    """Loads a module and returns its object.

    :param str|unicode fpath: path to the module file
    :rtype: module
    """
    directory, basename = os.path.split(fpath)
    module_name = os.path.splitext(basename)[0]
    # Temporarily put the module's directory at the front of the search path.
    sys.path.insert(0, directory)
    try:
        module = import_module(module_name)
    finally:
        sys.path = sys.path[1:]
    return module
Loads a module and returns its object. :param str|unicode fpath: :rtype: module
def _decode_lines(geom):
    """
    Decode a linear MVT geometry into a list of Lines.

    Each individual linestring in the MVT is extracted to a separate entry
    in the list of lines.

    ``geom`` is the raw command/parameter integer stream of a Mapbox Vector
    Tile LineString geometry: coordinates are delta-encoded and
    zigzag-encoded (decoded here via ``unzigzag``).
    """
    lines = []
    current_line = []
    current_moveto = None
    # to keep track of the position. we'll adapt the move-to commands to all
    # be relative to 0,0 at the beginning of each linestring.
    x = 0
    y = 0
    end = len(geom)
    i = 0
    while i < end:
        header = geom[i]
        cmd = header & 7          # low 3 bits: command id
        run_length = header // 8  # remaining bits: repeat count
        if cmd == 1:  # move to
            # flush previous line.
            if current_moveto:
                lines.append(Line(current_moveto, EndsAt(x, y), current_line))
                current_line = []
            # a MoveTo starting a linestring always has run length 1
            assert run_length == 1
            x += unzigzag(geom[i+1])
            y += unzigzag(geom[i+2])
            i += 3
            current_moveto = MoveTo(x, y)
        elif cmd == 2:  # line to
            assert current_moveto
            # we just copy this run, since its encoding isn't going to change
            next_i = i + 1 + run_length * 2
            current_line.extend(geom[i:next_i])
            # but we still need to decode it to figure out where each move-to
            # command is in absolute space.
            for j in xrange(0, run_length):
                dx = unzigzag(geom[i + 1 + 2 * j])
                dy = unzigzag(geom[i + 2 + 2 * j])
                x += dx
                y += dy
            i = next_i
        else:
            raise ValueError('Unhandled command: %d' % cmd)
    # flush the final, still-open linestring.
    if current_line:
        assert current_moveto
        lines.append(Line(current_moveto, EndsAt(x, y), current_line))
    return lines
Decode a linear MVT geometry into a list of Lines. Each individual linestring in the MVT is extracted to a separate entry in the list of lines.
def _get_connection_state(self, conn_or_int_id): """Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id """ key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) if key not in table: return self.Disconnected data = table[key] return data['state']
Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id
def lookup(alias):
    """
    Find the matcher callable registered under the given alias.

    When no exact match exists, a normalized form of the alias is tried,
    and finally the normalized form with underscores removed. Returns
    ``None`` when nothing resolves.
    """
    # 1) exact hit
    if alias in matchers:
        return matchers[alias]

    # 2) normalized alias
    norm = normalize(alias)
    if norm in normalized:
        return matchers[normalized[norm]]

    # 3) normalized alias without snake-case underscores
    if '_' in alias:
        return lookup(normalize(alias).replace('_', ''))

    return None
Tries to find a matcher callable associated to the given alias. If an exact match does not exist, it will try normalizing the alias and even removing underscores to find one.
def _cron_matched(cron, cmd, identifier=None):
    '''Check if:
      - we find a cron with the same cmd (old state behavior)
      - but also be smart enough to match state-managed crons that were not
        previously removed by a cron.absent, by matching on the provided
        identifier.
    We ensure backwards compatibility by only checking on identifier if
    and only if an identifier was set on the serialized crontab.

    :param cron: dict describing one entry parsed from the crontab
    :param cmd: command of the state being matched
    :param identifier: optional identifier of the state being matched
    :return: True when ``cron`` is the entry managed by this cmd/identifier
    '''
    ret, id_matched = False, None
    # identifier stored on the serialized crontab entry, if any
    cid = _cron_id(cron)
    if cid:
        if not identifier:
            identifier = SALT_CRON_NO_IDENTIFIER
        eidentifier = _ensure_string(identifier)
        # old style second round
        # after saving crontab, we must check that if
        # we have not the same command, but the default id
        # to not set that as a match
        if (
            cron.get('cmd', None) != cmd
            and cid == SALT_CRON_NO_IDENTIFIER
            and eidentifier == SALT_CRON_NO_IDENTIFIER
        ):
            id_matched = False
        else:
            # on saving, be sure not to overwrite a cron
            # with specific identifier but also track
            # crons where command is the same
            # but with the default if that we gonna overwrite
            if (
                cron.get('cmd', None) == cmd
                and cid == SALT_CRON_NO_IDENTIFIER
                and identifier
            ):
                cid = eidentifier
            id_matched = eidentifier == cid

    # id_matched stays None when the crontab entry carries no identifier;
    # in that case fall back to pure command equality, otherwise trust the
    # identifier comparison above.
    if (
        ((id_matched is None) and cmd == cron.get('cmd', None))
        or id_matched
    ):
        ret = True
    return ret
Check if: - we find a cron with the same cmd (old state behavior) - but also be smart enough to match state-managed crons that were not previously removed by a cron.absent, by matching on the provided identifier. We ensure backwards compatibility by only checking the identifier if and only if an identifier was set on the serialized crontab.
def find_snapshots(network, carrier, maximum=True, minimum=True, n=3):
    """
    Function that returns snapshots with maximum and/or minimum feed-in of
    selected carrier.

    Parameters
    ----------
    network : :class:`pypsa.Network`
        Overall container of PyPSA
    carrier : str
        Selected carrier of generators (or 'residual load')
    maximum : bool
        Choose if timesteps of maximal feed-in are returned.
    minimum : bool
        Choose if timesteps of minimal feed-in are returned.
    n : int
        Number of maximal/minimal snapshots

    Returns
    -------
    calc_snapshots : pandas.DatetimeIndex
        Selected snapshots, in the chronological order of the network's
        snapshot index.
    """
    if carrier == 'residual load':
        power_plants = network.generators[network.generators.carrier.
                                          isin(['solar', 'wind', 'wind_onshore'])]
        power_plants_t = network.generators.p_nom[power_plants.index] * \
            network.generators_t.p_max_pu[power_plants.index]
        load = network.loads_t.p_set.sum(axis=1)
        all_renew = power_plants_t.sum(axis=1)
        all_carrier = load - all_renew

    if carrier in ('solar', 'wind', 'wind_onshore',
                   'wind_offshore', 'run_of_river'):
        power_plants = network.generators[network.generators.carrier
                                          == carrier]
        power_plants_t = network.generators.p_nom[power_plants.index] * \
            network.generators_t.p_max_pu[power_plants.index]
        all_carrier = power_plants_t.sum(axis=1)

    # Ascending sort: head holds the minima, tail holds the maxima.
    sorted_feedin = all_carrier.sort_values()

    # BUG FIX: the previous implementation returned head() (the n smallest
    # values) for maximum=True and tail() for minimum=True -- exactly
    # inverted. It also used the deprecated Series.append (removed in
    # pandas 2.0); collect the selected snapshot labels via Index.union
    # instead.
    chosen = sorted_feedin.index[:0]
    if maximum:
        chosen = chosen.union(sorted_feedin.tail(n=n).index)
    if minimum:
        chosen = chosen.union(sorted_feedin.head(n=n).index)

    # Re-filter through the original index to preserve chronological order.
    calc_snapshots = all_carrier.index[all_carrier.index.isin(chosen)]

    return calc_snapshots
Function that returns snapshots with maximum and/or minimum feed-in of the selected carrier.

Parameters
----------
network : :class:`pypsa.Network`
    Overall container of PyPSA
carrier : str
    Selected carrier of generators
maximum : bool
    Choose if timesteps of maximal feed-in are returned.
minimum : bool
    Choose if timesteps of minimal feed-in are returned.
n : int
    Number of maximal/minimal snapshots

Returns
-------
calc_snapshots : pandas.core.indexes.datetimes.DatetimeIndex
    Index containing the selected snapshots
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
        the file or document to write into

    tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
        the tables to write

    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`

    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows

    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate
    """
    from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from ligo.lw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append:
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types) and
          os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format (honouring overwrite for same-type tables)
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file -- gzip transparently when the target name ends in .gz
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
Write an LIGO_LW table to file Parameters ---------- target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into tables : `list`, `tuple` of :class:`~ligo.lw.table.Table` the tables to write append : `bool`, optional, default: `False` if `True`, append to an existing file/table, otherwise `overwrite` overwrite : `bool`, optional, default: `False` if `True`, delete an existing instance of the table type, otherwise append new rows **kwargs other keyword arguments to pass to :func:`~ligo.lw.utils.load_filename`, or :func:`~ligo.lw.utils.load_fileobj` as appropriate
def double_prompt_for_plaintext_password():
    """Get the desired password from the user through a double prompt.

    Keeps prompting until both entries match; the matching password is
    returned as plain text.
    """
    while True:
        password = getpass.getpass('Enter password: ')
        if password == getpass.getpass('Repeat password: '):
            return password
        sys.stderr.write('Passwords do not match, try again.\n')
Get the desired password from the user through a double prompt.
def DataProcessorsDelete(self, dataProcessorId):
    """
    Delete a data processor in CommonSense.

    @param dataProcessorId - The id of the data processor that will be deleted.

    @return (bool) - Boolean indicating whether DataProcessorsDelete was successful.
    """
    url = '/dataprocessors/{id}.json'.format(id=dataProcessorId)
    if not self.__SenseApiCall__(url, 'DELETE'):
        self.__error__ = "api call unsuccessful"
        return False
    return True
Delete a data processor in CommonSense. @param dataProcessorId - The id of the data processor that will be deleted. @return (bool) - Boolean indicating whether DataProcessorsDelete was successful.
def update(self, instance, validated_data):
    """Change the user's password and mark the reset request as handled."""
    user = instance.user
    user.set_password(validated_data["password1"])
    user.full_clean()
    user.save()

    # mark password reset object as reset
    instance.reset = True
    instance.full_clean()
    instance.save()

    return instance
change password
def train(ctx, output, corpus, clusters):
    """Train POS Tagger.

    Builds a training corpus from the WSJ and/or GENIA treebanks
    (normalising GENIA's tag set to Penn-Treebank-style tags on the fly)
    and trains a ChemCrfPosTagger, saving the model to ``output``.
    """
    click.echo('chemdataextractor.pos.train')
    click.echo('Output: %s' % output)
    click.echo('Corpus: %s' % corpus)
    click.echo('Clusters: %s' % clusters)

    wsj_sents = []
    genia_sents = []

    if corpus == 'wsj' or corpus == 'wsj+genia':
        wsj_sents = list(wsj_training.tagged_sents())
        # For WSJ, remove all tokens with -NONE- tag
        for i, wsj_sent in enumerate(wsj_sents):
            wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']

    if corpus == 'genia' or corpus == 'wsj+genia':
        genia_sents = list(genia_training.tagged_sents())
        # Translate GENIA tags into the Penn Treebank tag set
        for i, genia_sent in enumerate(genia_sents):
            for j, (token, tag) in enumerate(genia_sent):
                if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -LRB- (also do for evaluation)
                elif tag == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # ) to -RRB- (also do for evaluation)
                elif tag == 'CT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == 'XT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == '-':
                    genia_sents[i][j] = (token, ':')  # Single hyphen character for dash
                elif tag == 'N':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif tag == 'PP':
                    genia_sents[i][j] = (token, 'PRP')  # Typo?
                elif tag == '' and token == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # Typo?
                elif tag == '' and token == 'IFN-gamma':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif '|' in tag:
                    genia_sents[i][j] = (token, tag.split('|')[0])  # If contains |, choose first part
            # Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
            genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]

    if corpus == 'wsj':
        training_corpus = wsj_sents
    elif corpus == 'genia':
        training_corpus = genia_sents
    elif corpus == 'wsj+genia':
        training_corpus = wsj_sents + genia_sents
    else:
        raise click.ClickException('Invalid corpus')

    tagger = ChemCrfPosTagger(clusters=clusters)
    tagger.train(training_corpus, output)
Train POS Tagger.
def _get_response(self, params): """ wrap the call to the requests package """ return self._session.get( self._api_url, params=params, timeout=self._timeout ).json(encoding="utf8")
wrap the call to the requests package
def dest_path(self):
    """
    :return: The destination path. Absolute configured paths are returned
        untouched; relative ones are resolved against the current working
        directory.
    :rtype: str
    """
    local_path = self.config.local_path
    if os.path.isabs(local_path):
        return local_path
    return os.path.normpath(os.path.join(os.getcwd(), local_path))
:return: The destination path. :rtype: str
def _create_put_request(self, resource, billomat_id, command=None, send_data=None): """ Creates a put request and return the response data """ assert (isinstance(resource, str)) if isinstance(billomat_id, int): billomat_id = str(billomat_id) if not command: command = '' else: command = '/' + command response = self.session.put( url=self.api_url + resource + '/' + billomat_id + command, data=json.dumps(send_data), ) return self._handle_response(response)
Creates a put request and return the response data
def extract_months(time, months):
    """Extract times within specified months of the year.

    Parameters
    ----------
    time : xarray.DataArray
         Array of times that can be represented by numpy.datetime64 objects
         (i.e. the year is between 1678 and 2262).
    months : Desired months of the year to include

    Returns
    -------
    xarray.DataArray of the desired times
    """
    month_mask = _month_conditional(time, months)
    return time.sel(time=month_mask)
Extract times within specified months of the year. Parameters ---------- time : xarray.DataArray Array of times that can be represented by numpy.datetime64 objects (i.e. the year is between 1678 and 2262). months : Desired months of the year to include Returns ------- xarray.DataArray of the desired times
def _get_library_root_key_for_os_path(self, path): """Return library root key if path is within library root paths""" path = os.path.realpath(path) library_root_key = None for library_root_key, library_root_path in self._library_root_paths.items(): rel_path = os.path.relpath(path, library_root_path) if rel_path.startswith('..'): library_root_key = None continue else: break return library_root_key
Return library root key if path is within library root paths
def _parse_config(self, config):
    """ Parses a tensorflow configuration.

    Reads geometry/layer settings from ``config`` (a dict), pre-allocates
    the input batch buffer, and loads weights/mean data depending on
    whether a saved model directory is available.

    :param config: dict with keys 'batch_size', 'im_height', 'im_width',
        'channels', 'out_layer', 'feature_layer' and optionally
        'out_size'; when no model dir is set, 'caffe_weights' and
        'mean_file' must be present as well.
    """
    self._batch_size = config['batch_size']
    self._im_height = config['im_height']
    self._im_width = config['im_width']
    self._num_channels = config['channels']
    self._output_layer = config['out_layer']
    self._feature_layer = config['feature_layer']
    self._out_size = None
    if 'out_size' in config.keys():
        self._out_size = config['out_size']
    # Reusable input batch buffer (NHWC layout, float64 zeros).
    self._input_arr = np.zeros([self._batch_size, self._im_height,
                                self._im_width, self._num_channels])
    if self._model_dir is None:
        # No checkpoint directory: bootstrap from Caffe-exported weights.
        # NOTE(review): np.load(...).item() implies a pickled dict, which
        # needs allow_pickle=True on NumPy >= 1.16.3 -- confirm the pinned
        # NumPy version.
        self._net_data = np.load(config['caffe_weights']).item()
        self._mean = np.load(config['mean_file'])
        self._model_filename = None
    else:
        # Checkpoint directory available: weights come from the checkpoint.
        self._net_data = None
        self._mean = np.load(os.path.join(self._model_dir, 'mean.npy'))
        self._model_filename = os.path.join(self._model_dir, 'model.ckpt')
Parses a tensorflow configuration
def get_sys_info():
    """Return system information as a list of ``(label, value)`` tuples.

    The previous docstring claimed a dict was returned; the function has
    always returned an ordered list of 2-tuples, so the documentation is
    corrected rather than the (caller-visible) return type.

    Collection is best-effort: if probing the platform fails for any
    reason, a partial (possibly empty) list is returned instead of raising.
    """
    blob = []

    try:
        (sysname, nodename, release,
         version, machine, processor) = platform.uname()
        blob.extend([
            ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", "%s" % (sysname)),
            ("OS-release", "%s" % (release)),
            ("machine", "%s" % (machine)),
            ("processor", "%s" % (processor)),
            ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
            ("LANG", "%s" % os.environ.get('LANG', "None")),
            ("LOCALE", "%s.%s" % locale.getlocale()),
        ])
    except Exception:
        # Best-effort: never let environment probing crash the caller.
        pass

    return blob
Returns system information as a dict
def remove_routes(self, item, routes):
    """Removes item from matching routes.

    Routes that end up empty are dropped entirely. Routes that are
    unknown, or that do not contain ``item``, are skipped.
    """
    for route in routes:
        items = self._routes.get(route)
        if items is None:
            # BUG FIX: previously an unknown route crashed with
            # AttributeError (None.remove) instead of being skipped.
            continue
        try:
            items.remove(item)
            LOG.debug('removed item from route %s', route)
        except ValueError:
            # item was not registered under this route
            pass

        if not items:
            self._routes.pop(route)
            LOG.debug('removed route %s', route)
Removes item from matching routes
def _import(self, record_key, record_data, overwrite=True, encryption='', last_modified=0.0, **kwargs):
    '''
        a helper method for other storage clients to import into s3

    :param record_key: string with key for record
    :param record_data: byte data for body of record
    :param overwrite: [optional] boolean to overwrite existing records
    :param encryption: [optional] string with encryption type add to metadata
    :param last_modified: [optional] float with modification timestamp to record
    :param kwargs: [optional] keyword arguments from other import methods
    :return: boolean indicating whether record was imported
    '''

# define keyword arguments
    from time import time
    create_kwargs = {
        'bucket_name': self.bucket_name,
        'record_key': record_key,
        'record_data': record_data,
        'overwrite': overwrite,
        'record_metadata': { 'last_modified': str(time()) }
    }

# add encryption and last_modified
    if encryption:
        create_kwargs['record_metadata']['encryption'] = encryption
    if last_modified:
        create_kwargs['record_metadata']['last_modified'] = str(last_modified)

# add record mimetype and encoding
    import mimetypes
    guess_mimetype, guess_encoding = mimetypes.guess_type(record_key)
    if not guess_mimetype:
    # BUG FIX: the old checks used record_key.find('.yaml'), which returns
    # -1 (truthy) when absent -- so nearly every key matched and the later
    # .drep test overwrote the yaml guess. Use suffix tests instead.
        if record_key.endswith(('.yaml', '.yml')):
            guess_mimetype = 'application/x-yaml'
        if record_key.endswith('.drep'):
            guess_mimetype = 'application/x-drep'
    if guess_mimetype:
        create_kwargs['record_mimetype'] = guess_mimetype
    if guess_encoding:
        create_kwargs['record_encoding'] = guess_encoding

# create record
    try:
        self.s3.create_record(**create_kwargs)
    except ValueError as err:
        if str(err).find('already contains') > -1:
            self.s3.iam.printer('%s already exists in %s collection. Skipping.' % (record_key, self.bucket_name))
            return False
        else:
            raise
    except:
        raise

    return True
a helper method for other storage clients to import into s3 :param record_key: string with key for record :param record_data: byte data for body of record :param overwrite: [optional] boolean to overwrite existing records :param encryption: [optional] string with encryption type add to metadata :param kwargs: [optional] keyword arguments from other import methods :return: boolean indicating whether record was imported
def memoize(func):
    """
    Decorator to cause a function to cache it's results for each combination of
    inputs and return the cached result on subsequent calls.  Does not support
    named arguments or arg values that are not hashable.

    >>> @memoize
    ... def foo(x):
    ...  print('running function with', x)
    ...  return x+3
    ...
    >>> foo(10)
    running function with 10
    13
    >>> foo(10)
    13
    >>> foo(11)
    running function with 11
    14
    >>> @memoize
    ... def range_tuple(limit):
    ...  print('running function')
    ...  return tuple(i for i in range(limit))
    ...
    >>> range_tuple(3)
    running function
    (0, 1, 2)
    >>> range_tuple(3)
    (0, 1, 2)
    >>> @memoize
    ... def range_iter(limit):
    ...  print('running function')
    ...  return (i for i in range(limit))
    ...
    >>> range_iter(3)
    Traceback (most recent call last):
    TypeError: Can't memoize a generator or non-hashable object!
    """
    func._result_cache = {}  # pylint: disable-msg=W0212

    @wraps(func)
    def _memoized_func(*args, **kwargs):
        # kwargs are folded into the key in a deterministic (sorted) order
        key = (args, tuple(sorted(kwargs.items())))
        cache = func._result_cache  # pylint: disable-msg=W0212
        if key in cache:
            return cache[key]

        result = func(*args, **kwargs)
        # Generators would be consumed on first use, and non-hashable
        # results signal something that was never meant to be cached.
        if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
            raise TypeError("Can't memoize a generator or non-hashable object!")
        cache[key] = result
        return result

    return _memoized_func
Decorator to cause a function to cache it's results for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> @memoize ... def foo(x): ... print('running function with', x) ... return x+3 ... >>> foo(10) running function with 10 13 >>> foo(10) 13 >>> foo(11) running function with 11 14 >>> @memoize ... def range_tuple(limit): ... print('running function') ... return tuple(i for i in range(limit)) ... >>> range_tuple(3) running function (0, 1, 2) >>> range_tuple(3) (0, 1, 2) >>> @memoize ... def range_iter(limit): ... print('running function') ... return (i for i in range(limit)) ... >>> range_iter(3) Traceback (most recent call last): TypeError: Can't memoize a generator or non-hashable object!
def pool_create(self, name, description, is_public):
    """Function to create a pool (Require login) (UNTESTED).

    Parameters:
        name (str): The name.
        description (str): A description of the pool.
        is_public (int): 1 or 0, whether or not the pool is public.
    """
    payload = {
        'pool[name]': name,
        'pool[description]': description,
        'pool[is_public]': is_public,
        }
    return self._get('pool/create', payload, method='POST')
Function to create a pool (Require login) (UNTESTED). Parameters: name (str): The name. description (str): A description of the pool. is_public (int): 1 or 0, whether or not the pool is public.
def render_field_description(field):
    """
    Render a field description as HTML.

    Returns an empty string when the field has no ``description``
    attribute, or the description is the empty string.

    NOTE(review): the description is interpolated without HTML-escaping --
    presumably descriptions are trusted, developer-supplied strings;
    confirm before exposing user-provided text here.
    """
    if hasattr(field, 'description') and field.description != '':
        markup = """<p class="help-block">{field.description}</p>""".format(
            field=field
        )
        return HTMLString(markup)
    return ''
Render a field description as HTML.
def check_runtime_remaining(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.4.1.318.1.1.1.2.2.3.0

    MIB excerpt: the UPS battery run time remaining before battery
    exhaustion. The raw SNMP value is in TimeTicks, i.e. hundredths of a
    second, converted to minutes before reporting.
    """
    minutes_left = calc_minutes_from_ticks(the_snmp_value)

    the_helper.add_metric(
        label=the_helper.options.type,
        value=minutes_left,
        warn=the_helper.options.warning,
        crit=the_helper.options.critical,
        uom="Minutes")
    the_helper.check_all_metrics()
    the_helper.set_summary("Remaining runtime on battery is {} minutes".format(minutes_left))
OID .1.3.6.1.4.1.318.1.1.1.2.2.3.0 MIB excerpt The UPS battery run time remaining before battery exhaustion. SNMP value is in TimeTicks aka hundredths of a second
def _varx(ins):
    """ Defines a memory space with a default CONSTANT expression
    1st parameter is the var name
    2nd parameter is the type-size (u8 or i8 for byte, u16 or i16 for word, etc)
    3rd parameter is the list of expressions. All of them will be converted to the
    type required.
    """
    output = []
    # emit the assembly label for the variable
    output.append('%s:' % ins.quad[1])
    # NOTE(review): the expression list arrives serialized as a Python
    # literal; eval() here assumes trusted, compiler-internal input only --
    # never feed this untrusted data.
    q = eval(ins.quad[3])

    if ins.quad[2] in ('i8', 'u8'):
        size = 'B'
    elif ins.quad[2] in ('i16', 'u16'):
        size = 'W'
    elif ins.quad[2] in ('i32', 'u32'):
        # 32-bit values are emitted as two 16-bit words, low word first.
        size = 'W'
        z = list()
        for expr in q:
            z.extend(['(%s) & 0xFFFF' % expr, '(%s) >> 16' % expr])
        q = z
    else:
        raise InvalidIC(ins.quad, 'Unimplemented vard size: %s' % ins.quad[2])

    # one DEFB/DEFW directive per (possibly split) expression
    for x in q:
        output.append('DEF%s %s' % (size, x))

    return output
Defines a memory space with a default CONSTANT expression 1st parameter is the var name 2nd parameter is the type-size (u8 or i8 for byte, u16 or i16 for word, etc) 3rd parameter is the list of expressions. All of them will be converted to the type required.
def read_data(self, variable_instance):
    """ read values from the device

    :param variable_instance: variable whose ``visavariable.device_property``
        selects the measurement to query
    :return: parsed measurement value, or None when no instrument is
        connected or the property is not supported
    """
    if self.inst is None:
        return None
    # BUG FIX: the previous code compared device_property.upper() against
    # the lowercase literal 'vrms_chan1', which can never be equal, so this
    # branch was unreachable. Compare case-insensitively against the
    # uppercase form instead.
    if variable_instance.visavariable.device_property.upper() == 'VRMS_CHAN1':
        return self.parse_value(self.inst.query(':MEAS:ITEM? VRMS,CHAN1'))
    return None
read values from the device
def get_visible_elements(self, locator, params=None, timeout=None):
    """
    Get elements both present AND visible in the DOM.

    If timeout is 0 (zero) return WebElement instance or None, else we wait and
    retry for timeout and raise TimeoutException should the element not be found.

    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: WebElement instance
    """
    # Delegate to get_present_elements with the visibility flag enabled.
    return self.get_present_elements(locator, params, timeout, True)
Get elements both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance
def mass1_from_mass2_eta(mass2, eta, force_real=True):
    """Returns the primary mass from the secondary mass and symmetric mass
    ratio.

    :param mass2: the secondary mass
    :param eta: the symmetric mass ratio
    :param force_real: if True, force the solution to be real-valued
    :return: the primary mass
    """
    # known_is_secondary=True tells the generic solver that the supplied
    # mass is m2, so it returns the companion (primary) mass.
    return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True,
                                   force_real=force_real)
Returns the primary mass from the secondary mass and symmetric mass ratio.
def from_client_config(cls, client_config, scopes, **kwargs):
    """Creates a :class:`requests_oauthlib.OAuth2Session` from client
    configuration loaded from a Google-format client secrets file.

    Args:
        client_config (Mapping[str, Any]): The client
            configuration in the Google `client secrets`_ format.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Returns:
        Flow: The constructed Flow instance.

    Raises:
        ValueError: If the client configuration is not in the correct
            format.

    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # The secrets format uses a single top-level key naming the app type.
    if 'web' in client_config:
        client_type = 'web'
    elif 'installed' in client_config:
        client_type = 'installed'
    else:
        raise ValueError(
            'Client secrets must be for a web or installed app.')

    # The helper validates the config and builds the OAuth2Session with
    # the requested scopes; it also returns the (possibly normalised)
    # client_config.
    session, client_config = (
        google_auth_oauthlib.helpers.session_from_client_config(
            client_config, scopes, **kwargs))

    # redirect_uri is optional; None is tolerated by the constructor.
    redirect_uri = kwargs.get('redirect_uri', None)

    return cls(session, client_type, client_config, redirect_uri)
Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance. Raises: ValueError: If the client configuration is not in the correct format. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets
def load_models_in_model_repo(self, global_model_repo=None, encoding='utf-8'):
    """
    Explicitly load all registered models (instead of relying on the
    automatic loading performed during reference resolution of a single
    model). The metamodels must be identifiable via the MetaModelProvider.

    :param global_model_repo: optional repository to load into; a fresh
        GlobalModelRepository is created when omitted.
    :param encoding: text encoding used to read the model files.
    :return: a GlobalModelRepository with the loaded models
    """
    import textx.scoping

    repo = global_model_repo or textx.scoping.GlobalModelRepository()
    for pattern in self.filename_pattern_list:
        repo.load_models_using_filepattern(
            pattern, model=None, glob_args=self.glob_args,
            is_main_model=True, encoding=encoding
        )
    return repo
load all registered models (called explicitly from the user and not as an automatic activity). Normally this is done automatically while reference resolution of one loaded model. However, if you wish to load all models you can call this and get a model repository. The metamodels must be identifiable via the MetaModelProvider. Returns: a GlobalModelRepository with the loaded models
def queryAll(self, queryString):
    '''
    Retrieves data from specified objects, whether or not they have been
    deleted.
    '''
    self._setHeaders('queryAll')
    service = self._sforce.service
    return service.queryAll(queryString)
Retrieves data from specified objects, whether or not they have been deleted.
async def send_document(self, path, entity):
    """Sends the file located at path to the desired entity as a document"""
    # force_document=True uploads the file as-is (no photo/media
    # conversion); progress is reported via self.upload_progress_callback.
    await self.send_file(
        entity, path,
        force_document=True,
        progress_callback=self.upload_progress_callback
    )
    print('Document sent!')
Sends the file located at path to the desired entity as a document
def list_directory(self, mdir, limit=None, marker=None):
    """ListDirectory
    https://apidocs.joyent.com/manta/api.html#ListDirectory

    @param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
    @param limit {int} Limits the number of records to come back (default
        and max is 1000).
    @param marker {str} Key name at which to start the next listing.
    @returns Directory entries (dirents). E.g.:
        [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'},
         ...]
    """
    # list_directory2 returns (response, dirents); only the entries matter.
    _, dirents = self.list_directory2(mdir, limit=limit, marker=marker)
    return dirents
ListDirectory https://apidocs.joyent.com/manta/api.html#ListDirectory @param mdir {str} A manta path, e.g. '/trent/stor/mydir'. @param limit {int} Limits the number of records to come back (default and max is 1000). @param marker {str} Key name at which to start the next listing. @returns Directory entries (dirents). E.g.: [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'}, ...]
def in_place(
        self,
        mode='r',
        buffering=-1,
        encoding=None,
        errors=None,
        newline=None,
        backup_extension=None,
):
    """
    A context in which a file may be re-written in-place with
    new content.

    Yields a tuple of :samp:`({readable}, {writable})` file
    objects, where `writable` replaces `readable`.

    If an exception occurs, the old file is restored, removing the
    written data.

    Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
    read-only-modes are allowed. A :exc:`ValueError` is raised
    on invalid modes.

    For example, to add line numbers to a file::

        p = Path(filename)
        assert p.isfile()
        with p.in_place() as (reader, writer):
            for number, line in enumerate(reader, 1):
                writer.write('{0:3}: '.format(number))
                writer.write(line)

    Thereafter, the file at `filename` will have line numbers in it.
    """
    import io

    if set(mode).intersection('wa+'):
        raise ValueError('Only read-only file modes can be used')

    # move existing file to backup, create new file with same permissions
    # borrowed extensively from the fileinput module
    backup_fn = self + (backup_extension or os.extsep + 'bak')
    try:
        os.unlink(backup_fn)
    except os.error:
        pass
    os.rename(self, backup_fn)
    readable = io.open(
        backup_fn,
        mode,
        buffering=buffering,
        encoding=encoding,
        errors=errors,
        newline=newline,
    )
    try:
        # preserve the original file's permission bits on the replacement
        perm = os.fstat(readable.fileno()).st_mode
    except OSError:
        # platform without fstat support: fall back to a plain open
        writable = open(
            self,
            'w' + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
    else:
        os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
        if hasattr(os, 'O_BINARY'):
            os_mode |= os.O_BINARY
        fd = os.open(self, os_mode, perm)
        writable = io.open(
            fd,
            "w" + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
        try:
            if hasattr(os, 'chmod'):
                os.chmod(self, perm)
        except OSError:
            pass
    try:
        yield readable, writable
    except Exception:
        # move backup back
        readable.close()
        writable.close()
        try:
            os.unlink(self)
        except os.error:
            pass
        os.rename(backup_fn, self)
        raise
    else:
        readable.close()
        writable.close()
    finally:
        # the backup is gone after a restore; unlink failure is expected then
        try:
            os.unlink(backup_fn)
        except os.error:
            pass
A context in which a file may be re-written in-place with new content. Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`. If an exception occurs, the old file is restored, removing the written data. Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are allowed. A :exc:`ValueError` is raised on invalid modes. For example, to add line numbers to a file:: p = Path(filename) assert p.isfile() with p.in_place() as (reader, writer): for number, line in enumerate(reader, 1): writer.write('{0:3}: '.format(number))) writer.write(line) Thereafter, the file at `filename` will have line numbers in it.
def update(self, *args, **kwargs):
    """d.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.

    Overwrite the values in `d` with the keys from `E` and `F`. If any
    key in `value` is invalid in `d`, ``KeyError`` is raised.

    This method is atomic - either all values in `value` are set in `d`,
    or none are. ``update`` does not commit a partially-updated version
    to disk.

    For kwargs, Seismic Unix-style names are supported. `BinField` and
    `TraceField` are not, because there are name collisions between
    them, although this restriction may be lifted in the future.

    Notes
    -----
    .. versionchanged:: 1.3
        Support for common dict operations (update, keys, values)

    .. versionchanged:: 1.6
        Atomicity guarantee

    .. versionchanged:: 1.6
        `**kwargs` support

    Examples
    --------
    >>> e = { 1: 10, 9: 5 }
    >>> d.update(e)
    >>> l = [ (105, 11), (169, 4) ]
    >>> d.update(l)
    >>> d.update(e, iline=189, xline=193, hour=5)
    >>> d.update(sx=7)
    """
    if len(args) > 1:
        msg = 'update expected at most 1 non-keyword argument, got {}'
        raise TypeError(msg.format(len(args)))

    # Stage every write into a scratch copy of the buffer so that a bad
    # key raises before anything is committed (atomicity guarantee).
    buf = bytearray(self.buf)

    # FIX: `collections.Mapping` moved to `collections.abc` in 3.3 and the
    # old alias was removed in Python 3.10; resolve it version-agnostically.
    Mapping = getattr(collections, 'abc', collections).Mapping

    # Implementation largely borrowed from collections.mapping
    # If E present and has a .keys() method: for k in E: D[k] = E[k]
    # If E present and lacks .keys() method: for (k, v) in E: D[k] = v
    # In either case, this is followed by: for k, v in F.items(): D[k] = v
    if len(args) == 1:
        other = args[0]
        if isinstance(other, Mapping):
            for key in other:
                self.putfield(buf, int(key), other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.putfield(buf, int(key), other[key])
        else:
            for key, value in other:
                self.putfield(buf, int(key), value)

    # Seismic Unix-style keyword names are translated via self._kwargs.
    for key, value in kwargs.items():
        self.putfield(buf, int(self._kwargs[key]), value)

    # All puts succeeded; commit the staged buffer and flush to disk.
    self.buf = buf
    self.flush()
d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7)
def max_frequency(sig, FS):
    """Compute the 0.95 max frequency of a signal.

    Parameters
    ----------
    sig: ndarray
        input from which max frequency is computed.
    FS: int
        sampling frequency

    Returns
    -------
    f_max: int
        frequency below which 95% of the cumulative spectral magnitude lies.
    """
    f, fs = plotfft(sig, FS, doplot=False)
    t = cumsum(fs)
    # FIX: the previous `find(...)[0]` relied on matplotlib.mlab.find, which
    # was removed from matplotlib. argmax of the boolean mask returns the
    # first index where the cumulative spectrum exceeds 95% of its total.
    ind_mag = (t > t[-1] * 0.95).argmax()
    f_max = f[ind_mag]
    return f_max
Compute max frequency along the specified axes. Parameters ---------- sig: ndarray input from which max frequency is computed. FS: int sampling frequency Returns ------- f_max: int 0.95 of max_frequency using cumsum.
def stdout_to_results(s):
    """Turn the multi-line output of a benchmark process into a sequence
    of BenchmarkResult instances.

    Each non-empty line is whitespace-split and unpacked into one
    BenchmarkResult.

    :param s: captured stdout of the benchmark process
    :return: list of BenchmarkResult
    """
    # FIX: splitlines() copes with \r\n output, and — unlike
    # split('\n') on a stripped empty string — yields no bogus empty
    # record when the process printed nothing.
    return [BenchmarkResult(*line.split()) for line in s.strip().splitlines()]
Turns the multi-line output of a benchmark process into a sequence of BenchmarkResult instances.
def random_redshift_array(
        log,
        sampleNumber,
        lowerRedshiftLimit,
        upperRedshiftLimit,
        redshiftResolution,
        pathToOutputPlotDirectory,
        plot=False):
    """
    *Generate a NumPy array of random distances given a sample number and distance limit*

    **Key Arguments:**
        - ``log`` -- logger
        - ``sampleNumber`` -- the sample number, i.e. array size
        - ``lowerRedshiftLimit`` -- the lower redshift limit of the volume to be included
        - ``upperRedshiftLimit`` -- the upper redshift limit of the volume to be included
        - ``redshiftResolution`` -- the resolution of the redshift distribution
        - ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
        - ``plot`` -- generate plot?

    **Return:**
        - ``redshiftArray`` -- an array of random redshifts within the volume limit
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    import matplotlib.pyplot as plt
    import numpy as np
    import numpy.random as npr
    ## LOCAL APPLICATION ##
    import dryxPython.astrotools as da

    # Discrete grid of redshifts; sampled values are snapped to this grid.
    redshiftDistribution = np.arange(
        0., upperRedshiftLimit, redshiftResolution)
    closestNumber = lambda n, l: min(l, key=lambda x: abs(x - n))

    # GIVEN THE REDSHIFT LIMIT - DETERMINE THE VOLUME LIMIT
    distanceDictionary = da.convert_redshift_to_distance(upperRedshiftLimit)
    upperMpcLimit = distanceDictionary["dl_mpc"]
    upperVolumeLimit = (4. / 3.) * np.pi * upperMpcLimit ** 3
    if lowerRedshiftLimit == 0.:
        lowerVolumeLimit = 0.
    else:
        distanceDictionary = da.convert_redshift_to_distance(
            lowerRedshiftLimit)
        lowerMpcLimit = distanceDictionary["dl_mpc"]
        lowerVolumeLimit = (4. / 3.) * np.pi * lowerMpcLimit ** 3
    # Shell between the two limits; sampling uniformly in volume gives a
    # spatially uniform distribution of events within the shell.
    volumeShell = upperVolumeLimit - lowerVolumeLimit

    # GENERATE A LIST OF RANDOM DISTANCES
    redshiftList = []
    for i in range(sampleNumber):
        # Draw a volume uniformly within the shell, invert to a distance,
        # then convert back to a (grid-snapped) redshift.
        randomVolume = lowerVolumeLimit + npr.random() * volumeShell
        randomDistance = (randomVolume * (3. / 4.) / np.pi) ** (1. / 3.)
        randomRedshift = da.convert_mpc_to_redshift(randomDistance)
        randomRedshift = closestNumber(randomRedshift, redshiftDistribution)
        # log.debug('randomDistance %s' % (randomDistance,))
        redshiftList.append(randomRedshift)
    redshiftArray = np.array(redshiftList)
    # log.info('redshiftArray %s' % (redshiftArray,))

    if plot:
        # FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR
        fig = plt.figure(
            num=None,
            figsize=(8, 8),
            dpi=None,
            facecolor=None,
            edgecolor=None,
            frameon=True)
        ax = fig.add_axes(
            [0.1, 0.1, 0.8, 0.8],
            polar=True)
        # Random azimuthal angles purely for visualisation on the polar plot.
        thetaList = []
        twoPi = 2. * np.pi
        for i in range(sampleNumber):
            thetaList.append(twoPi * npr.random())
        thetaArray = np.array(thetaList)

        # NOTE(review): `verts` and `hold` were removed from recent
        # matplotlib scatter() — confirm the pinned matplotlib version.
        plt.scatter(
            thetaArray,
            redshiftArray,
            s=10,
            c='b',
            marker='o',
            cmap=None,
            norm=None,
            vmin=None,
            vmax=None,
            alpha=None,
            linewidths=None,
            edgecolor='w',
            verts=None,
            hold=None)
        title = "SN Redshift Distribution"
        plt.title(title)
        fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png"
        plt.savefig(fileName)
        plt.clf()  # clear figure

    return redshiftArray
*Generate a NumPy array of random distances given a sample number and distance limit* **Key Arguments:** - ``log`` -- logger - ``sampleNumber`` -- the sample number, i.e. array size - ``lowerRedshiftLimit`` -- the lower redshift limit of the volume to be included - ``upperRedshiftLimit`` -- the upper redshift limit of the volume to be included - ``redshiftResolution`` -- the resolution of the redshift distribution - ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user) - ``plot`` -- generate plot? **Return:** - ``redshiftArray`` -- an array of random redshifts within the volume limit
def rows(self):
    """Return a numpy array holding the row names of this frame."""
    # Work on a copy so the query does not disturb this instance.
    frame = self.copy()
    query_result = frame.query.executeQuery(format="soa")
    return query_result["_rowName"]
Returns a numpy array of the rows name
def create(dotted, shortname, longname):
    """
    Creates new OID in the database

    @param dotted - dotted-decimal representation of new OID
    @param shortname - short name for new OID
    @param longname - long name for new OID
    @returns Oid object corresponding to new OID

    This function should be used with extreme care. Whenever
    possible, it is better to add new OIDs via the OpenSSL
    configuration file.

    Results of calling this function twice for the same OID, or for
    an Oid already in the database, are undefined.
    """
    if pyver > 2:
        # OBJ_create expects C byte strings on Python 3: the dotted form
        # is plain ASCII, the human-readable names may contain UTF-8.
        dotted = dotted.encode('ascii')
        shortname = shortname.encode('utf-8')
        longname = longname.encode('utf-8')
    nid = libcrypto.OBJ_create(dotted, shortname, longname)
    # OBJ_create returns 0 (NID_undef) on failure.
    if nid == 0:
        raise LibCryptoError("Problem adding new OID to the database")
    return Oid(nid)
Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with extreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file Results of calling this function twice for same OID or for Oid already in database are undefined
def url(self):
    """
    Returns the URL for the current transformation, which can be used
    to retrieve the file. If security is enabled, signature and policy
    parameters will be included

    *returns* [String]

    ```python
    transform = client.upload(filepath='/path/to/file')
    transform.url()
    # https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE
    ```
    """
    # Collect the call parameters before delegating to the URL builder.
    params = dict(
        external_url=self.external_url,
        handle=self.handle,
        security=self.security,
        apikey=self.apikey,
    )
    return utils.get_transform_url(self._transformation_tasks, **params)
Returns the URL for the current transformation, which can be used to retrieve the file. If security is enabled, signature and policy parameters will be included *returns* [String] ```python transform = client.upload(filepath='/path/to/file') transform.url() # https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE ```
def setFilepath(self, filepath):
    """
    Set the filepath for this button to the given path and refresh
    the button icon from it.

    :param      filepath | <str>
    """
    self._filepath = nativestring(filepath)
    self.setIcon(QIcon(filepath))

    # Honour Qt signal blocking before notifying listeners.
    if self.signalsBlocked():
        return
    self.filepathChanged.emit(filepath)
Sets the filepath for this button to the input path. :param filepath | <str>
def consistent_with_call(self, call):
    """
    Check to see if a new call would be consistent to add to this Axes
    instance.

    Checks include:
    * compatible units in all directions
    * compatible independent-variable (if applicable)

    :param call: the candidate call
    :return: (bool, str) -- whether the call is consistent, and a
        comma-separated description of every conflict found (empty when
        consistent)
    """
    # An empty axes accepts anything.
    if len(self.calls) == 0:
        return True, ''

    msg = []

    # FIX: diagnostic strings below previously read "inconsitent"; the
    # misspelling made log/message grepping unreliable.
    if not _consistent_allow_none(call._axorder, self._axorder):
        msg.append('inconsistent axorder, {} != {}'.format(call.axorder, self.axorder))

    if not _consistent_allow_none(call._axpos, self._axpos):
        msg.append('inconsistent axpos, {} != {}'.format(call.axpos, self.axpos))

    if call._axorder == self._axorder and call._axorder is not None:
        # then despite other conflicts, attempt to put on same axes
        return True, ''

    if call._axpos == self._axpos and call._axpos is not None:
        # then despite other conflicts, attempt to put on same axes
        return True, ''

    # TODO: include s, c, fc, ec, etc and make these checks into loops
    if call.x.unit.physical_type != self.x.unit.physical_type:
        msg.append('inconsistent xunit, {} != {}'.format(call.x.unit, self.x.unit))

    if call.y.unit.physical_type != self.y.unit.physical_type:
        msg.append('inconsistent yunit, {} != {}'.format(call.y.unit, self.y.unit))

    if call.z.unit.physical_type != self.z.unit.physical_type:
        msg.append('inconsistent zunit, {} != {}'.format(call.z.unit, self.z.unit))

    if call.i.unit.physical_type != self.i.unit.physical_type:
        msg.append('inconsistent iunit, {} != {}'.format(call.i.unit, self.i.unit))

    if call.i.is_reference or self.i.is_reference:
        if call.i.reference != self.i.reference:
            msg.append('inconsistent i reference, {} != {}'.format(call.i.reference, self.i.reference))

    if not _consistent_allow_none(call.title, self.title):
        msg.append('inconsistent axes title, {} != {}'.format(call.title, self.title))

    # here we send the protected _label so that we get None instead of
    # empty string
    if not _consistent_allow_none(call.x._label, self.x._label):
        msg.append('inconsistent xlabel, {} != {}'.format(call.x.label, self.x.label))

    if not _consistent_allow_none(call.y._label, self.y._label):
        msg.append('inconsistent ylabel, {} != {}'.format(call.y.label, self.y.label))

    if not _consistent_allow_none(call.z._label, self.z._label):
        msg.append('inconsistent zlabel, {} != {}'.format(call.z.label, self.z.label))

    if len(msg):
        return False, ', '.join(msg)
    else:
        return True, ''
check to see if a new call would be consistent to add to this Axes instance checks include: * compatible units in all directions * compatible independent-variable (if applicable)
def __marshal_matches(matched):
    """Convert matches to JSON format.

    :param matched: a list of matched identities
    :returns json_matches: a list of matches in JSON format
    """
    json_matches = []

    for match in matched:
        uuids = [identity.uuid for identity in match]

        # A single-identity match carries no merge information; skip it.
        if len(uuids) == 1:
            continue

        json_matches.append({
            'identities': uuids,
            'processed': False,
        })

    return json_matches
Convert matches to JSON format. :param matched: a list of matched identities :returns json_matches: a list of matches in JSON format
def get_parser():
    """Build and return the top-level argument parser for mpu."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(
        description=__doc__,
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    version_string = 'mpu {}'.format(mpu.__version__)
    parser.add_argument('--version', action='version', version=version_string)

    # Delegate the `package` subcommand's arguments to its own module.
    subparsers = parser.add_subparsers(help='Python package commands')
    package_parser = subparsers.add_parser('package')
    mpu.package.cli.get_parser(package_parser)
    return parser
Get parser for mpu.
def get_row_list(self, row_idx):
    """
    Get a feature vector for the nth row.

    :param row_idx: which row -- either a positional index or a row
                    name that is resolved through the row-name index
    :return: a list of feature values, ordered by column_names
    """
    try:
        row = self._rows[row_idx]
    except TypeError:
        # row_idx is not an integer; resolve it as a row name.
        row = self._rows[self._row_name_idx[row_idx]]

    if isinstance(row, list):
        # Dense row: pad on the right with the default value so the
        # vector length matches the column count.
        missing = len(self._column_name_list) - len(row)
        return row + [self._default_value] * missing

    # Sparse (mapping) row: materialize once per row_idx and memoize.
    if row_idx not in self._row_memo:
        self._row_memo[row_idx] = [
            row[name] if name in row else self._default_value
            for name in self._column_name_list
        ]
    return self._row_memo[row_idx]
get a feature vector for the nth row :param row_idx: which row :return: a list of feature values, ordered by column_names
def clone(self, snapshot_name_or_id=None,
          mode=library.CloneMode.machine_state,
          options=None,
          name=None, uuid=None, groups=None, basefolder='',
          register=True):
    """Clone this Machine

    Options:
        snapshot_name_or_id - value can be either ISnapshot, name, or id
        mode - set the CloneMode value
        options - define the CloneOptions options
        name - define a name of the new VM
        uuid - set the uuid of the new VM
        groups - specify which groups the new VM will exist under
        basefolder - specify which folder to set the VM up under
        register - register this VM with the server

    Note: Default values create a linked clone from the current machine
          state

    Return a IMachine object for the newly cloned vm
    """
    if options is None:
        options = [library.CloneOptions.link]
    if groups is None:
        groups = []

    vbox = virtualbox.VirtualBox()

    if snapshot_name_or_id is not None:
        # NOTE(review): ``basestring`` is Python-2 only — presumably a
        # compatibility alias is defined at module level; confirm.
        if isinstance(snapshot_name_or_id, basestring):
            snapshot = self.find_snapshot(snapshot_name_or_id)
        else:
            snapshot = snapshot_name_or_id
        vm = snapshot.machine
    else:
        # linked clone can only be created from a snapshot...
        # try grabbing the current_snapshot
        if library.CloneOptions.link in options:
            vm = self.current_snapshot.machine
        else:
            vm = self

    if name is None:
        name = "%s Clone" % vm.name

    # Build the settings file
    create_flags = ''
    if uuid is not None:
        create_flags = "UUID=%s" % uuid
    primary_group = ''
    if groups:
        primary_group = groups[0]

    # Make sure this settings file does not already exist
    test_name = name
    settings_file = ''
    # Probe candidate names "name", "name (1)", ... until the composed
    # settings path does not already exist on disk.
    for i in range(1, 1000):
        settings_file = vbox.compose_machine_filename(test_name,
                                                      primary_group,
                                                      create_flags,
                                                      basefolder)
        if not os.path.exists(os.path.dirname(settings_file)):
            break
        test_name = "%s (%s)" % (name, i)
    name = test_name

    # Create the new machine and clone it!
    vm_clone = vbox.create_machine(settings_file, name, groups, '',
                                   create_flags)
    progress = vm.clone_to(vm_clone, mode, options)
    # Block until the asynchronous clone operation completes.
    progress.wait_for_completion(-1)

    if register:
        vbox.register_machine(vm_clone)
    return vm_clone
Clone this Machine Options: snapshot_name_or_id - value can be either ISnapshot, name, or id mode - set the CloneMode value options - define the CloneOptions options name - define a name of the new VM uuid - set the uuid of the new VM groups - specify which groups the new VM will exist under basefolder - specify which folder to set the VM up under register - register this VM with the server Note: Default values create a linked clone from the current machine state Return a IMachine object for the newly cloned vm
def acquire_reader(self):
    """
    Acquire a read lock; several threads can hold this type of lock
    concurrently, bounded by ``max_reader_concurrency``.
    """
    with self.mutex:
        # Block while a writer holds the lock (rwlock < 0), the reader
        # quota is exhausted, or writers are queued (writer priority).
        while (self.rwlock < 0
               or self.rwlock == self.max_reader_concurrency
               or self.writers_waiting):
            self.readers_ok.wait()
        # Positive rwlock counts the readers currently holding the lock.
        self.rwlock += 1
Acquire a read lock, several threads can hold this type of lock.
def update(self, component_id, name=None, status=None, description=None,
           link=None, order=None, group_id=None, enabled=True):
    """Update a component

    :param int component_id: Component ID
    :param str name: Name of the component (optional)
    :param int status: Status of the component; 1-4
    :param str description: Description of the component (optional)
    :param str link: A hyperlink to the component (optional)
    :param int order: Order of the component (optional)
    :param int group_id: The group ID of the component (optional)
    :param bool enabled: Whether the component is enabled (optional)
    :return: Updated component data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#components
    .. seealso:: https://docs.cachethq.io/docs/component-statuses
    """
    payload = ApiParams()
    payload['component'] = component_id
    # Assign each optional field; ApiParams receives them all the same
    # way the explicit per-key assignments did.
    for field, value in (('name', name),
                         ('status', status),
                         ('description', description),
                         ('link', link),
                         ('order', order),
                         ('group_id', group_id),
                         ('enabled', enabled)):
        payload[field] = value
    return self._put('components/%s' % component_id, data=payload)['data']
Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses
def update_insight(self, project_key, insight_id, **kwargs):
    """Update an insight.

    **Note that only elements included in the request will be updated.
    All omitted elements will remain untouched.

    :param project_key: Project identifier, in the form of
        projectOwner/projectid
    :type project_key: str
    :param insight_id: Insight unique identifier.
    :type insight_id: str
    :param title: Insight title
    :type title: str
    :param description: Insight description.
    :type description: str, optional
    :param image_url: If image-based, the URL of the image
    :type image_url: str
    :param embed_url: If embed-based, the embeddable URL
    :type embed_url: str
    :param source_link: Permalink to source code or platform this
        insight was generated with. Allows others to replicate the
        steps originally used to produce the insight.
    :type source_link: str, optional
    :param data_source_links: One or more permalinks to the data sources
        used to generate this insight. Allows others to access the data
        originally used to produce the insight.
    :type data_source_links: array
    :returns: message object
    :rtype: object
    :raises RestApiException: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> api_client.update_insight(
    ...    'username/test-project', 'insightid',
    ...    title='demo datadotworld')  # doctest: +SKIP
    """
    # Build the PATCH request body from the supplied keyword arguments.
    request = self.__build_insight_obj(
        lambda: _swagger.InsightPatchRequest(), kwargs)
    project_owner, project_id = parse_dataset_key(project_key)
    try:
        self._insights_api.update_insight(project_owner, project_id,
                                          insight_id, body=request)
    except _swagger.rest.ApiException as e:
        # Wrap the transport-level error in the package's exception type.
        raise RestApiError(cause=e)
Update an insight. **Note that only elements included in the request will be updated. All omitted elements will remain untouched. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_insight( ... 'username/test-project', 'insightid' ... title='demo atadotworld'}) # doctest: +SKIP
def safe_listget(list_, index, default='?'):
    """
    Return ``list_[index]``, falling back to *default* when the index
    is out of range or the stored value is None.

    .. deprecated:: prefer an explicit bounds check at the call site.

    :param list_: sequence to read from
    :param index: position to fetch
    :param default: value returned for missing/None entries
    """
    # FIX: try/except also guards negative indices beyond -len(list_),
    # which the previous ">= len" check let fall through to IndexError.
    try:
        ret = list_[index]
    except IndexError:
        return default
    return default if ret is None else ret
deprecate
def walk_json_dict(coll):
    """
    Flatten a parsed SBP object into dicts and lists, which are
    compatible for JSON output.

    Parameters
    ----------
    coll : dict
    """
    if isinstance(coll, dict):
        return {key: walk_json_dict(value) for key, value in coll.items()}
    if isinstance(coll, bytes):
        # Byte payloads become ASCII text for JSON serialization.
        return coll.decode('ascii')
    if hasattr(coll, '__iter__') and not isinstance(coll, str):
        # Any other iterable (tuple, generator, ...) becomes a list.
        return [walk_json_dict(item) for item in coll]
    # Scalars (and plain strings) pass through untouched.
    return coll
Flatten a parsed SBP object into a dicts and lists, which are compatible for JSON output. Parameters ---------- coll : dict
def setup_environment_from_config_file():
    """
    Import the environment settings from the config file, if present,
    and apply them as environment variables.
    """
    from os.path import exists

    config_file = get_config_file()
    if not exists(config_file):
        return

    section = "Environment"
    try:
        parser = _ConfigParser.SafeConfigParser()
        parser.read(config_file)
        if not parser.has_section(section):
            return
        for key, value in parser.items(section):
            try:
                # Environment variable names are upper-cased by convention.
                os.environ[key.upper()] = value
            except Exception as e:
                print(("WARNING: Error setting environment variable "
                       "'%s = %s' from config file '%s': %s.")
                      % (key, str(value), config_file, str(e)))
    except Exception as e:
        # Best-effort: a broken config file must not abort startup.
        print("WARNING: Error reading config file '%s': %s."
              % (config_file, str(e)))
Imports the environmental configuration settings from the config file, if present, and sets the environment variables to test it.
def to_line(self):
    """
    Return a string in OpenSSH known_hosts file format, including a
    trailing newline, or None if the object is not in a valid state.
    """
    if not self.valid:
        return None
    fields = (','.join(self.hostnames),
              self.key.get_name(),
              self.key.get_base64())
    return '%s %s %s\n' % fields
Returns a string in OpenSSH known_hosts file format, or None if the object is not in a valid state. A trailing newline is included.
def imf(m):
    '''
    Returns
    -------
    N(M)dM for given mass according to Kroupa IMF, vectorization
    available via vimf()
    '''
    # Break points and slopes of the broken power law.
    m1, m2 = 0.08, 0.50
    a1, a2, a3 = 0.30, 1.30, 2.3
    # Offsets stitching the three segments together at the break points.
    const2 = m1 ** -a1 - m1 ** -a2
    const3 = m2 ** -a2 - m2 ** -a3

    if m < m1:
        alpha, const = a1, -const2 - const3
    elif m < m2:
        alpha, const = a2, -const3
    else:
        alpha, const = a3, 0.0

    return m ** -alpha + const
Returns ------- N(M)dM for given mass according to Kroupa IMF, vectorization available via vimf()
def _get_assumptions(t):
    """
    Given a constraint, return a list of constraints that are implicitly
    assumed to be true. For example, `x <= 10` would return `x >= 0`.
    """
    op = t.op
    # Unsigned comparisons imply the operand fits its unsigned range.
    if op in ('__le__', '__lt__', 'ULE', 'ULT'):
        return [t.args[0] >= 0]
    if op in ('__ge__', '__gt__', 'UGE', 'UGT'):
        return [t.args[0] <= 2 ** len(t.args[0]) - 1]
    # Signed comparisons imply the operand fits its two's-complement range.
    if op in ('SLE', 'SLT'):
        return [_all_operations.SGE(t.args[0], -(1 << (len(t.args[0]) - 1)))]
    if op in ('SGE', 'SGT'):
        return [_all_operations.SLE(t.args[0], (1 << (len(t.args[0]) - 1)) - 1)]
    return []
Given a constraint, _get_assumptions() returns a set of constraints that are implicitly assumed to be true. For example, `x <= 10` would return `x >= 0`.
def on_msg(self, callback, remove=False):
    """(Un)Register a custom msg receive callback.

    Parameters
    ----------
    callback: callable
        callback will be passed three arguments when a message arrives::

            callback(widget, content, buffers)

    remove: bool
        True if the callback should be unregistered."""
    # Registration and removal both funnel through the callback registry.
    registry = self._msg_callbacks
    registry.register_callback(callback, remove=remove)
(Un)Register a custom msg receive callback. Parameters ---------- callback: callable callback will be passed three arguments when a message arrives:: callback(widget, content, buffers) remove: bool True if the callback should be unregistered.
def find_kernel_specs_for_envs(self):
    """Return a dict mapping kernel names to resource directories."""
    env_data = self._get_env_data()
    # The first element of each entry is the resource directory.
    return dict((name, entry[0]) for name, entry in env_data.items())
Returns a dict mapping kernel names to resource directories.
def check_elasticsearch(record, *args, **kwargs):
    """Return permission that check if the record exists in ES index.

    :params record: A record object.
    :returns: A object instance with a ``can()`` method.
    """
    def can(self):
        """Try to search for given record."""
        # The search class is taken from the view handling the request.
        searcher = request._methodview.search_class()
        searcher = searcher.get_record(str(record.id))
        return searcher.count() == 1

    # Build a throwaway permission type exposing only `can`.
    return type('CheckES', (), {'can': can})()
Return permission that check if the record exists in ES index. :params record: A record object. :returns: A object instance with a ``can()`` method.
def encode(self, s):
    """Encode string with the configured output encoding."""
    # Only text may be encoded here (Py2 ``unicode``), never raw bytes.
    assert isinstance(s, unicode)
    encoded = s.encode(self.output_encoding, self.codec_errors)
    return encoded
Encode string with output encoding.
def bulk_lookup_rdap(addresses=None, inc_raw=False, retry_count=3, depth=0,
                     excluded_entities=None, rate_limit_timeout=60,
                     socket_timeout=10, asn_timeout=240, proxy_openers=None):
    """
    The function for bulk retrieving and parsing whois information for a list
    of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN
    Whois lookups first to retrieve the ASN for each IP. It then optimizes
    RDAP queries to achieve the fastest overall time, accounting for
    rate-limiting RIRs.

    Args:
        addresses (:obj:`list` of :obj:`str`): IP addresses to lookup.
        inc_raw (:obj:`bool`, optional): Whether to include the raw whois
            results in the returned dictionary. Defaults to False.
        retry_count (:obj:`int`): The number of times to retry in case socket
            errors, timeouts, connection resets, etc. are encountered.
            Defaults to 3.
        depth (:obj:`int`): How many levels deep to run queries when
            additional referenced objects are found. Defaults to 0.
        excluded_entities (:obj:`list` of :obj:`str`): Entity handles to not
            perform lookups. Defaults to None.
        rate_limit_timeout (:obj:`int`): The number of seconds to wait before
            retrying when a rate limit notice is returned via rdap+json.
            Defaults to 60.
        socket_timeout (:obj:`int`): The default timeout for socket
            connections in seconds. Defaults to 10.
        asn_timeout (:obj:`int`): The default timeout for bulk ASN lookups in
            seconds. Defaults to 240.
        proxy_openers (:obj:`list` of :obj:`OpenerDirector`): Proxy openers
            for single/rotating proxy support. Defaults to None.

    Returns:
        namedtuple:

        :results (dict): IP address keys with the values as dictionaries
            returned by IPWhois.lookup_rdap().
        :stats (dict): Stats for the lookups:

        ::

            {
                'ip_input_total' (int) - The total number of addresses
                    originally provided for lookup via the addresses argument.
                'ip_unique_total' (int) - The total number of unique addresses
                    found in the addresses argument.
                'ip_lookup_total' (int) - The total number of addresses that
                    lookups were attempted for, excluding any that failed ASN
                    registry checks.
                'lacnic' (dict) -
                {
                    'failed' (list) - The addresses that failed to lookup.
                        Excludes any that failed initially, but succeeded
                        after further retries.
                    'rate_limited' (list) - The addresses that encountered
                        rate-limiting. Unless an address is also in 'failed',
                        it eventually succeeded.
                    'total' (int) - The total number of addresses belonging to
                        this RIR that lookups were attempted for.
                }
                'ripencc' (dict) - Same as 'lacnic' above.
                'apnic' (dict) - Same as 'lacnic' above.
                'afrinic' (dict) - Same as 'lacnic' above.
                'arin' (dict) - Same as 'lacnic' above.
                'unallocated_addresses' (list) - The addresses that are
                    unallocated/failed ASN lookups. These can be addresses
                    that are not listed for one of the 5 RIRs (other). No
                    attempt was made to perform an RDAP lookup for these.
            }

    Raises:
        ASNLookupError: The ASN bulk lookup failed, cannot proceed with bulk
            RDAP lookup.
    """

    if not isinstance(addresses, list):

        raise ValueError('addresses must be a list of IP address strings')

    # Initialize the dicts/lists
    results = {}
    failed_lookups_dict = {}
    rated_lookups = []
    stats = {
        'ip_input_total': len(addresses),
        'ip_unique_total': 0,
        'ip_lookup_total': 0,
        'lacnic': {'failed': [], 'rate_limited': [], 'total': 0},
        'ripencc': {'failed': [], 'rate_limited': [], 'total': 0},
        'apnic': {'failed': [], 'rate_limited': [], 'total': 0},
        'afrinic': {'failed': [], 'rate_limited': [], 'total': 0},
        'arin': {'failed': [], 'rate_limited': [], 'total': 0},
        'unallocated_addresses': []
    }
    asn_parsed_results = {}

    if proxy_openers is None:

        proxy_openers = [None]

    proxy_openers_copy = iter(proxy_openers)

    # Make sure addresses is unique
    unique_ip_list = list(unique_everseen(addresses))

    # Get the unique count to return
    stats['ip_unique_total'] = len(unique_ip_list)

    # This is needed for iteration order
    rir_keys_ordered = ['lacnic', 'ripencc', 'apnic', 'afrinic', 'arin']

    # First query the ASN data for all IPs, can raise ASNLookupError, no catch
    bulk_asn = get_bulk_asn_whois(unique_ip_list, timeout=asn_timeout)

    # ASN results are returned as string, parse lines to list and remove first
    asn_result_list = bulk_asn.split('\n')
    del asn_result_list[0]

    # We need to instantiate IPASN, which currently needs a Net object,
    # IP doesn't matter here
    net = Net('1.2.3.4')
    ipasn = IPASN(net)

    # Iterate each IP ASN result, and add valid RIR results to
    # asn_parsed_results for RDAP lookups
    for asn_result in asn_result_list:

        temp = asn_result.split('|')

        # Not a valid entry, move on to next
        if len(temp) == 1:

            continue

        ip = temp[1].strip()

        # We need this since ASN bulk lookup is returning duplicates
        # This is an issue on the Cymru end
        if ip in asn_parsed_results.keys():  # pragma: no cover

            continue

        try:

            # BUG FIX: the original assigned to ``results`` here, clobbering
            # the return dict initialized above. Use a dedicated local.
            asn_fields = ipasn.parse_fields_whois(asn_result)

        except ASNRegistryError:  # pragma: no cover

            continue

        # Add valid IP ASN result to asn_parsed_results for RDAP lookup
        asn_parsed_results[ip] = asn_fields
        stats[asn_fields['asn_registry']]['total'] += 1

    # Set the list of IPs that are not allocated/failed ASN lookup
    stats['unallocated_addresses'] = list(
        k for k in addresses if k not in asn_parsed_results)

    # Set the total lookup count after unique IP and ASN result filtering
    stats['ip_lookup_total'] = len(asn_parsed_results)

    # Track the total number of LACNIC queries left. This is tracked in order
    # to ensure the 9 priority LACNIC queries/min don't go into infinite loop
    lacnic_total_left = stats['lacnic']['total']

    # Set the start time, this value is updated when the rate limit is reset
    old_time = time.time()

    # Rate limit tracking dict for all RIRs
    rate_tracker = {
        'lacnic': {'time': old_time, 'count': 0},
        'ripencc': {'time': old_time, 'count': 0},
        'apnic': {'time': old_time, 'count': 0},
        'afrinic': {'time': old_time, 'count': 0},
        'arin': {'time': old_time, 'count': 0}
    }

    # Iterate all of the IPs to perform RDAP lookups until none are left
    while len(asn_parsed_results) > 0:

        # Sequentially run through each RIR to minimize lookups in a row to
        # the same RIR.
        for rir in rir_keys_ordered:

            # If there are still LACNIC IPs left to lookup and the rate limit
            # hasn't been reached, skip to find a LACNIC IP to lookup
            if (
                rir != 'lacnic' and lacnic_total_left > 0 and
                (rate_tracker['lacnic']['count'] != 9 or
                 (time.time() - rate_tracker['lacnic']['time']
                  ) >= rate_limit_timeout
                 )
            ):  # pragma: no cover

                continue

            # If the RIR rate limit has been reached and hasn't expired,
            # move on to the next RIR
            if (
                rate_tracker[rir]['count'] == 9 and
                ((time.time() - rate_tracker[rir]['time']
                  ) < rate_limit_timeout)
            ):  # pragma: no cover

                continue

            # If the RIR rate limit has expired, reset the count/timer
            # and perform the lookup
            elif ((time.time() - rate_tracker[rir]['time']
                   ) >= rate_limit_timeout):  # pragma: no cover

                rate_tracker[rir]['count'] = 0
                rate_tracker[rir]['time'] = time.time()

            # Create a copy of the lookup IP dict so we can modify on
            # successful/failed queries. Loop each IP until it matches the
            # correct RIR in the parent loop, and attempt lookup
            tmp_dict = asn_parsed_results.copy()

            for ip, asn_data in tmp_dict.items():

                # Check to see if IP matches parent loop RIR for lookup
                if asn_data['asn_registry'] == rir:

                    log.debug('Starting lookup for IP: {0} '
                              'RIR: {1}'.format(ip, rir))

                    # Add to count for rate-limit tracking only for LACNIC,
                    # since we have not seen aggressive rate-limiting from the
                    # other RIRs yet
                    if rir == 'lacnic':

                        rate_tracker[rir]['count'] += 1

                    # Get the next proxy opener to use, or None
                    try:

                        opener = next(proxy_openers_copy)

                    # Start at the beginning if all have been used
                    except StopIteration:

                        proxy_openers_copy = iter(proxy_openers)
                        opener = next(proxy_openers_copy)

                    # Instantiate the objects needed for the RDAP lookup
                    net = Net(ip, timeout=socket_timeout,
                              proxy_opener=opener)
                    rdap = RDAP(net)

                    try:

                        # Perform the RDAP lookup. retry_count is set to 0
                        # here since we handle that in this function.
                        # BUG FIX: assign directly into the per-IP return
                        # dict; the original overwrote ``results`` with the
                        # lookup dict and then self-assigned
                        # (results[ip] = results), destroying all previously
                        # accumulated results.
                        results[ip] = rdap.lookup(
                            inc_raw=inc_raw, retry_count=0,
                            asn_data=asn_data, depth=depth,
                            excluded_entities=excluded_entities
                        )

                        log.debug('Successful lookup for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Set the nir key to None as this is not supported
                        # (yet - requires more queries)
                        results[ip]['nir'] = None

                        # Remove the IP from the lookup queue
                        del asn_parsed_results[ip]

                        # If this was LACNIC IP, reduce the total left count
                        if rir == 'lacnic':

                            lacnic_total_left -= 1

                        log.debug(
                            '{0} total lookups left, {1} LACNIC lookups left'
                            ''.format(str(len(asn_parsed_results)),
                                      str(lacnic_total_left))
                        )

                        # If this IP failed previously, remove it from the
                        # failed return dict
                        if (
                            ip in failed_lookups_dict.keys()
                        ):  # pragma: no cover

                            del failed_lookups_dict[ip]

                        # Break out of the IP list loop, we need to change to
                        # the next RIR
                        break

                    except HTTPLookupError:  # pragma: no cover

                        log.debug('Failed lookup for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Add the IP to the failed lookups dict if not there
                        if ip not in failed_lookups_dict.keys():

                            failed_lookups_dict[ip] = 1

                        # This IP has already failed at least once, increment
                        # the failure count until retry_count reached, then
                        # stop trying
                        else:

                            failed_lookups_dict[ip] += 1

                            if failed_lookups_dict[ip] == retry_count:

                                del asn_parsed_results[ip]
                                stats[rir]['failed'].append(ip)

                                if rir == 'lacnic':

                                    lacnic_total_left -= 1

                        # Since this IP failed, we don't break to move to next
                        # RIR, we check the next IP for this RIR
                        continue

                    except HTTPRateLimitError:  # pragma: no cover

                        # Add the IP to the rate-limited lookups list if not
                        # there
                        if ip not in rated_lookups:

                            rated_lookups.append(ip)
                            stats[rir]['rate_limited'].append(ip)

                        log.debug('Rate limiting triggered for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Since rate-limit was reached, reset the timer and
                        # max out the count
                        rate_tracker[rir]['time'] = time.time()
                        rate_tracker[rir]['count'] = 9

                        # Break out of the IP list loop, we need to change to
                        # the next RIR
                        break

    return_tuple = namedtuple('return_tuple', ['results', 'stats'])
    return return_tuple(results, stats)
The function for bulk retrieving and parsing whois information for a list of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN Whois lookups first to retrieve the ASN for each IP. It then optimizes RDAP queries to achieve the fastest overall time, accounting for rate-limiting RIRs. Args: addresses (:obj:`list` of :obj:`str`): IP addresses to lookup. inc_raw (:obj:`bool`, optional): Whether to include the raw whois results in the returned dictionary. Defaults to False. retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. depth (:obj:`int`): How many levels deep to run queries when additional referenced objects are found. Defaults to 0. excluded_entities (:obj:`list` of :obj:`str`): Entity handles to not perform lookups. Defaults to None. rate_limit_timeout (:obj:`int`): The number of seconds to wait before retrying when a rate limit notice is returned via rdap+json. Defaults to 60. socket_timeout (:obj:`int`): The default timeout for socket connections in seconds. Defaults to 10. asn_timeout (:obj:`int`): The default timeout for bulk ASN lookups in seconds. Defaults to 240. proxy_openers (:obj:`list` of :obj:`OpenerDirector`): Proxy openers for single/rotating proxy support. Defaults to None. Returns: namedtuple: :results (dict): IP address keys with the values as dictionaries returned by IPWhois.lookup_rdap(). :stats (dict): Stats for the lookups: :: { 'ip_input_total' (int) - The total number of addresses originally provided for lookup via the addresses argument. 'ip_unique_total' (int) - The total number of unique addresses found in the addresses argument. 'ip_lookup_total' (int) - The total number of addresses that lookups were attempted for, excluding any that failed ASN registry checks. 'lacnic' (dict) - { 'failed' (list) - The addresses that failed to lookup. Excludes any that failed initially, but succeeded after futher retries. 
'rate_limited' (list) - The addresses that encountered rate-limiting. Unless an address is also in 'failed', it eventually succeeded. 'total' (int) - The total number of addresses belonging to this RIR that lookups were attempted for. } 'ripencc' (dict) - Same as 'lacnic' above. 'apnic' (dict) - Same as 'lacnic' above. 'afrinic' (dict) - Same as 'lacnic' above. 'arin' (dict) - Same as 'lacnic' above. 'unallocated_addresses' (list) - The addresses that are unallocated/failed ASN lookups. These can be addresses that are not listed for one of the 5 RIRs (other). No attempt was made to perform an RDAP lookup for these. } Raises: ASNLookupError: The ASN bulk lookup failed, cannot proceed with bulk RDAP lookup.