code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def start(self):
    '''
    Start listening for messages.

    Builds the TCP server socket (IPv6 if ``self.address`` contains a
    colon, IPv4 otherwise), optionally enables address/port reuse, binds
    to ``(self.address, self.port)`` and starts a background thread
    running ``self._serve_clients``.

    :raises BindException: if the socket cannot be bound to the
        configured address/port.
    '''
    log.debug('Creating the TCP server')
    # A ':' in the address implies an IPv6 literal address.
    if ':' in self.address:
        self.skt = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    else:
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if self.reuse_port:
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # SO_REUSEPORT is not available on every platform (e.g. older
        # kernels / Windows), so probe for it before using it.
        if hasattr(socket, 'SO_REUSEPORT'):
            self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        else:
            log.error('SO_REUSEPORT not supported')
    try:
        self.skt.bind((self.address, int(self.port)))
    except socket.error as msg:
        # Re-raise as the project-specific BindException with context.
        error_string = 'Unable to bind to port {} on {}: {}'.format(self.port, self.address, msg)
        log.error(error_string, exc_info=True)
        raise BindException(error_string)
    log.debug('Accepting max %d parallel connections', self.max_clients)
    self.skt.listen(self.max_clients)
    # Serve clients on a separate thread so start() returns immediately.
    self.thread_serve = threading.Thread(target=self._serve_clients)
    self.thread_serve.start()
Start listening for messages.
def cursor_batch(self, table_name, start_timeperiod, end_timeperiod):
    """Abstract hook: concrete DAO subclasses must return a batched DB cursor."""
    message = 'method cursor_batch must be implemented by {0}'.format(self.__class__.__name__)
    raise NotImplementedError(message)
method returns batched DB cursor
def login(self, email=None, password=None):
    """
    Interactive login using the `cloudgenix.API` object. This function is
    more robust and handles SAML and MSP accounts. Expects interactive
    capability. If this is not available, use `cloudgenix.API.post.login`
    directly.

    **Parameters:**:

      - **email**: Email to log in for, will prompt if not entered.
      - **password**: Password to log in with, will prompt if not entered.
        Ignored for SAML v2.0 users.

    **Returns:** Bool. In addition the function will mutate the
    `cloudgenix.API` constructor items as needed.
    """
    # if email not given in function, or if first login fails, prompt.
    if email is None:
        # If user is not set, pull from cache. If not in cache, prompt.
        if self._parent_class.email:
            email = self._parent_class.email
        else:
            email = compat_input("login: ")

    if password is None:
        # if pass not given on function, or if first login fails, prompt.
        if self._parent_class._password:
            password = self._parent_class._password
        else:
            password = getpass.getpass()

    # Try and login
    # For SAML 2.0 support, set the Referer URL prior to logging in.
    # add referer header to the session.
    self._parent_class.add_headers({'Referer': "{}/v2.0/api/login".format(self._parent_class.controller)})

    # call the login API.
    response = self._parent_class.post.login({"email": email, "password": password})

    if response.cgx_status:
        # Check for SAML 2.0 login: no token in the body means the IdP
        # redirect flow has to complete out-of-band in a browser.
        if not response.cgx_content.get('x_auth_token'):
            urlpath = response.cgx_content.get("urlpath", "")
            request_id = response.cgx_content.get("requestId", "")
            # NOTE(review): if the token is missing AND urlpath/request_id are
            # empty, execution falls through to the "Login successful" path
            # below — confirm that is intended.
            if urlpath and request_id:
                # SAML 2.0
                print('SAML 2.0: To finish login open the following link in a browser\n\n{0}\n\n'.format(urlpath))
                found_auth_token = False
                # Poll for SSO completion, up to 20 tries, 5 seconds apart
                # (~100 seconds total).
                for i in range(20):
                    print('Waiting for {0} seconds for authentication...'.format((20 - i) * 5))
                    saml_response = self.check_sso_login(email, request_id)
                    if saml_response.cgx_status and saml_response.cgx_content.get('x_auth_token'):
                        found_auth_token = True
                        break
                    # wait before retry.
                    time.sleep(5)
                if not found_auth_token:
                    print("Login time expired! Please re-login.\n")
                    # log response when debug
                    try:
                        api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response, indent=4))
                    except (TypeError, ValueError):
                        # not JSON response, don't pretty print log.
                        api_logger.debug("LOGIN_FAIL_RESPONSE = %s", str(response))
                    # print login error
                    print('Login failed, please try again', response)
                    # Flush command-line entered login info if failure.
                    # NOTE(review): other failure paths clear `_password`; this
                    # one clears `password` — confirm which attribute is right.
                    self._parent_class.email = None
                    self._parent_class.password = None
                    return False

        api_logger.info('Login successful:')

        # if we got here, we either got an x_auth_token in the original login, or
        # we got an auth_token cookie set via SAML. Figure out which.
        auth_token = response.cgx_content.get('x_auth_token')
        if auth_token:
            # token in the original login (not saml) means region parsing has not been done.
            # do now, and recheck if cookie needs set.
            auth_region = self._parent_class.parse_region(response)
            self._parent_class.update_region_to_controller(auth_region)
            self._parent_class.reparse_login_cookie_after_region_update(response)

        # debug info if needed
        api_logger.debug("AUTH_TOKEN=%s", response.cgx_content.get('x_auth_token'))

        # Step 2: Get operator profile for tenant ID and other info.
        if self.interactive_update_profile_vars():
            # pull tenant detail
            if self._parent_class.tenant_id:
                # add tenant values to API() object
                if self.interactive_tenant_update_vars():
                    # Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
                    if self._parent_class.is_esp:
                        # ESP/MSP!
                        choose_status, chosen_client_id = self.interactive_client_choice()
                        if choose_status:
                            # attempt to login as client
                            clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
                            if clogin_resp.cgx_status:
                                # login successful, update profile and tenant info
                                c_profile = self.interactive_update_profile_vars()
                                t_profile = self.interactive_tenant_update_vars()
                                if c_profile and t_profile:
                                    # successful full client login.
                                    self._parent_class._password = None
                                    # remove referer header prior to continuing.
                                    self._parent_class.remove_header('Referer')
                                    return True
                                else:
                                    # NOTE(review): printing "Tenant detail
                                    # retrieval failed" when t_profile is truthy
                                    # looks inverted — confirm intent.
                                    if t_profile:
                                        print("ESP Client Tenant detail retrieval failed.")
                                    # clear password out of memory
                                    self._parent_class.email = None
                                    self._parent_class._password = None
                                    # remove referer header prior to continuing.
                                    self._parent_class.remove_header('Referer')
                                    return False
                            else:
                                print("ESP Client Login failed.")
                                # clear password out of memory
                                self._parent_class.email = None
                                self._parent_class._password = None
                                # remove referer header prior to continuing.
                                self._parent_class.remove_header('Referer')
                                return False
                        else:
                            print("ESP Client Choice failed.")
                            # clear password out of memory
                            self._parent_class.email = None
                            self._parent_class._password = None
                            # remove referer header prior to continuing.
                            self._parent_class.remove_header('Referer')
                            return False

                    # successful!
                    # clear password out of memory
                    self._parent_class._password = None
                    # remove referer header prior to continuing.
                    self._parent_class.remove_header('Referer')
                    return True
                else:
                    print("Tenant detail retrieval failed.")
                    # clear password out of memory
                    self._parent_class.email = None
                    self._parent_class._password = None
                    # remove referer header prior to continuing.
                    self._parent_class.remove_header('Referer')
                    return False
            # NOTE(review): when tenant_id is falsy execution falls through to
            # the logging below and the function implicitly returns None (a
            # falsy value) — confirm whether an explicit return was intended.
        else:
            # Profile detail retrieval failed
            self._parent_class.email = None
            self._parent_class._password = None
            return False

        api_logger.info("EMAIL = %s", self._parent_class.email)
        api_logger.info("USER_ID = %s", self._parent_class._user_id)
        api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
        api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
        api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
        api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
        # remove referer header prior to continuing.
        self._parent_class.remove_header('Referer')
    else:
        # log response when debug
        api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response.cgx_content, indent=4))
        # print login error
        print('Login failed, please try again:', response.cgx_content)
        # Flush command-line entered login info if failure.
        self._parent_class.email = None
        self._parent_class.password = None
        # remove referer header prior to continuing.
        self._parent_class.remove_header('Referer')
        return False
Interactive login using the `cloudgenix.API` object. This function is more robust and handles SAML and MSP accounts. Expects interactive capability. if this is not available, use `cloudenix.API.post.login` directly. **Parameters:**: - **email**: Email to log in for, will prompt if not entered. - **password**: Password to log in with, will prompt if not entered. Ignored for SAML v2.0 users. **Returns:** Bool. In addition the function will mutate the `cloudgenix.API` constructor items as needed.
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield successive chunks read from *file* until EOF (an empty read)."""
    chunk = file.read(size)
    while chunk:
        yield chunk
        chunk = file.read(size)
Yield pieces of data from a file-like object until EOF.
def get_discrete_grid(self):
    """
    Computes a Numpy array with the grid of points that results after
    crossing the possible outputs of the discrete variables.
    """
    # Each discrete variable contributes its domain once per dimension.
    domains = [variable.domain
               for variable in self.space if variable.type == 'discrete'
               for _ in range(variable.dimensionality)]
    return np.array(list(itertools.product(*domains)))
Computes a Numpy array with the grid of points that results after crossing the possible outputs of the discrete variables
def setbit(self, name, offset, val):
    """
    Set the bit at ``offset`` of key ``name`` to ``val`` and return the
    bit previously stored there. Like **Redis.SETBIT**.

    :param string name: the key name
    :param int offset: the bit position
    :param bool val: the bit value
    :return: the previous bit (False or True) at the ``offset``
    :rtype: bool

    >>> ssdb.set('bit_test', 1)
    True
    >>> ssdb.setbit('bit_test', 1, 1)
    False
    >>> ssdb.get('bit_test')
    3
    >>> ssdb.setbit('bit_test', 2, 1)
    False
    >>> ssdb.get('bit_test')
    7
    """
    # Validate in the same order as before: value first, then offset.
    checked_val = int(get_boolean('val', val))
    checked_offset = get_positive_integer('offset', offset)
    return self.execute_command('setbit', name, checked_offset, checked_val)
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean indicating the previous value of ``offset``. Like **Redis.SETBIT** :param string name: the key name :param int offset: the bit position :param bool val: the bit value :return: the previous bit (False or True) at the ``offset`` :rtype: bool >>> ssdb.set('bit_test', 1) True >>> ssdb.setbit('bit_test', 1, 1) False >>> ssdb.get('bit_test') 3 >>> ssdb.setbit('bit_test', 2, 1) False >>> ssdb.get('bit_test') 7
def add_class(self, node):
    """Visit one class node and add it to the diagram.

    The node is first handed to the linker's visit hook, then registered
    on the class diagram under the title computed by ``get_title``.
    """
    self.linker.visit(node)
    self.classdiagram.add_object(self.get_title(node), node)
visit one class and add it to diagram
def parse(
        idp_metadata,
        required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT,
        required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT,
        entity_id=None):
    """
    Parse the Identity Provider metadata and return a dict with extracted data.

    If there are multiple <IDPSSODescriptor> tags, parse only the first.
    Parse only those SSO/SLO endpoints whose binding matches
    `required_sso_binding` / `required_slo_binding`; if several match,
    extract only the first.

    :param idp_metadata: XML of the Identity Provider Metadata.
    :type idp_metadata: string
    :param required_sso_binding: Parse only POST or REDIRECT SSO endpoints.
    :type required_sso_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT
        or OneLogin_Saml2_Constants.BINDING_HTTP_POST
    :param required_slo_binding: Parse only POST or REDIRECT SLO endpoints.
    :type required_slo_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT
        or OneLogin_Saml2_Constants.BINDING_HTTP_POST
    :param entity_id: Specify the entity_id of the EntityDescriptor that you
        want to parse a XML that contains multiple EntityDescriptor.
    :type entity_id: string

    :returns: settings dict with extracted data
    :rtype: dict
    """
    data = {}

    # forbid_dtd=True hardens the XML parse against DTD-based attacks.
    dom = fromstring(idp_metadata, forbid_dtd=True)
    entity_desc_path = '//md:EntityDescriptor'
    if entity_id:
        entity_desc_path += "[@entityID='%s']" % entity_id
    entity_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, entity_desc_path)

    idp_entity_id = want_authn_requests_signed = idp_name_id_format = idp_sso_url = idp_slo_url = certs = None

    if len(entity_descriptor_nodes) > 0:
        entity_descriptor_node = entity_descriptor_nodes[0]
        idp_descriptor_nodes = OneLogin_Saml2_Utils.query(entity_descriptor_node, './md:IDPSSODescriptor')
        if len(idp_descriptor_nodes) > 0:
            # Only the first IDPSSODescriptor is considered.
            idp_descriptor_node = idp_descriptor_nodes[0]

            idp_entity_id = entity_descriptor_node.get('entityID', None)
            want_authn_requests_signed = entity_descriptor_node.get('WantAuthnRequestsSigned', None)

            name_id_format_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, './md:NameIDFormat')
            if len(name_id_format_nodes) > 0:
                idp_name_id_format = OneLogin_Saml2_Utils.element_text(name_id_format_nodes[0])

            # First SSO endpoint with the required binding, if any.
            sso_nodes = OneLogin_Saml2_Utils.query(
                idp_descriptor_node,
                "./md:SingleSignOnService[@Binding='%s']" % required_sso_binding
            )
            if len(sso_nodes) > 0:
                idp_sso_url = sso_nodes[0].get('Location', None)

            # First SLO endpoint with the required binding, if any.
            slo_nodes = OneLogin_Saml2_Utils.query(
                idp_descriptor_node,
                "./md:SingleLogoutService[@Binding='%s']" % required_slo_binding
            )
            if len(slo_nodes) > 0:
                idp_slo_url = slo_nodes[0].get('Location', None)

            # KeyDescriptors with no 'use' attribute may serve both purposes,
            # hence the not(contains(...)) filters rather than equality tests.
            signing_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, "./md:KeyDescriptor[not(contains(@use, 'encryption'))]/ds:KeyInfo/ds:X509Data/ds:X509Certificate")
            encryption_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, "./md:KeyDescriptor[not(contains(@use, 'signing'))]/ds:KeyInfo/ds:X509Data/ds:X509Certificate")

            if len(signing_nodes) > 0 or len(encryption_nodes) > 0:
                certs = {}
                if len(signing_nodes) > 0:
                    certs['signing'] = []
                    for cert_node in signing_nodes:
                        # Strip all whitespace from PEM body.
                        certs['signing'].append(''.join(OneLogin_Saml2_Utils.element_text(cert_node).split()))
                if len(encryption_nodes) > 0:
                    certs['encryption'] = []
                    for cert_node in encryption_nodes:
                        certs['encryption'].append(''.join(OneLogin_Saml2_Utils.element_text(cert_node).split()))

    data['idp'] = {}

    if idp_entity_id is not None:
        data['idp']['entityId'] = idp_entity_id

    if idp_sso_url is not None:
        data['idp']['singleSignOnService'] = {}
        data['idp']['singleSignOnService']['url'] = idp_sso_url
        data['idp']['singleSignOnService']['binding'] = required_sso_binding

    if idp_slo_url is not None:
        data['idp']['singleLogoutService'] = {}
        data['idp']['singleLogoutService']['url'] = idp_slo_url
        data['idp']['singleLogoutService']['binding'] = required_slo_binding

    if certs is not None:
        # A single cert (or identical signing/encryption certs) is exposed as
        # the scalar 'x509cert'; otherwise the full dict as 'x509certMulti'.
        if (len(certs) == 1 and
            (('signing' in certs and len(certs['signing']) == 1) or
             ('encryption' in certs and len(certs['encryption']) == 1))) or \
           (('signing' in certs and len(certs['signing']) == 1) and
            ('encryption' in certs and len(certs['encryption']) == 1 and
             certs['signing'][0] == certs['encryption'][0])):
            if 'signing' in certs:
                data['idp']['x509cert'] = certs['signing'][0]
            else:
                data['idp']['x509cert'] = certs['encryption'][0]
        else:
            data['idp']['x509certMulti'] = certs

    if want_authn_requests_signed is not None:
        data['security'] = {}
        data['security']['authnRequestsSigned'] = want_authn_requests_signed

    if idp_name_id_format:
        data['sp'] = {}
        data['sp']['NameIDFormat'] = idp_name_id_format

    return data
Parse the Identity Provider metadata and return a dict with extracted data. If there are multiple <IDPSSODescriptor> tags, parse only the first. Parse only those SSO endpoints with the same binding as given by the `required_sso_binding` parameter. Parse only those SLO endpoints with the same binding as given by the `required_slo_binding` parameter. If the metadata specifies multiple SSO endpoints with the required binding, extract only the first (the same holds true for SLO endpoints). :param idp_metadata: XML of the Identity Provider Metadata. :type idp_metadata: string :param required_sso_binding: Parse only POST or REDIRECT SSO endpoints. :type required_sso_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT or OneLogin_Saml2_Constants.BINDING_HTTP_POST :param required_slo_binding: Parse only POST or REDIRECT SLO endpoints. :type required_slo_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT or OneLogin_Saml2_Constants.BINDING_HTTP_POST :param entity_id: Specify the entity_id of the EntityDescriptor that you want to parse a XML that contains multiple EntityDescriptor. :type entity_id: string :returns: settings dict with extracted data :rtype: dict
def create(self, request):
    """Creates a new wiki page for the specified PullRequest instance.

    The page gets initialized with basic information about the pull
    request, the tests that will be run, etc. Returns the URL on the
    wiki (the value returned by ``_create_new``).

    :arg request: the PullRequest instance with testing information.
    """
    self._site_login(request.repo)
    # Page title prefix, e.g. "myrepo_Pull_Request_42".
    self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
    # We add the link to the main repo page during this creation; we also create
    # the full unit test report page here.
    self._edit_main(request)
    return self._create_new(request)
Creates a new wiki page for the specified PullRequest instance. The page gets initialized with basic information about the pull request, the tests that will be run, etc. Returns the URL on the wiki. :arg request: the PullRequest instance with testing information.
def split_comp_info(self, catalog_name, split_ver, split_key):
    """Look up the info dict for one split key of a catalog/version pair."""
    dict_key = "%s_%s" % (catalog_name, split_ver)
    return self._split_comp_info_dicts[dict_key][split_key]
Return the info for a particular split key
def _identity_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle identity statement.

    Registers the identity in the schema's identity adjacency map and
    links it to each identity named in its "base" substatements, in both
    directions (this identity's bases, and each base's derivatives).
    """
    # Skip identities whose if-feature conditions are not satisfied.
    if not sctx.schema_data.if_features(stmt, sctx.text_mid):
        return
    # Qualified identity name: (local name, namespace).
    # NOTE: 'id' shadows the builtin of the same name.
    id = (stmt.argument, sctx.schema_data.namespace(sctx.text_mid))
    adj = sctx.schema_data.identity_adjs.setdefault(id, IdentityAdjacency())
    for bst in stmt.find_all("base"):
        bid = sctx.schema_data.translate_pname(bst.argument, sctx.text_mid)
        adj.bases.add(bid)
        # Record the reverse edge: this identity derives from its base.
        badj = sctx.schema_data.identity_adjs.setdefault(
            bid, IdentityAdjacency())
        badj.derivs.add(id)
    sctx.schema_data.identity_adjs[id] = adj
Handle identity statement.
def build_tqdm_outer(self, desc, total):
    """
    Extension point. Override to provide custom options to outer progress
    bars (Epoch loop).

    :param desc: Description
    :param total: Number of epochs
    :return: new progress bar
    """
    bar_options = dict(desc=desc, total=total,
                       leave=self.leave_outer, initial=self.initial)
    return self.tqdm(**bar_options)
Extension point. Override to provide custom options to outer progress bars (Epoch loop) :param desc: Description :param total: Number of epochs :return: new progress bar
def render_view(parser, token):
    """
    Return a string version of a View with as_string method.

    First argument is the name of the view. Any other arguments should be
    keyword arguments and will be passed to the view. Example:

    {% render_view viewname var1=xx var2=yy %}

    :raises TemplateSyntaxError: if no view name is given or an argument
        is not a well-formed keyword argument.
    """
    bits = token.split_contents()
    n = len(bits)
    if n < 2:
        # Bug fix: the tag name was never interpolated into the message
        # (the raw "'%s'" placeholder leaked into the error text).
        raise TemplateSyntaxError("'%s' takes at least one view as argument" % bits[0])
    viewname = bits[1]
    kwargs = {}
    if n > 2:
        for bit in bits[2:]:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to render_view tag")
            name, value = match.groups()
            if name:
                # Compile the value so template filters/variables resolve at
                # render time.
                kwargs[name] = parser.compile_filter(value)
    return StringNode(viewname, kwargs)
Return an string version of a View with as_string method. First argument is the name of the view. Any other arguments should be keyword arguments and will be passed to the view. Example: {% render_view viewname var1=xx var2=yy %}
def _set_attribute(self, name, value): """Make sure namespace gets updated when setting attributes.""" setattr(self, name, value) self.namespace.update({name: getattr(self, name)})
Make sure namespace gets updated when setting attributes.
def _cont_norm(fluxes, ivars, cont): """ Continuum-normalize a continuous segment of spectra. Parameters ---------- fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes contmask: boolean mask True indicates that pixel is continuum Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances """ nstars = fluxes.shape[0] npixels = fluxes.shape[1] norm_fluxes = np.ones(fluxes.shape) norm_ivars = np.zeros(ivars.shape) bad = cont == 0. norm_fluxes = np.ones(fluxes.shape) norm_fluxes[~bad] = fluxes[~bad] / cont[~bad] norm_ivars = cont**2 * ivars return norm_fluxes, norm_ivars
Continuum-normalize a continuous segment of spectra. Parameters ---------- fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes contmask: boolean mask True indicates that pixel is continuum Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances
def upload_plugin(self, plugin_path):
    """
    Provide plugin path for upload into Jira e.g. useful for auto deploy

    :param plugin_path: path to the plugin archive on the local filesystem
    :return: response of the upload POST request
    """
    headers = {
        'X-Atlassian-Token': 'nocheck'
    }
    # The UPM requires a one-time token fetched from the plugins endpoint.
    upm_token = self.request(method='GET', path='rest/plugins/1.0/', headers=headers, trailing=True).headers[
        'upm-token']
    url = 'rest/plugins/1.0/?token={upm_token}'.format(upm_token=upm_token)
    # Bug fix: the original opened the file without ever closing it; the
    # context manager keeps it open for the upload and closes it after.
    with open(plugin_path, 'rb') as plugin_file:
        files = {
            'plugin': plugin_file
        }
        return self.post(url, files=files, headers=headers)
Provide plugin path for upload into Jira e.g. useful for auto deploy :param plugin_path: :return:
def infer(self, sensationList, reset=True, objectName=None):
    """
    Infer on given sensations.

    The provided sensationList is a list of sensations, and each sensation
    is a mapping from cortical column to a tuple of two SDR's respectively
    corresponding to the location in object space and the feature.

    For example, the input can look as follows, if we are inferring a
    simple object with two sensations (with very few active bits for
    simplicity):

    sensationList = [
      {
        0: (set([1, 5, 10]), set([6, 12, 52]),  # location, feature for CC0
        1: (set([6, 2, 15]), set([64, 1, 5]),  # location, feature for CC1
      },
      {
        0: (set([5, 46, 50]), set([8, 10, 11]),  # location, feature for CC0
        1: (set([1, 6, 45]), set([12, 17, 23]),  # location, feature for CC1
      },
    ]

    In many uses cases, this object can be created by implementations of
    ObjectMachines (cf htm.research.object_machine_factory), through their
    method providedObjectsToInfer. If the object is known by the caller,
    an object name can be specified as an optional argument, and must
    match the objects given while learning.

    Parameters:
    ----------------------------
    @param   sensationList (list)
             List of sensations, in the canonical format specified above

    @param   reset (bool)
             If set to True (which is the default value), the network will
             be reset after learning.

    @param   objectName (str)
             Name of the objects (must match the names given during
             learning).
    """
    self._unsetLearningMode()
    statistics = collections.defaultdict(list)

    for sensations in sensationList:
        # feed all columns with sensations
        # NOTE: xrange implies this module targets Python 2.
        for col in xrange(self.numColumns):
            location, feature = sensations[col]
            self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
            self.externalInputs[col].addDataToQueue(list(location), 0, 0)
        # Run one network step per sensation and record inference stats.
        self.network.run(1)
        self._updateInferenceStats(statistics, objectName)

    if reset:
        # send reset signal
        self._sendReset()

    # save statistics
    statistics["numSteps"] = len(sensationList)
    statistics["object"] = objectName if objectName is not None else "Unknown"
    self.statistics.append(statistics)
Infer on given sensations. The provided sensationList is a list of sensations, and each sensation is a mapping from cortical column to a tuple of two SDR's respectively corresponding to the location in object space and the feature. For example, the input can look as follows, if we are inferring a simple object with two sensations (with very few active bits for simplicity): sensationList = [ { 0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0 1: (set([6, 2, 15]), set([64, 1, 5]), # location, feature for CC1 }, { 0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0 1: (set([1, 6, 45]), set([12, 17, 23]), # location, feature for CC1 }, ] In many uses cases, this object can be created by implementations of ObjectMachines (cf htm.research.object_machine_factory), through their method providedObjectsToInfer. If the object is known by the caller, an object name can be specified as an optional argument, and must match the objects given while learning. Parameters: ---------------------------- @param sensationList (list) List of sensations, in the canonical format specified above @param reset (bool) If set to True (which is the default value), the network will be reset after learning. @param objectName (str) Name of the objects (must match the names given during learning).
def is_authorization_expired(self):
    """Checks if the authorization token (access_token) has expired.

    :return: If expired.
    :rtype: ``bool``
    """
    expiration = self.auth.token_expiration
    # No recorded expiration is treated as already expired.
    if not expiration:
        return True
    return datetime.datetime.utcnow() > expiration
Checks if the authorization token (access_token) has expired. :return: If expired. :rtype: ``bool``
def purgeCache(self, *args, **kwargs):
    """
    Purge Worker Cache

    Publish a purge-cache message to purge caches named `cacheName` with
    `provisionerId` and `workerType` in the routing-key. Workers should
    be listening for this message and purge caches when they see it.

    This method takes input: ``v1/purge-cache-request.json#``

    This method is ``stable``
    """
    # Generated-client passthrough: argument handling and the HTTP call
    # are performed by _makeApiCall using the endpoint metadata stored in
    # self.funcinfo.
    return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
Purge Worker Cache Publish a purge-cache message to purge caches named `cacheName` with `provisionerId` and `workerType` in the routing-key. Workers should be listening for this message and purge caches when they see it. This method takes input: ``v1/purge-cache-request.json#`` This method is ``stable``
def safe_main():
    """Run main(), converting expected failures into clean exits.

    KeyboardInterrupt is logged and exits with status 0; ProgramError is
    logged and reported through the module's argument parser.
    """
    try:
        main()
    except ProgramError as err:
        message = err.message
        logger.error(message)
        parser.error(message)
    except KeyboardInterrupt:
        logger.info("Cancelled by user")
        sys.exit(0)
A safe version of the main function (that catches ProgramError).
async def stdout_writer():
    """
    Return a writable object for standard output.

    This is a bit complex, as stdout can be a pipe or a file. If it is a
    file, we cannot use :meth:`asyncio.BaseEventLoop.connect_write_pipe`,
    so the raw underlying buffer is returned instead.
    """
    if sys.stdout.seekable():
        # it's a file
        return sys.stdout.buffer.raw

    if os.isatty(sys.stdin.fileno()):
        # it's a tty, use fd 0
        # NOTE(review): fd 0 of a tty is the read/write controlling
        # terminal, but keying the decision off *stdin* looks fragile —
        # confirm intent.
        fd_to_use = 0
    else:
        fd_to_use = 1

    # NOTE(review): 'loop' is not defined in this function — presumably a
    # module-level event loop; verify it is set before this coroutine runs.
    twrite, pwrite = await loop.connect_write_pipe(
        asyncio.streams.FlowControlMixin,
        os.fdopen(fd_to_use, "wb"),
    )
    swrite = asyncio.StreamWriter(
        twrite,
        pwrite,
        None,
        loop,
    )

    return swrite
This is a bit complex, as stdout can be a pipe or a file. If it is a file, we cannot use :meth:`asycnio.BaseEventLoop.connect_write_pipe`.
def generate_shared_access_signature(self, services, resource_types,
                                     permission, expiry, start=None,
                                     ip=None, protocol=None):
    '''
    Generates a shared access signature for the account.
    Use the returned signature with the sas_token parameter of the service
    or to create a new account object.

    :param Services services:
        Specifies the services accessible with the account SAS. You can
        combine values to provide access to more than one service.
    :param ResourceTypes resource_types:
        Specifies the resource types that are accessible with the account
        SAS. You can combine values to provide access to more than one
        resource type.
    :param AccountPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has
        been specified in an associated stored access policy. You can
        combine values to provide more than one permission.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has
        been specified in an associated stored access policy. Azure will
        always convert values to UTC. If a date is passed in without
        timezone info, it is assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, start time for this call is assumed to be the time when
        the storage service receives the request. Azure will always
        convert values to UTC. If a date is passed in without timezone
        info, it is assumed to be UTC.
    :type start: datetime or str
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to
        accept requests. If the IP address from which the request
        originates does not match the IP address or address range
        specified on the SAS token, the request is not authenticated.
        For example, specifying sip=168.1.5.65 or
        sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to
        those IP addresses.
    :param str protocol:
        Specifies the protocol permitted for a request made. Possible
        values are both HTTPS and HTTP (https,http) or HTTPS only
        (https). The default value is https,http. Note that HTTP only is
        not a permitted value.
    '''
    # Both account name and key must be configured before a SAS can be
    # generated.
    _validate_not_none('self.account_name', self.account_name)
    _validate_not_none('self.account_key', self.account_key)

    # Delegate the actual token construction to SharedAccessSignature.
    sas = SharedAccessSignature(self.account_name, self.account_key)
    return sas.generate_account(services, resource_types, permission,
                                expiry, start=start, ip=ip, protocol=protocol)
Generates a shared access signature for the account. Use the returned signature with the sas_token parameter of the service or to create a new account object. :param Services services: Specifies the services accessible with the account SAS. You can combine values to provide access to more than one service. :param ResourceTypes resource_types: Specifies the resource types that are accessible with the account SAS. You can combine values to provide access to more than one resource type. :param AccountPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. You can combine values to provide more than one permission. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: datetime or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: datetime or str :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. 
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. Note that HTTP only is not a permitted value.
def _handle_backend_error(self, exception, idp):
    """
    Build a SAML error response for a failed backend authentication.

    See super class satosa.frontends.base.FrontendModule

    :type exception: satosa.exception.SATOSAAuthenticationError
    :type idp: saml.server.Server
    :rtype: satosa.response.Response

    :param exception: The SATOSAAuthenticationError
    :param idp: The saml frontend idp server
    :return: A response
    """
    # Recover the request context that was stashed before the backend ran.
    state = self.load_state(exception.state)
    relay_state = state["relay_state"]
    resp_args = state["resp_args"]

    error_resp = idp.create_error_response(
        resp_args["in_response_to"],
        resp_args["destination"],
        Exception(exception.message),
    )
    http_args = idp.apply_binding(
        resp_args["binding"],
        str(error_resp),
        resp_args["destination"],
        relay_state,
        response=True,
    )
    satosa_logging(logger, logging.DEBUG, "HTTPargs: %s" % http_args, exception.state)
    return make_saml_response(resp_args["binding"], http_args)
See super class satosa.frontends.base.FrontendModule :type exception: satosa.exception.SATOSAAuthenticationError :type idp: saml.server.Server :rtype: satosa.response.Response :param exception: The SATOSAAuthenticationError :param idp: The saml frontend idp server :return: A response
def prehook(self, **kwargs):
    """Launch the local smpd daemon and return its exit code."""
    command = ['smpd', '-s']
    logger.info("Starting smpd: " + " ".join(command))
    return subprocess.call(command)
Launch local smpd.
def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]): """This method obtains the actual features.""" # Use specific feature if self.feature_str not in valid_features: raise RuntimeError("Feature %s in not valid for algorithm: %s " "(valid features are %s)." % (self.feature_str, __name__, valid_features)) else: try: F = self.features.features except KeyError: raise RuntimeError("Feature %s in not supported by MSAF" % (self.feature_str)) return F
This method obtains the actual features.
def load_file_contents(cls, file_contents, seed_values=None):
    """Load config from in-memory payloads.

    A handful of seed values act as if specified in the loaded config
    file's DEFAULT section and are available for substitutions; the
    caller may override some of them via *seed_values*.

    :param list[FileContents] file_contents: Load from these FileContents.
      Later instances take precedence over earlier ones. If empty,
      returns an empty config.
    :param seed_values: A dict with optional override seed values for
      buildroot, pants_workdir, pants_supportdir and pants_distdir.
    """
    @contextmanager
    def _open_payload(fc):
        # Wrap the raw bytes in a file-like object for the meta loader.
        with io.BytesIO(fc.content) as stream:
            yield stream

    return cls._meta_load(_open_payload, file_contents, seed_values)
Loads config from the given string payloads. A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT section, and be available for use in substitutions. The caller may override some of these seed values. :param list[FileContents] file_contents: Load from these FileContents. Later instances take precedence over earlier ones. If empty, returns an empty config. :param seed_values: A dict with optional override seed values for buildroot, pants_workdir, pants_supportdir and pants_distdir.
def init_map(self):
    """ Add markers, polys, callouts, etc.."""
    # Apply declaration-driven display toggles (only when truthy, so the
    # map's defaults are left untouched for falsy values).
    d = self.declaration
    if d.show_location:
        self.set_show_location(d.show_location)
    if d.show_traffic:
        self.set_show_traffic(d.show_traffic)
    if d.show_indoors:
        self.set_show_indoors(d.show_indoors)
    if d.show_buildings:
        self.set_show_buildings(d.show_buildings)

    #: Local ref access is faster
    mapview = self.map
    # The view's id is used to register this object as the listener.
    mid = mapview.getId()

    #: Connect signals
    #: Camera
    mapview.onCameraChange.connect(self.on_camera_changed)
    mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
    mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
    mapview.onCameraIdle.connect(self.on_camera_move_stopped)
    mapview.setOnCameraChangeListener(mid)
    mapview.setOnCameraMoveStartedListener(mid)
    mapview.setOnCameraMoveCanceledListener(mid)
    mapview.setOnCameraIdleListener(mid)

    #: Clicks
    mapview.onMapClick.connect(self.on_map_clicked)
    mapview.setOnMapClickListener(mid)
    mapview.onMapLongClick.connect(self.on_map_long_clicked)
    mapview.setOnMapLongClickListener(mid)

    #: Markers
    mapview.onMarkerClick.connect(self.on_marker_clicked)
    # NOTE(review): `self.map.getId()` is the same value as `mid`; kept
    # as written for byte-identical behavior.
    mapview.setOnMarkerClickListener(self.map.getId())
    mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
    mapview.onMarkerDrag.connect(self.on_marker_drag)
    mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
    mapview.setOnMarkerDragListener(mid)

    #: Info window
    mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
    mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
    mapview.onInfoWindowClose.connect(self.on_info_window_closed)
    mapview.setOnInfoWindowClickListener(mid)
    mapview.setOnInfoWindowCloseListener(mid)
    mapview.setOnInfoWindowLongClickListener(mid)

    #: Polys
    mapview.onPolygonClick.connect(self.on_poly_clicked)
    mapview.onPolylineClick.connect(self.on_poly_clicked)
    mapview.setOnPolygonClickListener(mid)
    mapview.setOnPolylineClickListener(mid)

    #: Circle
    mapview.onCircleClick.connect(self.on_circle_clicked)
    mapview.setOnCircleClickListener(mid)
Add markers, polys, callouts, etc..
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
    """Open a dataset and return it.

    Dispatch on the type of *fname*:

    - an ``xr.Dataset`` is returned unchanged;
    - a non-string iterable is treated as multiple files and opened with
      :func:`open_mfdataset`;
    - otherwise it is treated as a single file and opened with
      :func:`open_dataset`.

    When *store_mod* and *store_cls* are both given, each file name is
    first resolved through ``_open_store``.
    """
    if isinstance(fname, xr.Dataset):
        return fname
    if not isstring(fname):
        try:  # test iterable (EAFP: non-indexable values fall through)
            fname[0]
        except TypeError:
            pass
        else:
            if store_mod is not None and store_cls is not None:
                # A single store spec may be broadcast over all files.
                if isstring(store_mod):
                    store_mod = repeat(store_mod)
                if isstring(store_cls):
                    store_cls = repeat(store_cls)
                fname = [_open_store(sm, sc, f)
                         for sm, sc, f in zip(store_mod, store_cls, fname)]
                # engine/lock are overridden because the stores are
                # already-opened objects, not paths.
                kwargs['engine'] = None
                kwargs['lock'] = False
            return open_mfdataset(fname, **kwargs)
    if store_mod is not None and store_cls is not None:
        fname = _open_store(store_mod, store_cls, fname)
    return open_dataset(fname, **kwargs)
Open a dataset and return it
def convert_user_type(cls, name, value):
    """
    Converts a user type to RECORD that contains n fields, where n is the
    number of attributes. Each element in the user type class will be
    converted to its corresponding data type in BQ.
    """
    fields = value._fields
    # Note: each field's own name (not the outer `name` argument) is passed
    # to convert_value, matching the original shadowing behavior.
    converted = [cls.convert_value(field, getattr(value, field))
                 for field in fields]
    return cls.generate_data_dict(fields, converted)
Converts a user type to RECORD that contains n fields, where n is the number of attributes. Each element in the user type class will be converted to its corresponding data type in BQ.
def merge_offsets_metadata(topics, *offsets_responses):
    """Merge the offset metadata dictionaries from multiple responses.

    :param topics: list of topics
    :param offsets_responses: list of dict topic: partition: offset
    :returns: dict topic: partition: offset
    """
    merged = {}
    for topic in topics:
        # Gather this topic's partition offsets from every response that
        # actually contains the topic.
        per_topic = [resp[topic] for resp in offsets_responses if topic in resp]
        merged[topic] = merge_partition_offsets(*per_topic)
    return merged
Merge the offset metadata dictionaries from multiple responses. :param topics: list of topics :param offsets_responses: list of dict topic: partition: offset :returns: dict topic: partition: offset
def _buildTemplates(self):
    """
    OVERRIDING THIS METHOD from Factory

    Render all HTML pages for the multi-page documentation site: the
    index, the statistics dashboard, an A-Z entity list, one entity-tree
    page per entity kind (classes, properties, concepts, shapes), and one
    detail page per entity. Returns the URL of the saved index page.
    """
    # INDEX - MAIN PAGE
    contents = self._renderTemplate(
        "html-multi/index.html",
        extraContext={"theme": self.theme, "index_page_flag": True})
    FILE_NAME = "index.html"
    main_url = self._save2File(contents, FILE_NAME, self.output_path)

    # DASHBOARD
    contents = self._renderTemplate(
        "html-multi/statistics.html",
        extraContext={"theme": self.theme})
    FILE_NAME = "statistics.html"
    self._save2File(contents, FILE_NAME, self.output_path)

    # VIZ LIST
    # NOTE(review): dead code — visualization-list generation is disabled.
    if False:
        contents = self._renderTemplate(
            "html-multi/viz_list.html",
            extraContext={"theme": self.theme})
        FILE_NAME = "visualizations.html"
        self._save2File(contents, FILE_NAME, self.output_path)

    browser_output_path = self.output_path

    # ENTITIES A-Z
    extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme}
    contents = self._renderTemplate(
        "html-multi/browser/browser_entities_az.html",
        extraContext=extra_context)
    FILE_NAME = "entities-az.html"
    self._save2File(contents, FILE_NAME, browser_output_path)

    if self.ontospy_graph.all_classes:
        # CLASSES = ENTITIES TREE
        extra_context = {"ontograph": self.ontospy_graph,
                         "theme": self.theme,
                         "treetype": "classes",
                         'treeTable': formatHTML_EntityTreeTable(
                             self.ontospy_graph.ontologyClassTree())}
        contents = self._renderTemplate(
            "html-multi/browser/browser_entities_tree.html",
            extraContext=extra_context)
        FILE_NAME = "entities-tree-classes.html"
        self._save2File(contents, FILE_NAME, browser_output_path)
        # BROWSER PAGES - CLASSES ======
        for entity in self.ontospy_graph.all_classes:
            extra_context = {"main_entity": entity,
                             "main_entity_type": "class",
                             "theme": self.theme,
                             "ontograph": self.ontospy_graph}
            # Merge in highlighted source snippets for the entity page.
            extra_context.update(self.highlight_code(entity))
            contents = self._renderTemplate(
                "html-multi/browser/browser_classinfo.html",
                extraContext=extra_context)
            FILE_NAME = entity.slug + ".html"
            self._save2File(contents, FILE_NAME, browser_output_path)

    if self.ontospy_graph.all_properties:
        # PROPERTIES = ENTITIES TREE
        extra_context = {"ontograph": self.ontospy_graph,
                         "theme": self.theme,
                         "treetype": "properties",
                         'treeTable': formatHTML_EntityTreeTable(
                             self.ontospy_graph.ontologyPropTree())}
        contents = self._renderTemplate(
            "html-multi/browser/browser_entities_tree.html",
            extraContext=extra_context)
        FILE_NAME = "entities-tree-properties.html"
        self._save2File(contents, FILE_NAME, browser_output_path)
        # BROWSER PAGES - PROPERTIES ======
        for entity in self.ontospy_graph.all_properties:
            extra_context = {"main_entity": entity,
                             "main_entity_type": "property",
                             "theme": self.theme,
                             "ontograph": self.ontospy_graph}
            extra_context.update(self.highlight_code(entity))
            contents = self._renderTemplate(
                "html-multi/browser/browser_propinfo.html",
                extraContext=extra_context)
            FILE_NAME = entity.slug + ".html"
            self._save2File(contents, FILE_NAME, browser_output_path)

    if self.ontospy_graph.all_skos_concepts:
        # CONCEPTS = ENTITIES TREE
        extra_context = {"ontograph": self.ontospy_graph,
                         "theme": self.theme,
                         "treetype": "concepts",
                         'treeTable': formatHTML_EntityTreeTable(
                             self.ontospy_graph.ontologyConceptTree())}
        contents = self._renderTemplate(
            "html-multi/browser/browser_entities_tree.html",
            extraContext=extra_context)
        FILE_NAME = "entities-tree-concepts.html"
        self._save2File(contents, FILE_NAME, browser_output_path)
        # BROWSER PAGES - CONCEPTS ======
        for entity in self.ontospy_graph.all_skos_concepts:
            extra_context = {"main_entity": entity,
                             "main_entity_type": "concept",
                             "theme": self.theme,
                             "ontograph": self.ontospy_graph}
            extra_context.update(self.highlight_code(entity))
            contents = self._renderTemplate(
                "html-multi/browser/browser_conceptinfo.html",
                extraContext=extra_context)
            FILE_NAME = entity.slug + ".html"
            self._save2File(contents, FILE_NAME, browser_output_path)

    if self.ontospy_graph.all_shapes:
        # SHAPES = ENTITIES TREE
        extra_context = {"ontograph": self.ontospy_graph,
                         "theme": self.theme,
                         "treetype": "shapes",
                         'treeTable': formatHTML_EntityTreeTable(
                             self.ontospy_graph.ontologyShapeTree())}
        contents = self._renderTemplate(
            "html-multi/browser/browser_entities_tree.html",
            extraContext=extra_context)
        FILE_NAME = "entities-tree-shapes.html"
        self._save2File(contents, FILE_NAME, browser_output_path)
        # BROWSER PAGES - SHAPES ======
        for entity in self.ontospy_graph.all_shapes:
            extra_context = {"main_entity": entity,
                             "main_entity_type": "shape",
                             "theme": self.theme,
                             "ontograph": self.ontospy_graph}
            extra_context.update(self.highlight_code(entity))
            contents = self._renderTemplate(
                "html-multi/browser/browser_shapeinfo.html",
                extraContext=extra_context)
            FILE_NAME = entity.slug + ".html"
            self._save2File(contents, FILE_NAME, browser_output_path)

    return main_url
OVERRIDING THIS METHOD from Factory
def create_surface_grid(nr_electrodes=None, spacing=None, electrodes_x=None,
                        depth=None, left=None, right=None, char_lengths=None,
                        lines=None, debug=False, workdir=None):
    """This is a simple wrapper for cr_trig_create to create simple surface
    grids.

    Automatically generated electrode positions are rounded to the third
    digit.

    Parameters
    ----------
    nr_electrodes: int, optional
        the number of surface electrodes
    spacing: float, optional
        the spacing between electrodes, usually in [m], required if nr of
        electrodes is given
    electrodes_x: array, optional
        x-electrode positions can be provided here, e.g., for
        non-equidistant electrode distances
    depth: float, optional
        the depth of the grid. If not given, this is computed as half the
        maximum distance between electrodes
    left: float, optional
        the space allocated left of the first electrode. If not given,
        compute as a fourth of the maximum inter-electrode distance
    right: float, optional
        the space allocated right of the first electrode. If not given,
        compute as a fourth of the maximum inter-electrode distance
    char_lengths: float|list of 4 floats, optional
        characteristic lengths, as used by cr_trig_create
    lines: list of floats, optional
        at the given depths, add horizontal lines in the grid. Note that
        all positive values will be multiplied by -1!
    debug: bool, optional
        default: False. If true, don't hide the output of cr_trig_create
    workdir: string, optional
        if set, use this directory to create the grid. Don't delete files
        afterwards.

    Returns
    -------
    grid: :class:`crtomo.grid.crt_grid` instance
        the generated grid

    Examples
    --------
    >>> from crtomo.grid import crt_grid
    >>> grid = crt_grid.create_surface_grid(40, spacing=0.25, depth=5,
    ...     left=2, right=2, char_lengths=[0.1, 0.5, 0.1, 0.5],
    ...     lines=[0.4, 0.8], debug=False, workdir=None)
    >>> import pylab as plt
    >>> fig, ax = plt.subplots()
    >>> grid.plot_grid_to_ax(ax)
    """
    # check if all required information are present
    if electrodes_x is None and (nr_electrodes is None or spacing is None):
        raise Exception(
            'You must provide either the parameter "electrodes_x" or ' +
            'the parameters "nr_electrodes" AND "spacing"'
        )

    if electrodes_x is None:
        electrodes = np.array(
            [(x, 0.0) for x in np.arange(0.0, nr_electrodes)]
        )
        electrodes[:, 0] = electrodes[:, 0] * spacing
        electrodes = np.round(electrodes, 3)
    else:
        nr_electrodes = len(electrodes_x)
        # BUG FIX: np.hstack on a 1-D electrodes_x produced a 1-D array,
        # breaking the electrodes[:, 0] indexing below. column_stack always
        # yields an (N, 2) array of (x, z) positions.
        electrodes = np.column_stack(
            (electrodes_x, np.zeros_like(electrodes_x))
        )

    max_distance = np.abs(
        np.max(electrodes[:, 0]) - np.min(electrodes[:, 0])
    )
    minx = electrodes[:, 0].min()
    maxx = electrodes[:, 0].max()
    if left is None:
        left = max_distance / 4
    if right is None:
        right = max_distance / 4
    if depth is None:
        depth = max_distance / 2

    # min/max coordinates of final grid
    minimum_x = minx - left
    # BUG FIX: the right padding previously (incorrectly) used `left`.
    maximum_x = maxx + right
    minimum_z = -depth
    maximum_z = 0

    # boundary-type markers used by cr_trig_create
    boundary_noflow = 11
    boundary_mixed = 12

    # prepare extra lines
    extra_lines = []
    add_boundary_nodes_left = []
    add_boundary_nodes_right = []
    if lines is not None:
        lines = np.array(lines)
        lines[np.where(np.array(lines) < 0)] *= -1
        lines = sorted(lines)
        for line_depth in lines:
            extra_lines.append(
                (minimum_x, -line_depth, maximum_x, -line_depth)
            )
            add_boundary_nodes_left.append(
                (minimum_x, -line_depth, boundary_mixed)
            )
            add_boundary_nodes_right.append(
                (maximum_x, -line_depth, boundary_mixed)
            )
    # reverse direction of left nodes so the boundary polygon stays ordered
    add_boundary_nodes_left = np.array(add_boundary_nodes_left)[::-1]
    add_boundary_nodes_right = np.array(add_boundary_nodes_right)

    surface_electrodes = np.hstack((
        electrodes, boundary_noflow * np.ones((electrodes.shape[0], 1))
    ))
    boundaries = np.vstack((
        (minimum_x, 0, boundary_noflow),
        surface_electrodes,
        (maximum_x, maximum_z, boundary_mixed),
    ))
    if len(add_boundary_nodes_right) != 0:
        boundaries = np.vstack((
            boundaries,
            add_boundary_nodes_right,
        ))
    boundaries = np.vstack((
        boundaries,
        (maximum_x, minimum_z, boundary_mixed),
        (minimum_x, minimum_z, boundary_mixed),
    ))
    if len(add_boundary_nodes_left) != 0:
        # BUG FIX: previously this vstack call contained only the left
        # nodes, silently discarding all boundaries built so far.
        boundaries = np.vstack((
            boundaries,
            add_boundary_nodes_left,
        ))

    if char_lengths is None:
        # ROBUSTNESS: `spacing` is None when positions came from
        # electrodes_x; fall back to the minimum electrode separation.
        if spacing is not None:
            char_lengths = [spacing / 3.0, ]
        else:
            char_lengths = [
                np.min(np.abs(np.diff(np.sort(electrodes[:, 0])))) / 3.0,
            ]

    if workdir is None:
        tempdir_obj = tempfile.TemporaryDirectory()
        tempdir = tempdir_obj.name
    else:
        if not os.path.isdir(workdir):
            os.makedirs(workdir)
        tempdir = workdir

    np.savetxt(
        tempdir + os.sep + 'electrodes.dat', electrodes, fmt='%.3f %.3f'
    )
    np.savetxt(
        tempdir + os.sep + 'boundaries.dat', boundaries,
        fmt='%.4f %.4f %i'
    )
    np.savetxt(
        tempdir + os.sep + 'char_length.dat', np.atleast_1d(char_lengths)
    )
    if extra_lines:
        np.savetxt(
            tempdir + os.sep + 'extra_lines.dat',
            np.atleast_2d(extra_lines),
            fmt='%.4f %.4f %.4f %.4f'
        )

    pwd = os.getcwd()
    os.chdir(tempdir)
    try:
        if debug:
            subprocess.call(
                'cr_trig_create grid',
                shell=True,
            )
        else:
            subprocess.check_output(
                'cr_trig_create grid',
                shell=True,
            )
    except subprocess.CalledProcessError as e:
        print('there was an error generating the grid')
        print(e.returncode)
        print(e.output)
        import shutil
        shutil.copytree(tempdir, pwd + os.sep + 'GRID_FAIL')
        # NOTE(review): exit() in library code is questionable, but it is
        # preserved because callers may rely on it terminating the run.
        exit()
    finally:
        os.chdir(pwd)

    grid = crt_grid(
        elem_file=tempdir + os.sep + 'grid' + os.sep + 'elem.dat',
        elec_file=tempdir + os.sep + 'grid' + os.sep + 'elec.dat',
    )
    if workdir is None:
        tempdir_obj.cleanup()
    return grid
This is a simple wrapper for cr_trig_create to create simple surface grids. Automatically generated electrode positions are rounded to the third digit. Parameters ---------- nr_electrodes: int, optional the number of surface electrodes spacing: float, optional the spacing between electrodes, usually in [m], required if nr of electrodes is given electrodes_x: array, optional x-electrode positions can be provided here, e.g., for non-equidistant electrode distances depth: float, optional the depth of the grid. If not given, this is computed as half the maximum distance between electrodes left: float, optional the space allocated left of the first electrode. If not given, compute as a fourth of the maximum inter-electrode distance right: float, optional the space allocated right of the first electrode. If not given, compute as a fourth of the maximum inter-electrode distance char_lengths: float|list of 4 floats, optional characteristic lengths, as used by cr_trig_create lines: list of floats, optional at the given depths, add horizontal lines in the grid. Note that all positive values will be multiplied by -1! debug: bool, optional default: False. If true, don't hide the output of cr_trig_create workdir: string, optional if set, use this directory to create the grid. Don't delete files afterwards. Returns ------- grid: :class:`crtomo.grid.crt_grid` instance the generated grid Examples -------- >>> from crtomo.grid import crt_grid >>> grid = crt_grid.create_surface_grid(40, spacing=0.25, depth=5, ... left=2, right=2, char_lengths=[0.1, 0.5, 0.1, 0.5], ... lines=[0.4, 0.8], debug=False, workdir=None) >>> import pylab as plt >>> fig, ax = plt.subplots() >>> grid.plot_grid_to_ax(ax)
def _handle_http_error(self, url, response_obj, status_code, psp_ref,
                       raw_request, raw_response, headers, message):
    """This function handles the non 200 responses from Adyen, raising an
    error that should provide more information.

    Args:
        url (str): url of the request
        response_obj (dict): Dict containing the parsed JSON response from
            Adyen
        status_code (int): HTTP status code of the request
        psp_ref (str): Psp reference of the request attempt
        raw_request (str): The raw request placed to Adyen
        raw_response (str): The raw response(body) returned by Adyen
        headers(dict): headers of the response

    Returns:
        None

    Raises:
        AdyenAPICommunicationError, AdyenAPIValidationError,
        AdyenAPIAuthenticationError, AdyenAPIInvalidPermission,
        AdyenAPIInvalidAmount, AdyenAPIInvalidFormat depending on the
        status code and response body.
    """
    if status_code == 404:
        # 404 on the merchant-specific url means the url itself is wrong.
        if url == self.merchant_specific_url:
            erstr = "Received a 404 for url:'{}'. Please ensure that" \
                    " the custom merchant specific url is correct" \
                .format(url)
            raise AdyenAPICommunicationError(erstr,
                                             error_code=response_obj.get(
                                                 "errorCode"))
        else:
            erstr = "Unexpected error while communicating with Adyen." \
                    " Please reach out to support@adyen.com" \
                    " if the problem persists"
            raise AdyenAPICommunicationError(erstr,
                                             raw_request=raw_request,
                                             raw_response=raw_response,
                                             url=url,
                                             psp=psp_ref,
                                             headers=headers,
                                             error_code=response_obj.get(
                                                 "errorCode"))
    elif status_code == 400:
        erstr = "Received validation error with errorCode: %s," \
                " message: %s, HTTP Code: %s. Please verify" \
                " the values provided. Please reach out" \
                " to support@adyen.com if the problem persists," \
                " providing the PSP reference: %s" % (
                    response_obj["errorCode"],
                    response_obj["message"],
                    status_code,
                    psp_ref)
        raise AdyenAPIValidationError(erstr,
                                      error_code=response_obj.get(
                                          "errorCode"))
    elif status_code == 401:
        erstr = "Unable to authenticate with Adyen's Servers." \
                " Please verify the credentials set with the Adyen base" \
                " class. Please reach out to your Adyen Admin" \
                " if the problem persists"
        raise AdyenAPIAuthenticationError(erstr,
                                          error_code=response_obj.get(
                                              "errorCode"))
    elif status_code == 403:
        if response_obj.get("message") == "Invalid Merchant Account":
            erstr = ("You provided the merchant account:'%s' that"
                     " doesn't exist or you don't have access to it.\n"
                     "Please verify the merchant account provided. \n"
                     "Reach out to support@adyen.com"
                     " if the issue persists") \
                % raw_request['merchantAccount']
            raise AdyenAPIInvalidPermission(erstr,
                                            error_code=response_obj.get(
                                                "errorCode"))
        erstr = "Unable to perform the requested action. message: %s." \
                " If you think your webservice user: %s might not have" \
                " the necessary permissions to perform this request." \
                " Please reach out to support@adyen.com, providing" \
                " the PSP reference: %s" % (
                    response_obj["message"],
                    self.username,
                    psp_ref)
        raise AdyenAPIInvalidPermission(erstr,
                                        error_code=response_obj.get(
                                            "errorCode"))
    elif status_code == 422:
        # NOTE(review): a 422 with any other message falls through and
        # returns None silently — confirm this is intentional.
        if response_obj.get("message") == "Invalid amount specified":
            raise AdyenAPIInvalidAmount(
                "Invalid amount specified"
                "Amount may be improperly formatted, too small or too big."
                "If the issue persists, contact support@adyen.com",
                error_code=response_obj.get("errorCode"))
    elif status_code == 500:
        if response_obj.get("errorType") == "validation":
            err_args = (response_obj.get("errorCode"),
                        response_obj.get("message"),
                        status_code)
            erstr = "Received validation error with errorCode: %s," \
                    " message: %s, HTTP Code: %s. Please verify" \
                    " the values provided." % err_args
            raise AdyenAPIValidationError(erstr,
                                          error_code=response_obj.get(
                                              "errorCode"))
        # NOTE(review): a 500 that is neither a validation error nor the
        # serialization message below falls through and returns None.
        if response_obj.get("message") == "Failed to serialize node " \
                "Failed to parse [123.34]" \
                " as a Long":
            raise AdyenAPIInvalidFormat(
                "The payment amount must be set in cents,"
                " and can not contain commas or points.",
                error_code=response_obj.get("errorCode")
            )
    else:
        # Any other non-200 status code is treated as a generic
        # communication failure.
        raise AdyenAPICommunicationError(
            "Unexpected error while communicating with Adyen. Received the"
            " response data:'{}', HTTP Code:'{}'. Please reach out to "
            "support@adyen.com if the problem persists"
            " with the psp:{}".format(raw_response,
                                      status_code,
                                      psp_ref),
            status_code=status_code,
            raw_request=raw_request,
            raw_response=raw_response,
            url=url,
            psp=psp_ref,
            headers=headers,
            error_code=response_obj.get("errorCode"))
This function handles the non 200 responses from Adyen, raising an error that should provide more information. Args: url (str): url of the request response_obj (dict): Dict containing the parsed JSON response from Adyen status_code (int): HTTP status code of the request psp_ref (str): Psp reference of the request attempt raw_request (str): The raw request placed to Adyen raw_response (str): The raw response(body) returned by Adyen headers(dict): headers of the response Returns: None
def intersect_leaderboards(self, destination, keys, aggregate='SUM'):
    '''
    Intersect leaderboards given by keys with this leaderboard into a named
    destination leaderboard.

    @param destination [String] Destination leaderboard name.
    @param keys [Array] Leaderboards to be merged with the current
        leaderboard.
    @param aggregate [String] Aggregation method for the intersection
        (SUM, MIN or MAX); defaults to SUM.
    '''
    # BUG FIX: the previous implementation mutated the caller's `keys`
    # list in place via keys.insert(0, ...). Build a new list instead.
    combined_keys = [self.leaderboard_name] + list(keys)
    self.redis_connection.zinterstore(destination, combined_keys, aggregate)
Intersect leaderboards given by keys with this leaderboard into a named destination leaderboard. @param destination [String] Destination leaderboard name. @param keys [Array] Leaderboards to be merged with the current leaderboard. @param aggregate [String] Aggregation method for the intersection (SUM, MIN or MAX); defaults to SUM.
def _arg(self, line):
    '''singularity doesn't have support for ARG, so instead will issue
    a warning to the console for the user to export the variable
    with SINGULARITY prefixed at build.

    Parameters
    ==========
    line: the line from the recipe file to parse for ARG
    '''
    parsed = self._setup('ARG', line)
    var_name = parsed[0]
    bot.warning("ARG is not supported for Singularity! To get %s" % var_name)
    bot.warning("in the container, on host export SINGULARITY_%s" % var_name)
singularity doesn't have support for ARG, so instead will issue a warning to the console for the user to export the variable with SINGULARITY prefixed at build. Parameters ========== line: the line from the recipe file to parse for ARG
def file_list(*packages, **kwargs):
    '''
    List the files that belong to a package. Not specifying any packages
    will return a list of _every_ file on the system's package database
    (not generally recommended).

    CLI Examples:

    .. code-block:: bash

        salt '*' lowpkg.file_list httpd
        salt '*' lowpkg.file_list httpd postfix
        salt '*' lowpkg.file_list
    '''
    errors = []
    all_files = set()
    pkgs = {}

    # First ask dpkg which of the requested packages are installed.
    query = 'dpkg -l {0}'.format(' '.join(packages))
    result = __salt__['cmd.run_all'](query, python_shell=False)
    if result['retcode'] != 0:
        msg = 'Error: ' + result['stderr']
        log.error(msg)
        return msg
    for line in result['stdout'].splitlines():
        if line.startswith('ii '):
            comps = line.split()
            pkgs[comps[1]] = {'version': comps[2],
                              'description': ' '.join(comps[3:])}
        if 'No packages found' in line:
            errors.append(line)

    # Then list the files owned by each installed package.
    for pkg in pkgs:
        listing = __salt__['cmd.run'](
            'dpkg -L {0}'.format(pkg), python_shell=False)
        all_files |= set(listing.splitlines())
    return {'errors': errors, 'files': list(all_files)}
List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list
def insertRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Inserts the specified rnaQuantificationSet into this repository.
    """
    try:
        models.Rnaquantificationset.create(
            id=rnaQuantificationSet.getId(),
            datasetid=rnaQuantificationSet.getParentContainer().getId(),
            referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
            name=rnaQuantificationSet.getLocalId(),
            dataurl=rnaQuantificationSet.getDataUrl(),
            attributes=json.dumps(rnaQuantificationSet.getAttributes()))
    except Exception:
        # NOTE(review): any failure in create() — not only a name clash —
        # is reported as a duplicate name; consider narrowing this except
        # to the database's integrity-error type.
        raise exceptions.DuplicateNameException(
            rnaQuantificationSet.getLocalId(),
            rnaQuantificationSet.getParentContainer().getLocalId())
Inserts the specified rnaQuantificationSet into this repository.
async def main() -> None:
    """Create the aiohttp session and run the example.

    Logs in with placeholder credentials, then prints the account id,
    account summary and package summary. Replace '<EMAIL>' and
    '<PASSWORD>' with real credentials before running.
    """
    logging.basicConfig(level=logging.INFO)
    async with ClientSession() as websession:
        try:
            client = Client(websession)
            await client.profile.login('<EMAIL>', '<PASSWORD>')
            _LOGGER.info('Account ID: %s', client.profile.account_id)
            summary = await client.profile.summary()
            _LOGGER.info('Account Summary: %s', summary)
            packages = await client.profile.packages()
            _LOGGER.info('Package Summary: %s', packages)
        except SeventeenTrackError as err:
            # Library-specific failures are reported, not re-raised.
            print(err)
Create the aiohttp session and run the example.
def embedding_density(
        adata: AnnData,
        basis: str,
        *,
        groupby: Optional[str] = None,
        key_added: Optional[str] = None,
        components: Union[str, Sequence[str]] = None
):
    """Calculate the density of cells in an embedding (per condition)

    Gaussian kernel density estimation is used to calculate the density of
    cells in an embedded space. This can be performed per category over a
    categorical cell annotation. The cell density can be plotted using the
    `sc.pl.embedding_density()` function.

    Note that density values are scaled to be between 0 and 1. Thus, the
    density value at each cell is only comparable to other densities in
    the same condition category.

    This function was written by Sophie Tritschler and implemented into
    Scanpy by Malte Luecken.

    Parameters
    ----------
    adata
        The annotated data matrix.
    basis
        The embedding over which the density will be calculated. This
        embedded representation should be found in
        `adata.obsm['X_[basis]']``.
    groupby
        Keys for categorical observation/cell annotation for which
        densities are calculated per category. Columns with up to ten
        categories are accepted.
    key_added
        Name of the `.obs` covariate that will be added with the density
        estimates.
    components
        The embedding dimensions over which the density should be
        calculated. This is limited to two components.

    Returns
    -------
    Updates `adata.obs` with an additional field specified by the
    `key_added` parameter. This parameter defaults to
    `[basis]_density_[groupby]`, where `[basis]` is one of `umap`,
    `diffmap`, `pca`, `tsne`, or `draw_graph_fa` and `[groupby]` denotes
    the parameter input. Updates `adata.uns` with an additional field
    `[key_added]_params`.

    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.tl.umap(adata)
    >>> sc.tl.embedding_density(adata, basis='umap', groupby='phase')
    >>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase',
    ...                         group='G1')
    >>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase',
    ...                         group='S')
    """
    # to ensure that newly created covariates are categorical
    # to test for category numbers
    sanitize_anndata(adata)

    logg.info('computing density on \'{}\''.format(basis), r=True)

    # Test user inputs
    basis = basis.lower()

    if basis == 'fa':
        basis = 'draw_graph_fa'

    if 'X_'+basis not in adata.obsm_keys():
        raise ValueError('Cannot find the embedded representation `adata.obsm[X_{!r}]`. '
                         'Compute the embedding first.'.format(basis))

    if components is None:
        components = '1,2'
    if isinstance(components, str):
        components = components.split(',')
    components = np.array(components).astype(int) - 1  # 0-based indexing

    if len(components) != 2:
        raise ValueError('Please specify exactly 2 components, or `None`.')

    # diffmap's first stored component is trivial, hence the shift.
    if basis == 'diffmap':
        components += 1

    if groupby is not None:
        if groupby not in adata.obs:
            raise ValueError('Could not find {!r} `.obs` column.'.format(groupby))

        if adata.obs[groupby].dtype.name != 'category':
            raise ValueError('{!r} column does not contain Categorical data'.format(groupby))

        if len(adata.obs[groupby].cat.categories) > 10:
            raise ValueError('More than 10 categories in {!r} column.'.format(groupby))

    # Define new covariate name
    if key_added is not None:
        density_covariate = key_added
    elif groupby is not None:
        density_covariate = basis+'_density_'+groupby
    else:
        density_covariate = basis+'_density'

    # Calculate the densities over each category in the groupby column
    if groupby is not None:
        categories = adata.obs[groupby].cat.categories

        density_values = np.zeros(adata.n_obs)

        for cat in categories:
            cat_mask = adata.obs[groupby] == cat
            embed_x = adata.obsm['X_'+basis][cat_mask, components[0]]
            embed_y = adata.obsm['X_'+basis][cat_mask, components[1]]

            dens_embed = _calc_density(embed_x, embed_y)
            density_values[cat_mask] = dens_embed

        adata.obs[density_covariate] = density_values

    # Calculate the density over the whole embedding without subsetting
    else:  # if groupby is None
        embed_x = adata.obsm['X_'+basis][:, components[0]]
        embed_y = adata.obsm['X_'+basis][:, components[1]]

        adata.obs[density_covariate] = _calc_density(embed_x, embed_y)

    # Reduce diffmap components for labeling
    # Note: plot_scatter takes care of correcting diffmap components
    # for plotting automatically
    if basis != 'diffmap':
        components += 1

    adata.uns[density_covariate+'_params'] = {'covariate':groupby, 'components':components.tolist()}

    logg.hint('added\n'
              '    \'{}\', densities (adata.obs)\n'
              '    \'{}_params\', parameter (adata.uns)'.format(density_covariate, density_covariate))

    return None
Calculate the density of cells in an embedding (per condition) Gaussian kernel density estimation is used to calculate the density of cells in an embedded space. This can be performed per category over a categorical cell annotation. The cell density can be plotted using the `sc.pl.embedding_density()` function. Note that density values are scaled to be between 0 and 1. Thus, the density value at each cell is only comparable to other densities in the same condition category. This function was written by Sophie Tritschler and implemented into Scanpy by Malte Luecken. Parameters ---------- adata The annotated data matrix. basis The embedding over which the density will be calculated. This embedded representation should be found in `adata.obsm['X_[basis]']``. groupby Keys for categorical observation/cell annotation for which densities are calculated per category. Columns with up to ten categories are accepted. key_added Name of the `.obs` covariate that will be added with the density estimates. components The embedding dimensions over which the density should be calculated. This is limited to two components. Returns ------- Updates `adata.obs` with an additional field specified by the `key_added` parameter. This parameter defaults to `[basis]_density_[groupby]`, where where `[basis]` is one of `umap`, `diffmap`, `pca`, `tsne`, or `draw_graph_fa` and `[groupby]` denotes the parameter input. Updates `adata.uns` with an additional field `[key_added]_params`. Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.tl.umap(adata) >>> sc.tl.embedding_density(adata, basis='umap', groupby='phase') >>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase', ... group='G1') >>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase', ... group='S')
def build_docs(location="doc-source", target=None, library="icetea_lib"):
    """
    Build documentation for Icetea.

    Start by autogenerating module documentation and finish by building html.

    :param location: Documentation source directory
    :param target: Documentation target path (defaults to ``doc/html``)
    :param library: Library location for autodoc
    :return: 0 if successful, 3 if either sphinx step fails.
    """
    # Step 1: generate per-module api docs with sphinx-apidoc.
    retcode = _run_sphinx_cmd(["sphinx-apidoc", "-o", location, library],
                              "Generating api docs.")
    if retcode:
        return retcode
    target = "doc{}html".format(os.sep) if target is None else target
    # Step 2: render the html tree with sphinx-build.
    retcode = _run_sphinx_cmd(["sphinx-build", "-b", "html", location, target],
                              "Building html documentation.")
    if retcode:
        return retcode
    print("Documentation built.")
    return 0


def _run_sphinx_cmd(cmd_ar, message):
    """Run one sphinx command; return 0 on success, 3 on any failure."""
    try:
        print(message)
        check_call(cmd_ar)
    except CalledProcessError as error:
        print("Documentation build failed. Return code: {}".format(error.returncode))
        return 3
    except OSError as error:
        # Typically raised when the sphinx executable is not installed.
        print(error)
        print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
              "'pip install sphinx'.")
        return 3
    return 0
Build documentation for Icetea. Start by autogenerating module documentation and finish by building html. :param location: Documentation source :param target: Documentation target path :param library: Library location for autodoc. :return: 3 if something fails. 0 if successful.
def _gerritCmd(self, *args): '''Construct a command as a list of strings suitable for :func:`subprocess.call`. ''' if self.gerrit_identity_file is not None: options = ['-i', self.gerrit_identity_file] else: options = [] return ['ssh'] + options + [ '@'.join((self.gerrit_username, self.gerrit_server)), '-p', str(self.gerrit_port), 'gerrit' ] + list(args)
Construct a command as a list of strings suitable for :func:`subprocess.call`.
def analyze_bash_vars(job_input_file, job_homedir):
    '''
    Examine the job input file and compute the variables to instantiate
    in the shell environment, right before starting execution of an app
    in a worker.  For each input key we produce:

        $var
        $var_filename
        $var_prefix      basename with the last dot (+gz) or a matching
                         input-spec pattern removed
        $var_path        $HOME/in/var/$var_filename

    For example, with $HOME/in/genes/A.txt and B.txt:

        export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}')
        export genes_filename=("A.txt" "B.txt")
        export genes_prefix=("A" "B")
        export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt")

    If patterns are defined in the input spec, the prefix respects them.
    Only patterns of the form x*.y are recognized (e.g. *.sam, *.c.py,
    foo*.sam, a*b*c.baz); others (uu.txt, x???.tar, mon[a-z].py) are
    ignored.
    '''
    _, file_entries, rest_hash = get_job_input_filenames(job_input_file)
    patterns_dict = get_input_spec_patterns()

    def get_prefix(basename, key):
        # There may be multiple matching patterns; choose the shortest prefix.
        best_prefix = None
        patterns = patterns_dict.get(key)
        if patterns is not None:
            for pattern in patterns:
                if fnmatch.fnmatch(basename, pattern):
                    _, _, right_piece = pattern.rpartition("*")
                    best_prefix = choose_shorter_string(best_prefix,
                                                        basename[:-len(right_piece)])
        if best_prefix is not None:
            return best_prefix
        # No matching rule: strip the extension (and a trailing .gz first).
        parts = os.path.splitext(basename)
        if parts[1] == ".gz":
            parts = os.path.splitext(parts[0])
        return parts[0]

    def factory():
        return {'handler': [], 'basename': [], 'prefix': [], 'path': []}

    file_key_descs = collections.defaultdict(factory)
    rel_home_dir = get_input_dir(job_homedir)
    for key, entries in list(file_entries.items()):
        for entry in entries:
            filename = entry['trg_fname']
            basename = os.path.basename(filename)
            k_desc = file_key_descs[key]
            k_desc['handler'].append(entry['handler'])
            k_desc['basename'].append(basename)
            k_desc['prefix'].append(get_prefix(basename, key))
            k_desc['path'].append(os.path.join(rel_home_dir, filename))
    return file_key_descs, rest_hash
This function examines the input file, and calculates variables to instantiate in the shell environment. It is called right before starting the execution of an app in a worker. For each input key, we want to have $var $var_filename $var_prefix remove last dot (+gz), and/or remove patterns $var_path $HOME/in/var/$var_filename For example, $HOME/in/genes/A.txt B.txt export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}') export genes_filename=("A.txt" "B.txt") export genes_prefix=("A" "B") export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt") If there are patterns defined in the input spec, then the prefix respects them. Here are several examples, where the patterns are: *.bam, *.bwa-index.tar.gz, foo*.sam, z*ra.sam file name prefix matches foo.zed.bam foo.zed *.bam xxx.bwa-index.tar.gz xxx *.bwa-index.tar.gz food.sam food foo*.sam zebra.sam zebra z*ra.sam xx.c xx xx.c.gz xx The only patterns we recognize are of the form x*.y. For example: legal *.sam, *.c.py, foo*.sam, a*b*c.baz ignored uu.txt x???.tar mon[a-z].py
def _http_put(self, url, data, **kwargs): """ Performs the HTTP PUT request. """ kwargs.update({'data': json.dumps(data)}) return self._http_request('put', url, kwargs)
Performs the HTTP PUT request.
def set_imap(self, imap, callback=True):
    """
    Set the intensity map used by this RGBMapper.

    `imap` specifies an IntensityMap object.  If `callback` is True, then
    any callbacks associated with this change will be invoked.
    """
    self.imap = imap
    self.calc_imap()
    with self.suppress_changed:
        # TEMP: ignore passed callback parameter
        # NOTE(review): `callback` is currently unused; recalc() runs
        # unconditionally inside the suppressed-changed context.
        self.recalc()
        # callback=False in the following because we don't want to
        # recursively invoke set_imap()
        self.t_.set(intensity_map=imap.name, callback=False)
Set the intensity map used by this RGBMapper. `imap` specifies an IntensityMap object. If `callback` is True, then any callbacks associated with this change will be invoked.
def subset(self, service=None):
    """Subset the dataset.

    Open the remote dataset and get a client for talking to ``service``.

    Parameters
    ----------
    service : str, optional
        The name of the service for subsetting the dataset. Defaults to
        'NetcdfSubset' or 'NetcdfServer', in that order, depending on the
        services listed in the catalog.

    Returns
    -------
    a client for communicating using ``service``
    """
    if service is None:
        # Pick the first known subsetting service this dataset offers.
        offered = [name for name in self.ncssServiceNames
                   if name in self.access_urls]
        if not offered:
            raise RuntimeError('Subset access is not available for this dataset.')
        service = offered[0]
    elif service not in self.ncssServiceNames:
        raise ValueError(service + ' is not a valid service for subset. '
                         'Options are: ' + ', '.join(self.ncssServiceNames))

    return self.access_with_service(service)
Subset the dataset. Open the remote dataset and get a client for talking to ``service``. Parameters ---------- service : str, optional The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset' or 'NetcdfServer', in that order, depending on the services listed in the catalog. Returns ------- a client for communicating using ``service``
def get_config_parameter_boolean(config: ConfigParser,
                                 section: str,
                                 param: str,
                                 default: bool) -> bool:
    """
    Get Boolean parameter from ``configparser`` ``.INI`` file.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        default: default value

    Returns:
        parameter value, or ``default`` if the parameter (or its whole
        section) is missing or is not a valid boolean
    """
    # Local import: previously an absent [section] raised NoSectionError
    # instead of falling back to the default, unlike an absent option.
    from configparser import NoSectionError
    try:
        value = config.getboolean(section, param)
    except (TypeError, ValueError, NoOptionError, NoSectionError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}",
            param, section, default)
        value = default
    return value
Get Boolean parameter from ``configparser`` ``.INI`` file. Args: config: :class:`ConfigParser` object section: section name within config file param: name of parameter within section default: default value Returns: parameter value, or default
def search(self, query, nid=None):
    """Search for posts with ``query``

    :type nid: str
    :param nid: This is the ID of the network to get the feed
        from. This is optional and only to override the existing
        `network_id` entered when created the class
    :type query: str
    :param query: The search query; should just be keywords for posts
        that you are looking for
    """
    response = self.request(
        method="network.search",
        nid=nid,
        data=dict(query=query),
    )
    failure_msg = "Search with query '{}' failed.".format(query)
    return self._handle_error(response, failure_msg)
Search for posts with ``query`` :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class :type query: str :param query: The search query; should just be keywords for posts that you are looking for
def get_secret(
        end_state: NettingChannelEndState,
        secrethash: SecretHash,
) -> Optional[Secret]:
    """Returns `secret` if the `secrethash` is for a lock with a known secret."""
    # Off-chain unlocks take precedence; fall back to on-chain unlocks.
    for known_locks in (
            end_state.secrethashes_to_unlockedlocks,
            end_state.secrethashes_to_onchain_unlockedlocks,
    ):
        unlock_proof = known_locks.get(secrethash)
        if unlock_proof is not None:
            return unlock_proof.secret
    return None
Returns `secret` if the `secrethash` is for a lock with a known secret.
def setRequest(self, endPointReference, action):
    '''Populate the WS-Addressing message-information headers for an
    outgoing request: MessageID, Action, To and From, plus any reference
    properties carried by the optional endpoint reference.

    NOTE: this is Python 2 code (``raise Exc, msg`` syntax); do not run
    under Python 3 without porting.
    '''
    self._action = action
    self.header_pyobjs = None
    pyobjs = []
    namespaceURI = self.wsAddressURI
    addressTo = self._addressTo
    # Fresh unique id per request, derived from the current timestamp.
    messageID = self._messageID = "uuid:%s" %time.time()

    # Set Message Information Headers
    # MessageID
    typecode = GED(namespaceURI, "MessageID")
    pyobjs.append(typecode.pyclass(messageID))

    # Action
    typecode = GED(namespaceURI, "Action")
    pyobjs.append(typecode.pyclass(action))

    # To
    typecode = GED(namespaceURI, "To")
    pyobjs.append(typecode.pyclass(addressTo))

    # From -- the anonymous sender address defined by WS-Addressing.
    typecode = GED(namespaceURI, "From")
    mihFrom = typecode.pyclass()
    mihFrom._Address = self.anonymousURI
    pyobjs.append(mihFrom)

    if endPointReference:
        # The EPR must be a typed EndpointReferenceType instance.
        if hasattr(endPointReference, 'typecode') is False:
            raise EvaluateException, 'endPointReference must have a typecode attribute'
        if isinstance(endPointReference.typecode, \
            GTD(namespaceURI ,'EndpointReferenceType')) is False:
            raise EvaluateException, 'endPointReference must be of type %s' \
                %GTD(namespaceURI ,'EndpointReferenceType')
        # Copy any reference properties (opaque <any> elements) into the
        # outgoing headers.
        ReferenceProperties = getattr(endPointReference, '_ReferenceProperties', None)
        if ReferenceProperties is not None:
            for v in getattr(ReferenceProperties, '_any', ()):
                if not hasattr(v,'typecode'):
                    raise EvaluateException, '<any> element, instance missing typecode attribute'

                pyobjs.append(v)

    self.header_pyobjs = tuple(pyobjs)
Call For Request
def get_logger(name):
    """Return the logger *name*, attaching a no-op handler if it has none.

    Attaching :class:`logging.NullHandler` silences the "No handlers could
    be found" warning for library code while leaving actual configuration
    to the application.

    :param name: logger name, typically ``__name__``
    :return: the :class:`logging.Logger` instance
    """
    log = logging.getLogger(name)
    if not log.handlers:
        # Use the stdlib NullHandler (available since Python 2.7/3.1)
        # rather than a hand-rolled equivalent.
        log.addHandler(logging.NullHandler())
    return log
Return logger with null handle
def date_struct_nn(year, month, day, tz="UTC"):
    """
    Assemble a date object, substituting 1 for a missing day or month so
    that partial dates are easier to deal with.
    """
    return date_struct(year, month or 1, day or 1, tz)
Assemble a date object but if day or month is none set them to 1 to make it easier to deal with partial dates
def command(execute=None):  # noqa: E501
    """Execute a Command

    Execute a command # noqa: E501

    :param execute: The data needed to execute this command
    :type execute: dict | bytes

    :rtype: Response
    """
    if connexion.request.is_json:
        # Deserialize the JSON request body into an Execute model instance.
        execute = Execute.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response
def _init_valid_functions(action_dimensions):
    """Initialize ValidFunctions and set up the callbacks."""
    screen_size = tuple(int(i) for i in action_dimensions.screen)
    minimap_size = tuple(int(i) for i in action_dimensions.minimap)
    # "screen2" shares the screen resolution.
    sizes = {
        "screen": screen_size,
        "screen2": screen_size,
        "minimap": minimap_size,
    }

    types = actions.Arguments(*[
        actions.ArgumentType.spec(t.id, t.name, sizes.get(t.name, t.sizes))
        for t in actions.TYPES])

    functions = actions.Functions([
        actions.Function.spec(f.id, f.name, tuple(types[t.id] for t in f.args))
        for f in actions.FUNCTIONS])

    return actions.ValidActions(types, functions)
Initialize ValidFunctions and set up the callbacks.
def _get_offset(text, visible_width, unicode_aware=True):
    """
    Find the character offset within some text for a given visible offset
    (taking into account the fact that some character glyphs are double width).

    :param text: The text to analyze
    :param visible_width: The required location within that text (as seen on screen).
    :param unicode_aware: Whether to account for double-width glyphs via
        wcwidth; if False, assume every character occupies one cell.
    :return: The offset within text (as a character offset within the string).
    """
    result = 0
    width = 0
    if unicode_aware:
        for c in text:
            # Stop once the accumulated width has reached the target column.
            if visible_width - width <= 0:
                break
            result += 1
            width += wcwidth(c)
        # If we overshot, the last glyph was double-width and straddles the
        # target column; step back one character.
        if visible_width - width < 0:
            result -= 1
    else:
        # One cell per character: the offset is just the column, clamped.
        result = min(len(text), visible_width)
    return result
Find the character offset within some text for a given visible offset (taking into account the fact that some character glyphs are double width). :param text: The text to analyze :param visible_width: The required location within that text (as seen on screen). :return: The offset within text (as a character offset within the string).
def get_file_type(filename):
    """ Returns I/O object to use for file.

    Parameters
    ----------
    filename : str
        Name of file.

    Returns
    -------
    file_type : {InferenceFile, InferenceTXTFile}
        The type of inference file object to use.
    """
    hdf_extensions = (".hdf", ".h5", ".bkup", ".checkpoint")
    txt_extensions = (".txt", ".dat", ".csv")
    if filename.endswith(hdf_extensions):
        # HDF files record the writer class in their attrs.
        with _h5py.File(filename, 'r') as fp:
            filetype = fp.attrs['filetype']
        return filetypes[filetype]
    if filename.endswith(txt_extensions):
        return InferenceTXTFile
    raise TypeError("Extension is not supported.")
Returns I/O object to use for file. Parameters ---------- filename : str Name of file. Returns ------- file_type : {InferenceFile, InferenceTXTFile} The type of inference file object to use.
def _format_explain(self): """ Format the results of an EXPLAIN """ lines = [] for (command, kwargs) in self._call_list: lines.append(command + " " + pformat(kwargs)) return "\n".join(lines)
Format the results of an EXPLAIN
def convert_graph(self, graph_file, input_format,
                  output_formats, email=None,
                  use_threads=False, callback=None):
    """
    Convert a graph from one GraphFormat to another.

    Arguments:
        graph_file (str): Filename of the file to convert
        input_format (str): A grute.GraphFormats
        output_formats (str[]): A grute.GraphFormats
        email (str: self.email)*: The email to notify
        use_threads (bool: False)*: Whether to use Python threads to run
            computation in the background when waiting for the server
        callback (function: None)*: The function to run upon completion of
            the call, if using threads. (Will not be called if use_threads
            is set to False.)

    Returns:
        HTTP Response if use_threads=False. Else, no return value.

    Raises:
        RemoteDataUploadError: If there's an issue uploading the data
        RemoteError: If there's a server-side issue
        ValueError: If there's a problem with the supplied arguments
    """
    if email is None:
        email = self.email

    if input_format not in GraphFormats._any:
        raise ValueError("Invalid input format {}.".format(input_format))

    if not set(output_formats) <= set(GraphFormats._any):
        raise ValueError("Output formats must be a GraphFormats.")

    if use_threads and callback is not None:
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        # inspect.getargspec was removed in Python 3.11; prefer the modern
        # replacement when it exists, falling back for old interpreters.
        _argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        if len(_argspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")

    if not (os.path.exists(graph_file)):
        raise ValueError("No such file, {}!".format(graph_file))

    url = "convert/{}/{}/{}/l".format(
        email,
        input_format,
        ','.join(output_formats)
    )

    if " " in url:
        raise ValueError("Spaces are not permitted in arguments.")

    if use_threads:
        # Run in the background.
        convert_thread = threading.Thread(
            target=self._run_convert_graph,
            args=[url, graph_file, callback]
        )
        convert_thread.start()
    else:
        # Run in the foreground.
        return self._run_convert_graph(url, graph_file)
Convert a graph from one GraphFormat to another. Arguments: graph_file (str): Filename of the file to convert input_format (str): A grute.GraphFormats output_formats (str[]): A grute.GraphFormats email (str: self.email)*: The email to notify use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads=False. Else, no return value. Raises: RemoteDataUploadError: If there's an issue uploading the data RemoteError: If there's a server-side issue ValueError: If there's a problem with the supplied arguments
async def _request(self, *, http_verb, api_url, req_args):
    """Submit the HTTP request with the running session or a new session.

    Returns:
        A dictionary of the response data.
    """

    async def _perform(session, note):
        # Shared request/response handling; `note` records which session
        # variant serviced the call.
        async with session.request(http_verb, api_url, **req_args) as res:
            self._logger.debug(note)
            return {
                "data": await res.json(),
                "headers": res.headers,
                "status_code": res.status,
            }

    if self.session and not self.session.closed:
        return await _perform(self.session, "Ran the request with existing session.")

    timeout = aiohttp.ClientTimeout(total=self.timeout)
    async with aiohttp.ClientSession(loop=self._event_loop, timeout=timeout) as session:
        return await _perform(session, "Ran the request with a new session.")
Submit the HTTP request with the running session or a new session. Returns: A dictionary of the response data.
def new(expr, *args, **kwargs): """ Template an object. :: >>> class MyObject: ... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs): ... self.arg1 = arg1 ... self.arg2 = arg2 ... self.var_args = var_args ... self.foo = foo ... self.bar = bar ... self.kwargs = kwargs ... >>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z']) :: >>> import uqbar >>> new_object = uqbar.objects.new(my_object, foo=666, bar=1234) >>> print(uqbar.objects.get_repr(new_object)) MyObject( 'a', 'b', 'c', 'd', bar=1234, foo=666, quux=['y', 'z'], ) Original object is unchanged: :: >>> print(uqbar.objects.get_repr(my_object)) MyObject( 'a', 'b', 'c', 'd', foo='x', quux=['y', 'z'], ) """ # TODO: Clarify old vs. new variable naming here. current_args, current_var_args, current_kwargs = get_vars(expr) new_kwargs = current_kwargs.copy() recursive_arguments = {} for key in tuple(kwargs): if "__" in key: value = kwargs.pop(key) key, _, subkey = key.partition("__") recursive_arguments.setdefault(key, []).append((subkey, value)) for key, pairs in recursive_arguments.items(): recursed_object = current_args.get(key, current_kwargs.get(key)) if recursed_object is None: continue kwargs[key] = new(recursed_object, **dict(pairs)) if args: current_var_args = args for key, value in kwargs.items(): if key in current_args: current_args[key] = value else: new_kwargs[key] = value new_args = list(current_args.values()) + list(current_var_args) return type(expr)(*new_args, **new_kwargs)
Template an object. :: >>> class MyObject: ... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs): ... self.arg1 = arg1 ... self.arg2 = arg2 ... self.var_args = var_args ... self.foo = foo ... self.bar = bar ... self.kwargs = kwargs ... >>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z']) :: >>> import uqbar >>> new_object = uqbar.objects.new(my_object, foo=666, bar=1234) >>> print(uqbar.objects.get_repr(new_object)) MyObject( 'a', 'b', 'c', 'd', bar=1234, foo=666, quux=['y', 'z'], ) Original object is unchanged: :: >>> print(uqbar.objects.get_repr(my_object)) MyObject( 'a', 'b', 'c', 'd', foo='x', quux=['y', 'z'], )
def import_log_funcs():
    """Copy the common log functions from the global logger into this module."""
    global g_logger
    this_module = sys.modules[__name__]
    for name in _logging_funcs:
        setattr(this_module, name, getattr(g_logger, name))
Import the common log functions from the global logger to the module.
def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
    """Adds a dimension and its values to the object

    Requires the dimension name or object, the desired position in
    the key dimensions and a key value scalar or sequence of the
    same length as the existing keys.

    Args:
        dimension: Dimension or dimension spec to add
        dim_pos (int): Integer index to insert dimension at
        dim_val (scalar or ndarray): Dimension value(s) to add
        vdim: Disabled, this type does not have value dimensions
        **kwargs: Keyword arguments passed to the cloned element

    Returns:
        Cloned object containing the new dimension
    """
    dimension = asdim(dimension)

    if dimension in self.dimensions():
        raise Exception('{dim} dimension already defined'.format(dim=dimension.name))

    if vdim and self._deep_indexable:
        raise Exception('Cannot add value dimension to object that is deep indexable')

    if vdim:
        dims = self.vdims[:]
        dims.insert(dim_pos, dimension)
        dimensions = dict(vdims=dims)
        # Value dimensions sit after all key dimensions in each item tuple.
        dim_pos += self.ndims
    else:
        dims = self.kdims[:]
        dims.insert(dim_pos, dimension)
        dimensions = dict(kdims=dims)

    if isinstance(dim_val, basestring) or not hasattr(dim_val, '__iter__'):
        # Scalar value: repeat it for every existing key.
        dim_val = cycle([dim_val])
    else:
        if not len(dim_val) == len(self):
            # Fixed: the two literals previously concatenated without a
            # space, producing "...same lengthas existing keys."
            raise ValueError("Added dimension values must be same length "
                             "as existing keys.")

    items = OrderedDict()
    for dval, (key, val) in zip(dim_val, self.data.items()):
        if vdim:
            new_val = list(val)
            new_val.insert(dim_pos, dval)
            items[key] = tuple(new_val)
        else:
            new_key = list(key)
            new_key.insert(dim_pos, dval)
            items[tuple(new_key)] = val

    return self.clone(items, **dict(dimensions, **kwargs))
Adds a dimension and its values to the object Requires the dimension name or object, the desired position in the key dimensions and a key value scalar or sequence of the same length as the existing keys. Args: dimension: Dimension or dimension spec to add dim_pos (int): Integer index to insert dimension at dim_val (scalar or ndarray): Dimension value(s) to add vdim: Disabled, this type does not have value dimensions **kwargs: Keyword arguments passed to the cloned element Returns: Cloned object containing the new dimension
def _aggregation_op(cls,
                    op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
                    x: 'TensorFluent',
                    vars_list: List[str]) -> 'TensorFluent':
    '''Returns a TensorFluent for the aggregation `op` applied to fluent `x`.

    Args:
        op: The aggregation operation.
        x: The input fluent.
        vars_list: The list of variables to be aggregated over.

    Returns:
        A TensorFluent wrapping the aggregation operator's output.
    '''
    axis = cls._varslist2axis(x, vars_list)
    tensor = op(x.tensor, axis)
    # Aggregated variables are dropped from the resulting scope.
    scope = [var for var in x.scope.as_list() if var not in vars_list]
    return TensorFluent(tensor, scope, batch=x.batch)
Returns a TensorFluent for the aggregation `op` applied to fluent `x`. Args: op: The aggregation operation. x: The input fluent. vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the aggregation operator's output.
def error_value_processor(value, error):
    """
    If an error is a percentage, convert it to a float and return that
    percentage of the supplied value.

    :param value: base value, e.g. 10
    :param error: an absolute error, or a percentage string, e.g. "20.0%"
    :return: the absolute error, e.g. 2.0 for the above case.
    """
    if isinstance(error, (str, unicode)):
        try:
            if "%" in error:
                # e.g. "20.0%" -> 20.0, then take that fraction of value.
                error_float = float(error.replace("%", ""))
                return (value / 100) * error_float
            elif error == "":
                error = 0.0
            else:
                error = float(error)
        except (ValueError, TypeError):
            # Previously a bare `except: pass`; narrowed to the conversion
            # failures we expect.  Unparseable strings fall through and are
            # returned unchanged (original best-effort behaviour).
            pass
    return error
If an error is a percentage, we convert to a float, then calculate the percentage of the supplied value. :param value: base value, e.g. 10 :param error: e.g. 20.0% :return: the absolute error, e.g. 2.0 for the above case.
def build(self, lv2_uri):
    """
    Returns a new :class:`.Lv2Effect` by the valid lv2_uri

    :param string lv2_uri:
    :return Lv2Effect: Effect created
    """
    plugin = self._plugins.get(lv2_uri)
    if plugin is None:
        raise Lv2EffectBuilderError(
            "Lv2EffectBuilder not contains metadata information about the plugin '{}'. \n"
            "Try re-scan the installed plugins using the reload method::\n"
            "    >>> lv2_effect_builder.reload(lv2_effect_builder.lv2_plugins_data())".format(lv2_uri))
    return Lv2Effect(plugin)
Returns a new :class:`.Lv2Effect` by the valid lv2_uri :param string lv2_uri: :return Lv2Effect: Effect created
def __response_message_descriptor(self, message_type, method_id):
    """Describes the response.

    Args:
        message_type: messages.Message class, The message to describe.
        method_id: string, Unique method identifier (e.g. 'myapi.items.method')

    Returns:
        Dictionary describing the response.
    """
    # Every response shares this skeleton; a schema ref is added only for
    # non-void message types.
    descriptor = {'200': {'description': 'A successful response'}}

    if message_type != message_types.VoidMessage():
        message_class = message_type.__class__
        self.__parser.add_message(message_class)
        schema_ref = self.__parser.ref_for_message_type(message_class)
        self.__response_schema[method_id] = schema_ref
        descriptor['200']['schema'] = {
            '$ref': '#/definitions/{0}'.format(schema_ref)}

    return dict(descriptor)
Describes the response. Args: message_type: messages.Message class, The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') Returns: Dictionary describing the response.
def _info_long(self) -> Optional[str]: """Extract journey information.""" try: return str( html.unescape(self.journey.InfoTextList.InfoText.get("textL")).replace( "<br />", "\n" ) ) except AttributeError: return None
Extract journey information.
def Case(self, caseVal, *statements):
    """Add a c-like case branch to this switch statement.

    :param caseVal: value to compare the switch subject against; must be
        a fully valid constant of the subject's type and not already used
        by another case
    :param statements: statements to execute when the case matches
    :return: self, so calls can be chained fluently
    """
    # This statement must be a top-level statement (not nested).
    assert self.parentStm is None
    caseVal = toHVal(caseVal, self.switchOn._dtype)
    assert isinstance(caseVal, Value), caseVal
    assert caseVal._isFullVld(), "Cmp with invalid value"
    assert caseVal not in self._case_value_index, (
        "Switch statement already has case for value ", caseVal)
    self.rank += 1
    case = []
    # Remember which branch belongs to this value for later lookup.
    self._case_value_index[caseVal] = len(self.cases)
    self.cases.append((caseVal, case))
    # Register the equality comparison as an input driving this statement.
    cond = self.switchOn._eq(caseVal)
    self._inputs.append(cond)
    cond.endpoints.append(self)

    self._register_stements(statements, case)
    return self
c-like case of switch statement
def remove_node(self, node, stop=False):
    """Removes a node from the cluster.

    By default, it doesn't also stop the node, just remove from
    the known hosts of this cluster.

    :param node: node to remove
    :type node: :py:class:`Node`

    :param stop: Stop the node
    :type stop: bool
    """
    if node.kind not in self.nodes:
        # Bug fix: the message was previously passed to NodeNotFound with
        # extra positional args and never %-formatted; interpolate first.
        raise NodeNotFound(
            "Unable to remove node %s: invalid node type `%s`."
            % (node.name, node.kind))
    else:
        try:
            index = self.nodes[node.kind].index(node)
            if self.nodes[node.kind][index]:
                del self.nodes[node.kind][index]
            if stop:
                node.stop()
            self._naming_policy.free(node.kind, node.name)
            self.repository.save_or_update(self)
        except ValueError:
            raise NodeNotFound("Node %s not found in cluster" % node.name)
Removes a node from the cluster. By default, it doesn't also stop the node, just remove from the known hosts of this cluster. :param node: node to remove :type node: :py:class:`Node` :param stop: Stop the node :type stop: bool
def roll(self, speed, heading, state=1):
    """
    Issue a roll command to the device.

    speed can have value between 0x00 and 0xFF
    heading can have value between 0 and 359
    """
    cmd = request.Roll(self.seq, speed, heading, state)
    return self.write(cmd)
speed can have value between 0x00 and 0xFF heading can have value between 0 and 359
def blockSelectionSignals(self, state):
    """
    Sets the state for the selection finished signal.  When blocking is
    turned off, the pending signal is emitted immediately.  This is used
    internally to control selection signal propagation, so should not
    really be called unless you know why you are calling it.

    :param      state <bool>
    """
    if self._selectionSignalsBlocked == state:
        return
    self._selectionSignalsBlocked = state
    if not state:
        self.emitSelectionFinished()
Sets the state for the selection finished signal. When it \ is set to True, it will emit the signal. This is used \ internally to control selection signal propagation, so \ should not really be called unless you know why you are \ calling it. :param state <bool>
def _parseSCDOCDC(self, src): """[S|CDO|CDC]*""" while 1: src = src.lstrip() if src.startswith('<!--'): src = src[4:] elif src.startswith('-->'): src = src[3:] else: break return src
[S|CDO|CDC]*
def storage(self, *, resource=None):
    """ Get an instance to handle file storage (OneDrive / Sharepoint)
    for the specified account resource

    :param str resource: Custom resource to be used in this drive object
     (Defaults to parent main_resource)
    :return: a representation of OneDrive File Storage
    :rtype: Storage
    :raises RuntimeError: if protocol doesn't support the feature
    """
    if isinstance(self.protocol, MSGraphProtocol):
        return Storage(parent=self, main_resource=resource)
    # TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
    raise RuntimeError(
        'Drive options only works on Microsoft Graph API')
Get an instance to handle file storage (OneDrive / Sharepoint) for the specified account resource :param str resource: Custom resource to be used in this drive object (Defaults to parent main_resource) :return: a representation of OneDrive File Storage :rtype: Storage :raises RuntimeError: if protocol doesn't support the feature
def by_value(self, value, default=None):
    """ Returns the key for the given value """
    matches = [key for key, val in self.items() if val == value]
    if matches:
        return matches[0]
    if default is not None:
        return default
    raise ValueError('%s' % value)
Returns the key for the given value
def do_cd(self, line):
    """cd DIRECTORY

    Changes the current directory. ~ expansion is supported, and cd -
    goes to the previous directory.
    """
    args = self.line_to_args(line)
    if len(args) == 0:
        # Bare `cd` goes home, mirroring shell behaviour.
        dirname = '~'
    else:
        if args[0] == '-':
            # `cd -` returns to the directory we were in before the last cd.
            dirname = self.prev_dir
        else:
            dirname = args[0]
    dirname = resolve_path(dirname)

    mode = auto(get_mode, dirname)
    if mode_isdir(mode):
        # Track previous/current directory in module-level state so other
        # commands (and subsequent `cd -`) see the change.
        global cur_dir
        self.prev_dir = cur_dir
        cur_dir = dirname
        auto(chdir, dirname)
    else:
        print_err("Directory '%s' does not exist" % dirname)
cd DIRECTORY Changes the current directory. ~ expansion is supported, and cd - goes to the previous directory.
def get(key, default=-1):
    """Backport support for original codes."""
    if isinstance(key, int):
        return Suite(key)
    if key not in Suite._member_map_:
        # Unknown name: register it on the fly with the default value.
        extend_enum(Suite, key, default)
    return Suite[key]
Backport support for original codes.
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a command in a subprocess, emulating the argument handling behaviour of
    SSH.

    :param bytes cmd:
        String command line, passed to user's shell.
    :param bytes in_data:
        Optional standard input for the command.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    assert isinstance(cmd, mitogen.core.UnicodeType)
    # Hand the raw command line to the user's shell, as SSH does.
    argv = [get_user_shell(), '-c', cmd]
    return exec_args(
        args=argv,
        in_data=in_data,
        chdir=chdir,
        shell=shell,
        emulate_tty=emulate_tty,
    )
Run a command in a subprocess, emulating the argument handling behaviour of SSH. :param bytes cmd: String command line, passed to user's shell. :param bytes in_data: Optional standard input for the command. :return: (return code, stdout bytes, stderr bytes)
def do_set_log_level(self, arg):
    """Set the log level.

    Usage: set_log_level i|v

    Parameters:
        log_level: i - info | v - verbose
    """
    # Dispatch table avoids the nested if/else over the two valid flags.
    level_map = {'i': logging.INFO, 'v': logging.DEBUG}
    if arg not in level_map:
        _LOGGING.error('Log level value error.')
        self.do_help('set_log_level')
        return
    _LOGGING.info('Setting log level to %s', arg)
    _LOGGING.setLevel(level_map[arg])
    _INSTEONPLM_LOGGING.setLevel(level_map[arg])
Set the log level. Usage: set_log_level i|v Parameters: log_level: i - info | v - verbose
def load(self, verbose=False):
    """Load the list of songs.

    Note that this only loads a list of songs that this artist was the
    main artist of. If they were only featured in the song, that song
    won't be listed here. There is a list on the artist page for that, I
    just haven't added any parsing code for that, since I don't need it.
    """
    self._songs = []
    song_rows_xp = r'//*[@id="popular"]/div/table/tbody/tr'
    songlist_pagination_xp = r'//*[@id="main-content"]/div[1]/' \
        'div[2]/p/span/a'
    current_page = 1
    last_page = 1
    while current_page <= last_page:
        if verbose:
            print('retrieving page %d' % current_page)
        page = requests.get(ARTIST_URL.format(artist=self.name,
                                              n=current_page))
        tree = html.fromstring(page.text)
        for row in tree.xpath(song_rows_xp):
            links = row.xpath(r'./td/a[contains(@class,"title")]')
            assert len(links) == 1
            self._songs.append(Song(url=links[0].attrib['href']))
        # The number of pagination links tells us how many pages exist.
        last_page = len(tree.xpath(songlist_pagination_xp))
        current_page += 1
    return self
Load the list of songs. Note that this only loads a list of songs that this artist was the main artist of. If they were only featured in the song, that song won't be listed here. There is a list on the artist page for that, I just haven't added any parsing code for that, since I don't need it.
def register_comet_callback(self, *args, **kwargs):
    """Registers a single Comet callback function (see :ref:`comet-plugin`).

    Thin wrapper around :func:`sijax.plugin.comet.register_comet_callback`:
    that function's first argument is the Sijax instance, which this method
    supplies automatically; everything else is forwarded unchanged.
    """
    register = sijax.plugin.comet.register_comet_callback
    register(self._sijax, *args, **kwargs)
Registers a single Comet callback function (see :ref:`comet-plugin`). Refer to :func:`sijax.plugin.comet.register_comet_callback` for more details - its signature differs slightly. This method's signature is the same, except that the first argument that :func:`sijax.plugin.comet.register_comet_callback` expects is the Sijax instance, and this method does that automatically, so you don't have to do it.
def in_project_directory() -> bool:
    """
    Returns whether or not the current working directory is a Cauldron
    project directory, which contains a cauldron.json file.
    """
    project_path = os.path.join(os.path.realpath(os.curdir), 'cauldron.json')
    # os.path.isfile already returns False for non-existent paths, so the
    # previous `exists(...) and isfile(...)` double check was redundant.
    return os.path.isfile(project_path)
Returns whether or not the current working directory is a Cauldron project directory, which contains a cauldron.json file.
def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
    """ {_gate_plot_doc} """
    # Delegate plotting to every member gate with the same arguments.
    for member in self.gates:
        member.plot(*args, flip=flip, ax_channels=ax_channels, ax=ax,
                    **kwargs)
{_gate_plot_doc}
def _lazy_load_get_model():
    """Lazy loading of get_model.

    get_model loads django.conf.settings, which may fail if the settings
    haven't been configured yet.
    """
    if django is not None:
        from django import apps as django_apps
        _get_model = django_apps.apps.get_model
    else:
        # Without django installed, defer the original import error until
        # someone actually asks for a model.
        def _get_model(app, model):
            raise import_failure
    _LAZY_LOADS['get_model'] = _get_model
Lazy loading of get_model. get_model loads django.conf.settings, which may fail if the settings haven't been configured yet.
def get_new_selection_attr_state(self, selection, attr_key):
    """Toggle the attribute for the selection and return the next value.

    Parameters
    ----------
    selection: Selection object
        Selection for which the toggled attribute shall be returned
    attr_key: Hashable
        Attribute key
    """
    cell_attributes = self.grid.code_array.cell_attributes
    cycle = self.attr_toggle_values[attr_key]
    # Successor of each value in the toggle cycle, wrapping around.
    successor = dict(zip(cycle, cycle[1:] + cycle[:1]))

    # Merge every attribute dict recorded for this exact selection.
    merged = {}
    for attr in cell_attributes:
        if attr[0] == selection:
            merged.update(attr[2])

    if attr_key in merged:
        return successor[merged[attr_key]]
    # Nothing recorded yet: advance from the default (first) entry.
    return cycle[1]
Toggles new attr selection state and returns it Parameters ---------- selection: Selection object \tSelection for which attr toggle shall be returned attr_key: Hashable \tAttribute key
def update(self, cur_value, mesg=None):
    """Update progressbar with current value of process

    Parameters
    ----------
    cur_value : number
        Current value of process. Should be <= max_value (but this is not
        enforced). The percent of the progressbar will be computed as
        (cur_value / max_value) * 100
    mesg : str
        Message to display to the right of the progressbar. If None, the
        last message provided will be used. To clear the current message,
        pass a null string, ''.
    """
    self.cur_value = cur_value
    # float() guarantees fractional percentages on Python 2-style division.
    fraction = float(self.cur_value) / self.max_value
    filled = int(fraction * self.max_chars)
    remaining = self.max_chars - filled
    if mesg is not None:
        self.mesg = mesg
    # The template is expected to start with \r so the cursor returns to
    # the start of the line, giving an in-place progressbar display.
    bar = self.template.format(self.progress_character * filled,
                               ' ' * remaining,
                               fraction * 100,
                               self.spinner_symbols[self.spinner_index],
                               self.mesg)
    sys.stdout.write(bar)
    if self.spinner:
        self.spinner_index = (self.spinner_index + 1) % self.n_spinner
    # Flush so pipes and scripts see progress before the program exits.
    sys.stdout.flush()
Update progressbar with current value of process Parameters ---------- cur_value : number Current value of process. Should be <= max_value (but this is not enforced). The percent of the progressbar will be computed as (cur_value / max_value) * 100 mesg : str Message to display to the right of the progressbar. If None, the last message provided will be used. To clear the current message, pass a null string, ''.
def predict(self, X):
    """Predict count for samples in X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = [n_samples,]
        Predicted count for each sample
    """
    features = self._check_array(X)
    # Poisson-style link: exponentiate the linear predictor.
    linear_predictor = dot(features, self._coef)
    return exp(linear_predictor)
Predict count for samples in X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples,] Predicted count for each sample
def late():
    """Used by functions in package.py that are evaluated lazily.

    The term 'late' refers to the fact these package attributes are evaluated
    late, ie when the attribute is queried for the first time.

    If you want to implement a package.py attribute as a function, you MUST use
    this decorator - otherwise it is understood that you want your attribute to
    be a function, not the return value of that function.
    """
    from rez.package_resources_ import package_rex_keys

    def _wrap(fn):
        # Checked here rather than in standard schema validation because the
        # latter causes a very obfuscated error message.
        if fn.__name__ in package_rex_keys:
            raise ValueError("Cannot use @late decorator on function '%s'"
                             % fn.__name__)
        setattr(fn, "_late", True)
        _add_decorator(fn, "late")
        return fn

    return _wrap
Used by functions in package.py that are evaluated lazily. The term 'late' refers to the fact these package attributes are evaluated late, ie when the attribute is queried for the first time. If you want to implement a package.py attribute as a function, you MUST use this decorator - otherwise it is understood that you want your attribute to be a function, not the return value of that function.
def filter_human_only(stmts_in, **kwargs):
    """Filter out statements that are grounded, but not to a human gene.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    remove_bound: Optional[bool]
        If true, removes all bound conditions that are grounded but not to
        human genes. If false (default), filters out statements with boundary
        conditions that are grounded to non-human genes.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    from indra.databases import uniprot_client
    remove_bound = bool(kwargs.get('remove_bound'))
    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements for human genes only...' %
                len(stmts_in))

    def criterion(agent):
        # An agent passes unless its UniProt grounding is explicitly
        # non-human.
        upid = agent.db_refs.get('UP')
        return not (upid and not uniprot_client.is_human(upid))

    def _passes(stmt):
        # Agents are checked in order; bound conditions are either stripped
        # in place (remove_bound) or used to reject the whole statement.
        for agent in stmt.agent_list():
            if agent is None:
                continue
            if not criterion(agent):
                return False
            if remove_bound:
                _remove_bound_conditions(agent, criterion)
            elif _any_bound_condition_fails_criterion(agent, criterion):
                return False
        return True

    stmts_out = [st for st in stmts_in if _passes(st)]
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
Filter out statements that are grounded, but not to a human gene. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes all bound conditions that are grounded but not to human genes. If false (default), filters out statements with boundary conditions that are grounded to non-human genes. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
def prior_from_config(cp, prior_section='prior'):
    """Loads a prior distribution from the given config file.

    Parameters
    ----------
    cp : pycbc.workflow.WorkflowConfigParser
        The config file to read.
    prior_section : str, optional
        The section to retrieve the prior from. Defaults to ``'prior'``.

    Returns
    -------
    distributions.JointDistribution
        The prior distribution.
    """
    # Variable params come from the config; static params are ignored here.
    variable_params, _ = distributions.read_params_from_config(
        cp, prior_section=prior_section, vargs_section='variable_params',
        sargs_section='static_params')
    # Constraints restrict the joint support of the prior.
    constraints = distributions.read_constraints_from_config(cp)
    # One pycbc distribution instance per variable parameter.
    dists = distributions.read_distributions_from_config(cp, prior_section)
    return distributions.JointDistribution(variable_params, *dists,
                                           constraints=constraints)
Loads a prior distribution from the given config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser The config file to read. sections : list of str, optional The sections to retrieve the prior from. If ``None`` (the default), will look in sections starting with 'prior'. Returns ------- distributions.JointDistribution The prior distribution.
def kill(self, sig):
    '''Invoke the stop on the event loop method.'''
    # NOTE(review): `sig` is accepted for interface compatibility but is
    # not used by the implementation.
    if not (self.is_alive() and self._loop):
        return
    self._loop.call_soon_threadsafe(self._loop.stop)
Invoke the stop on the event loop method.
def chrome_getdata_view(request):
    """Get the data of the last notification sent to the current user.

    This is needed because Chrome, as of version 44, doesn't support
    sending a data payload to a notification. Thus, information on what
    the notification is actually for must be manually fetched.

    :param request: Django HTTP request; ``request.user`` decides which
        notification history to read.
    :returns: ``HttpResponse`` with a JSON body (``text/json``).
    """
    data = {}
    if request.user.is_authenticated:
        # authenticated session
        # Most recent GCM notification addressed to this user, if any.
        notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time")
        if notifs.count() > 0:
            notif = notifs.first()
            ndata = notif.data
            if "title" in ndata and "text" in ndata:
                # NOTE(review): the inner conditionals are redundant here --
                # this branch is only reached when 'title' and 'text' are
                # both present; only the 'url' fallback can trigger.
                data = {
                    "title": ndata['title'] if 'title' in ndata else '',
                    "text": ndata['text'] if 'text' in ndata else '',
                    "url": ndata['url'] if 'url' in ndata else ''
                }
            else:
                # Incomplete payload: fall back to the schedule check.
                schedule_chk = chrome_getdata_check(request)
                if schedule_chk:
                    data = schedule_chk
        else:
            # No notifications on record: try the schedule check, and emit a
            # JSON null if that produces nothing either.
            schedule_chk = chrome_getdata_check(request)
            if schedule_chk:
                data = schedule_chk
            else:
                return HttpResponse("null", content_type="text/json")
    else:
        # Anonymous user: schedule check or a generic catch-all message.
        schedule_chk = chrome_getdata_check(request)
        if schedule_chk:
            data = schedule_chk
        else:
            data = {"title": "Check Intranet",
                    "text": "You have a new notification that couldn't be loaded right now."}
    j = json.dumps(data)
    return HttpResponse(j, content_type="text/json")
Get the data of the last notification sent to the current user. This is needed because Chrome, as of version 44, doesn't support sending a data payload to a notification. Thus, information on what the notification is actually for must be manually fetched.
def remove_reactions(self, reactions, remove_orphans=False):
    """Remove reactions from the model.

    The change is reverted upon exit when using the model as a context.

    Parameters
    ----------
    reactions : list
        A list with reactions (`cobra.Reaction`), or their id's, to remove
    remove_orphans : bool
        Remove orphaned genes and metabolites from the model as well
    """
    # Tolerate a single reaction (or id) being passed instead of a list.
    if isinstance(reactions, string_types) or hasattr(reactions, "id"):
        warn("need to pass in a list")
        reactions = [reactions]

    # When the model is used as a context manager, `context(...)` registers
    # undo callbacks that re-add everything removed below on exit.
    context = get_context(self)

    for reaction in reactions:
        # Make sure the reaction is in the model
        try:
            reaction = self.reactions[self.reactions.index(reaction)]
        except ValueError:
            warn('%s not in %s' % (reaction, self))
        else:
            forward = reaction.forward_variable
            reverse = reaction.reverse_variable

            if context:
                # Registration order matters: these partials are replayed to
                # restore objective coefficients, solver rows, the back-
                # reference to the model, and membership, in that order.
                obj_coef = reaction.objective_coefficient

                if obj_coef != 0:
                    context(partial(
                        self.solver.objective.set_linear_coefficients,
                        {forward: obj_coef, reverse: -obj_coef}))

                context(partial(self._populate_solver, [reaction]))
                context(partial(setattr, reaction, '_model', self))
                context(partial(self.reactions.add, reaction))

            self.remove_cons_vars([forward, reverse])
            self.reactions.remove(reaction)
            reaction._model = None

            # Detach the reaction from each metabolite; optionally drop
            # metabolites that no longer participate in any reaction.
            for met in reaction._metabolites:
                if reaction in met._reaction:
                    met._reaction.remove(reaction)
                    if context:
                        context(partial(met._reaction.add, reaction))
                    if remove_orphans and len(met._reaction) == 0:
                        self.remove_metabolites(met)

            # Same detachment for genes, with orphan removal undoable.
            for gene in reaction._genes:
                if reaction in gene._reaction:
                    gene._reaction.remove(reaction)
                    if context:
                        context(partial(gene._reaction.add, reaction))

                    if remove_orphans and len(gene._reaction) == 0:
                        self.genes.remove(gene)
                        if context:
                            context(partial(self.genes.add, gene))

            # remove reference to the reaction in all groups
            associated_groups = self.get_associated_groups(reaction)
            for group in associated_groups:
                group.remove_members(reaction)
Remove reactions from the model. The change is reverted upon exit when using the model as a context. Parameters ---------- reactions : list A list with reactions (`cobra.Reaction`), or their id's, to remove remove_orphans : bool Remove orphaned genes and metabolites from the model as well
def _configure_send(self, request, **kwargs):
    # type: (ClientRequest, Any) -> Dict[str, str]
    """Configure the kwargs to use with requests.

    See "send" for kwargs details.

    :param ClientRequest request: The request object to be sent.
    :returns: The requests.Session.request kwargs
    :rtype: dict[str,str]
    """
    requests_kwargs = {}  # type: Any
    session = kwargs.pop('session', self.session)

    # If custom session was not create here
    if session is not self.session:
        self._init_session(session)

    session.max_redirects = int(self.config.redirect_policy())
    session.trust_env = bool(self.config.proxies.use_env_settings)

    # Initialize requests_kwargs with "config" value
    requests_kwargs.update(self.config.connection())
    requests_kwargs['allow_redirects'] = bool(self.config.redirect_policy)
    requests_kwargs['headers'] = self.config.headers.copy()

    proxies = self.config.proxies()
    if proxies:
        requests_kwargs['proxies'] = proxies

    # Replace by operation level kwargs
    # We allow some of them, since some like stream or json are controled by msrest
    for key in kwargs:
        if key in self._REQUESTS_KWARGS:
            requests_kwargs[key] = kwargs[key]

    # Hooks. Deprecated, should be a policy
    def make_user_hook_cb(user_hook, session):
        def user_hook_cb(r, *args, **kwargs):
            kwargs.setdefault("msrest", {})['session'] = session
            return user_hook(r, *args, **kwargs)
        return user_hook_cb

    hooks = []
    for user_hook in self.config.hooks:
        # NOTE(review): the hook closure is bound to self.session here even
        # when a custom `session` was passed in -- confirm this is intended.
        hooks.append(make_user_hook_cb(user_hook, self.session))

    if hooks:
        requests_kwargs['hooks'] = {'response': hooks}

    # Configuration callback. Deprecated, should be a policy
    output_kwargs = self.config.session_configuration_callback(
        session,
        self.config,
        kwargs,
        **requests_kwargs
    )
    if output_kwargs is not None:
        requests_kwargs = output_kwargs

    # If custom session was not create here
    if session is not self.session:
        requests_kwargs['session'] = session

    ### Autorest forced kwargs now ###

    # If Autorest needs this response to be streamable. True for compat.
    requests_kwargs['stream'] = kwargs.get('stream', True)

    if request.files:
        requests_kwargs['files'] = request.files
    elif request.data:
        requests_kwargs['data'] = request.data
    requests_kwargs['headers'].update(request.headers)

    return requests_kwargs
Configure the kwargs to use with requests. See "send" for kwargs details. :param ClientRequest request: The request object to be sent. :returns: The requests.Session.request kwargs :rtype: dict[str,str]
def pivot(self, index, **kwargs):
    """
    Pivots a dataframe

    :param index: column (or grouper) to pivot on; used as the default
        ``index`` for :func:`pandas.pivot_table` when no ``index`` kwarg
        is supplied
    :param kwargs: forwarded to :func:`pandas.pivot_table`
    """
    try:
        # Keep the internal hook call for any side effects it has.
        self._pivot(index, **kwargs)
        # Bug fix: the previous code called
        #   pd.pivot_table(self.df, index=kwargs["index"], **kwargs)
        # which raised TypeError (duplicate 'index' keyword) whenever
        # 'index' was in kwargs and KeyError when it was not, so the call
        # could never succeed. Use the positional argument as the default.
        kwargs.setdefault("index", index)
        return pd.pivot_table(self.df, **kwargs)
        # (unreachable commented-out fallback removed)
    except Exception as e:
        self.err(e, "Can not pivot dataframe")
Pivots a dataframe
def run_updater_in_background(self):
    """Start a daemon thread that runs ``self.updater_loop``.

    Bug fix: the previous code passed ``target=self.updater_loop()`` --
    the parentheses *called* the loop in the calling thread and then
    started a thread whose target was the loop's return value. Passing
    the bound method itself runs the loop in the background as intended.
    """
    thread = threading.Thread(target=self.updater_loop)
    thread.daemon = True
    thread.start()
Starts a thread that runs the updater in the background.
def parameter_to_field(self, name):
    """
    Promotes a parameter to a field by creating a new array of same
    size as the other existing fields, filling it with the current
    value of the parameter, and then removing that parameter.
    """
    if name not in self._parameters:
        raise ValueError("no '%s' parameter found" % (name))
    if name in self._fields:
        raise ValueError("field with name '%s' already exists" % (name))
    # Broadcast the scalar parameter value across all fixations.
    value = self._parameters[name]
    column = np.array([value] * self._num_fix)
    self.rm_parameter(name)
    self.add_field(name, column)
Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter.
def _related(self, concept): """ Returns related concepts for a concept. """ return concept.hypernyms() + \ concept.hyponyms() + \ concept.member_meronyms() + \ concept.substance_meronyms() + \ concept.part_meronyms() + \ concept.member_holonyms() + \ concept.substance_holonyms() + \ concept.part_holonyms() + \ concept.attributes() + \ concept.also_sees() + \ concept.similar_tos()
Returns related concepts for a concept.
def write_entity(self, entity):
    """
    Write a single entity to a line in the output file
    """
    db, db_object_id = self._split_prefix(entity)
    taxon = normalize_taxon(entity["taxon"]["id"])
    # Column layout: prefix pair, descriptive fields, taxon, then links.
    descriptive = ('label', 'full_name', 'synonyms', 'type')
    trailing = ('parents', 'xrefs', 'properties')
    row = [db, db_object_id]
    row.extend(entity.get(key) for key in descriptive)
    row.append(taxon)
    row.extend(entity.get(key) for key in trailing)
    self._write_row(row)
Write a single entity to a line in the output file
def user_present(name,
                 password,
                 email,
                 tenant=None,
                 enabled=True,
                 roles=None,
                 profile=None,
                 password_reset=True,
                 project=None,
                 **connection_args):
    '''
    Ensure that the keystone user is present with the specified properties.

    name
        The name of the user to manage

    password
        The password to use for this user.

        .. note::
            If the user already exists and a different password was set for
            the user than the one specified here, the password for the user
            will be updated. Please set the ``password_reset`` option to
            ``False`` if this is not the desired behavior.

    password_reset
        Whether or not to reset password after initial set. Defaults to
        ``True``.

    email
        The email address for this user

    tenant
        The tenant (name) for this user

    project
        The project (name) for this user (overrides tenant in api v3)

    enabled
        Availability state for this user

    roles
        The roles the user should have under given tenants.
        Passed as a dictionary mapping tenant names to a list
        of roles in this tenant, i.e.::

            roles:
                admin:   # tenant
                  - admin  # role
                service:
                  - admin
                  - Member
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User "{0}" will be updated'.format(name)}

    _api_version(profile=profile, **connection_args)

    # In keystone v3 "project" supersedes "tenant".
    if project and not tenant:
        tenant = project

    # Validate tenant if set
    if tenant is not None:
        tenantdata = __salt__['keystone.tenant_get'](name=tenant,
                                                     profile=profile,
                                                     **connection_args)
        if 'Error' in tenantdata:
            ret['result'] = False
            ret['comment'] = 'Tenant / project "{0}" does not exist'.format(tenant)
            return ret
        tenant_id = tenantdata[tenant]['id']
    else:
        tenant_id = None

    # Check if user is already present
    user = __salt__['keystone.user_get'](name=name, profile=profile,
                                         **connection_args)
    if 'Error' not in user:
        # Existing user: diff each property against the desired state.
        change_email = False
        change_enabled = False
        change_tenant = False
        change_password = False

        if user[name].get('email', None) != email:
            change_email = True

        if user[name].get('enabled', None) != enabled:
            change_enabled = True

        if tenant and (_TENANT_ID not in user[name] or
                       user[name].get(_TENANT_ID, None) != tenant_id):
            change_tenant = True

        if (password_reset is True and
                not __salt__['keystone.user_verify_password'](name=name,
                                                              password=password,
                                                              profile=profile,
                                                              **connection_args)):
            change_password = True

        # test=True: report intended changes without applying them.
        if __opts__.get('test') and (change_email or change_enabled or change_tenant or change_password):
            ret['result'] = None
            ret['comment'] = 'User "{0}" will be updated'.format(name)
            if change_email is True:
                ret['changes']['Email'] = 'Will be updated'
            if change_enabled is True:
                ret['changes']['Enabled'] = 'Will be True'
            if change_tenant is True:
                ret['changes']['Tenant'] = 'Will be added to "{0}" tenant'.format(tenant)
            if change_password is True:
                ret['changes']['Password'] = 'Will be updated'
            return ret

        ret['comment'] = 'User "{0}" is already present'.format(name)

        if change_email:
            __salt__['keystone.user_update'](name=name, email=email, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Email'] = 'Updated'

        if change_enabled:
            __salt__['keystone.user_update'](name=name, enabled=enabled, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Enabled'] = 'Now {0}'.format(enabled)

        if change_tenant:
            __salt__['keystone.user_update'](name=name, tenant=tenant, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Tenant'] = 'Added to "{0}" tenant'.format(tenant)

        if change_password:
            __salt__['keystone.user_password_update'](name=name, password=password, profile=profile,
                                                      **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Password'] = 'Updated'

        if roles:
            # Reconcile roles per tenant: add missing ones, remove extras.
            for tenant in roles:
                args = dict({'user_name': name, 'tenant_name':
                             tenant, 'profile': profile}, **connection_args)
                tenant_roles = __salt__['keystone.user_role_list'](**args)
                for role in roles[tenant]:
                    if role not in tenant_roles:
                        if __opts__.get('test'):
                            ret['result'] = None
                            # NOTE(review): grammar typo "will been" in this
                            # user-facing message (kept verbatim here).
                            ret['comment'] = 'User roles "{0}" will been updated'.format(name)
                            return ret
                        addargs = dict({'user': name, 'role': role,
                                        'tenant': tenant,
                                        'profile': profile},
                                       **connection_args)
                        newrole = __salt__['keystone.user_role_add'](**addargs)
                        if 'roles' in ret['changes']:
                            ret['changes']['roles'].append(newrole)
                        else:
                            ret['changes']['roles'] = [newrole]
                roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
                for role in roles_to_remove:
                    if __opts__.get('test'):
                        ret['result'] = None
                        ret['comment'] = 'User roles "{0}" will been updated'.format(name)
                        return ret
                    addargs = dict({'user': name, 'role': role,
                                    'tenant': tenant,
                                    'profile': profile},
                                   **connection_args)
                    oldrole = __salt__['keystone.user_role_remove'](**addargs)
                    if 'roles' in ret['changes']:
                        ret['changes']['roles'].append(oldrole)
                    else:
                        ret['changes']['roles'] = [oldrole]
    else:
        # Create that user!
        if __opts__.get('test'):
            ret['result'] = None
            ret['comment'] = 'Keystone user "{0}" will be added'.format(name)
            ret['changes']['User'] = 'Will be created'
            return ret
        __salt__['keystone.user_create'](name=name,
                                         password=password,
                                         email=email,
                                         tenant_id=tenant_id,
                                         enabled=enabled,
                                         profile=profile,
                                         **connection_args)
        if roles:
            for tenant in roles:
                for role in roles[tenant]:
                    __salt__['keystone.user_role_add'](user=name,
                                                       role=role,
                                                       tenant=tenant,
                                                       profile=profile,
                                                       **connection_args)
        ret['comment'] = 'Keystone user {0} has been added'.format(name)
        ret['changes']['User'] = 'Created'

    return ret
Ensure that the keystone user is present with the specified properties. name The name of the user to manage password The password to use for this user. .. note:: If the user already exists and a different password was set for the user than the one specified here, the password for the user will be updated. Please set the ``password_reset`` option to ``False`` if this is not the desired behavior. password_reset Whether or not to reset password after initial set. Defaults to ``True``. email The email address for this user tenant The tenant (name) for this user project The project (name) for this user (overrides tenant in api v3) enabled Availability state for this user roles The roles the user should have under given tenants. Passed as a dictionary mapping tenant names to a list of roles in this tenant, i.e.:: roles: admin: # tenant - admin # role service: - admin - Member
def add_header_part(self):
    """Return (header_part, rId) pair for newly-created header part."""
    new_part = HeaderPart.new(self.package)
    # Relating the part to the package yields its relationship id.
    relationship_id = self.relate_to(new_part, RT.HEADER)
    return new_part, relationship_id
Return (header_part, rId) pair for newly-created header part.
def volshow(
    data,
    lighting=False,
    data_min=None,
    data_max=None,
    max_shape=256,
    tf=None,
    stereo=False,
    ambient_coefficient=0.5,
    diffuse_coefficient=0.8,
    specular_coefficient=0.5,
    specular_exponent=5,
    downscale=1,
    level=[0.1, 0.5, 0.9],  # NOTE(review): mutable default args (read-only here)
    opacity=[0.01, 0.05, 0.1],
    level_width=0.1,
    controls=True,
    max_opacity=0.2,
    memorder='C',
    extent=None,
):
    """Visualize a 3d array using volume rendering.

    Currently only 1 volume can be rendered.

    :param data: 3d numpy array
    :param bool lighting: use lighting or not, if set to false, lighting
        parameters will be overriden
    :param float data_min: minimum value to consider for data, if None,
        computed using np.nanmin
    :param float data_max: maximum value to consider for data, if None,
        computed using np.nanmax
    :param int max_shape: maximum shape for the 3d cube, if larger, the data
        is reduced by skipping/slicing (data[::N]), set to None to disable.
    :param tf: transfer function (or a default one)
    :param bool stereo: stereo view for virtual reality (cardboard and
        similar VR head mount)
    :param ambient_coefficient: lighting parameter
    :param diffuse_coefficient: lighting parameter
    :param specular_coefficient: lighting parameter
    :param specular_exponent: lighting parameter
    :param float downscale: downscale the rendering for better performance,
        for instance when set to 2, a 512x512 canvas will show a 256x256
        rendering upscaled, but it will render twice as fast.
    :param level: level(s) for the where the opacity in the volume peaks,
        maximum sequence of length 3
    :param opacity: opacity(ies) for each level, scalar or sequence of max
        length 3
    :param level_width: width of the (gaussian) bumps where the opacity
        peaks, scalar or sequence of max length 3
    :param bool controls: add controls for lighting and transfer function
        or not
    :param float max_opacity: maximum opacity for transfer function controls
    :param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values
        that define the bounds of the volume, otherwise the viewport is used
    :return:
    """
    # NOTE(review): `stereo` and `downscale` are accepted but not referenced
    # in this body -- presumably consumed elsewhere; confirm.
    fig = gcf()

    if tf is None:
        tf = transfer_function(level, opacity, level_width, controls=controls, max_opacity=max_opacity)
    if data_min is None:
        data_min = np.nanmin(data)
    if data_max is None:
        data_max = np.nanmax(data)
    # Fortran memory order is handled by transposing to C order.
    if memorder == 'F':
        data = data.T
    if extent is None:
        extent = [(0, k) for k in data.shape[::-1]]

    if extent:
        _grow_limits(*extent)

    vol = ipv.Volume(
        data_original=data,
        tf=tf,
        data_min=data_min,
        data_max=data_max,
        show_min=data_min,
        show_max=data_max,
        extent_original=extent,
        data_max_shape=max_shape,
        ambient_coefficient=ambient_coefficient,
        diffuse_coefficient=diffuse_coefficient,
        specular_coefficient=specular_coefficient,
        specular_exponent=specular_exponent,
        rendering_lighting=lighting,
    )

    vol._listen_to(fig)

    if controls:
        # Sliders are js-linked to the volume's opacity/brightness traits.
        widget_opacity_scale = ipywidgets.FloatLogSlider(base=10, min=-2, max=2, description="opacity")
        widget_brightness = ipywidgets.FloatLogSlider(base=10, min=-1, max=1, description="brightness")
        ipywidgets.jslink((vol, 'opacity_scale'), (widget_opacity_scale, 'value'))
        ipywidgets.jslink((vol, 'brightness'), (widget_brightness, 'value'))
        widgets_bottom = [ipywidgets.HBox([widget_opacity_scale, widget_brightness])]
        current.container.children += tuple(widgets_bottom)

    fig.volumes = fig.volumes + [vol]
    return vol