code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _get_save_wall_photo(session, photo, server, hash, user_id=None, group_id=None): """ https://vk.com/dev/photos.saveWallPhoto """ if group_id < 0: group_id = abs(group_id) response = session.fetch("photos.saveWallPhoto", photo=photo, server=server, hash=hash, user_id=user_id, group_id=group_id)[0] return response['id'], response['owner_id']
https://vk.com/dev/photos.saveWallPhoto
Below is the the instruction that describes the task: ### Input: https://vk.com/dev/photos.saveWallPhoto ### Response: def _get_save_wall_photo(session, photo, server, hash, user_id=None, group_id=None): """ https://vk.com/dev/photos.saveWallPhoto """ if group_id < 0: group_id = abs(group_id) response = session.fetch("photos.saveWallPhoto", photo=photo, server=server, hash=hash, user_id=user_id, group_id=group_id)[0] return response['id'], response['owner_id']
def search(self, line):
    """CN search."""
    engine_key = d1_cli.impl.session.QUERY_ENGINE_NAME
    if self._session.get(engine_key) == "solr":
        return self._search_solr(line)
    # Only the Solr query engine is supported; anything else is an error.
    raise d1_cli.impl.exceptions.InvalidArguments(
        "Unsupported query engine: {}".format(self._session.get(engine_key))
    )
CN search.
Below is the the instruction that describes the task: ### Input: CN search. ### Response: def search(self, line): """CN search.""" if self._session.get(d1_cli.impl.session.QUERY_ENGINE_NAME) == "solr": return self._search_solr(line) raise d1_cli.impl.exceptions.InvalidArguments( "Unsupported query engine: {}".format( self._session.get(d1_cli.impl.session.QUERY_ENGINE_NAME) ) )
def delistify(x):
    """A basic slug version of a given parameter list.

    Lists have apostrophes stripped from each element and are joined with
    hyphens in sorted order; any non-list value is returned unchanged.
    """
    if not isinstance(x, list):
        return x
    cleaned = sorted(item.replace("'", "") for item in x)
    return '-'.join(cleaned)
A basic slug version of a given parameter list.
Below is the the instruction that describes the task: ### Input: A basic slug version of a given parameter list. ### Response: def delistify(x): """ A basic slug version of a given parameter list. """ if isinstance(x, list): x = [e.replace("'", "") for e in x] return '-'.join(sorted(x)) return x
def estimateHeritabilities(self, K, verbose=False): """ estimate variance components and fixed effects from a single trait model having only two terms """ # Fit single trait model varg = SP.zeros(self.P) varn = SP.zeros(self.P) fixed = SP.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] lmm = limix.CLMM() lmm.setK(K) lmm.setSNPs(SP.ones((K.shape[0],1))) lmm.setPheno(y) lmm.setCovs(SP.zeros((K.shape[0],1))) lmm.setVarcompApprox0(-20, 20, 1000) lmm.process() delta = SP.exp(lmm.getLdelta0()[0,0]) Vtot = SP.exp(lmm.getLSigma()[0,0]) varg[p] = Vtot varn[p] = delta*Vtot fixed[:,p] = lmm.getBetaSNP() if verbose: print(p) sth = {} sth['varg'] = varg sth['varn'] = varn sth['fixed'] = fixed return sth
estimate variance components and fixed effects from a single trait model having only two terms
Below is the the instruction that describes the task: ### Input: estimate variance components and fixed effects from a single trait model having only two terms ### Response: def estimateHeritabilities(self, K, verbose=False): """ estimate variance components and fixed effects from a single trait model having only two terms """ # Fit single trait model varg = SP.zeros(self.P) varn = SP.zeros(self.P) fixed = SP.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] lmm = limix.CLMM() lmm.setK(K) lmm.setSNPs(SP.ones((K.shape[0],1))) lmm.setPheno(y) lmm.setCovs(SP.zeros((K.shape[0],1))) lmm.setVarcompApprox0(-20, 20, 1000) lmm.process() delta = SP.exp(lmm.getLdelta0()[0,0]) Vtot = SP.exp(lmm.getLSigma()[0,0]) varg[p] = Vtot varn[p] = delta*Vtot fixed[:,p] = lmm.getBetaSNP() if verbose: print(p) sth = {} sth['varg'] = varg sth['varn'] = varn sth['fixed'] = fixed return sth
def thumbnail(self):
    """
    Deprecated as of June 8, 2015. Use self.status.images.

    Return thumbnail url of current playing item, or None when there is
    no status or no images.
    """
    status = self.status
    if status and status.images:
        return status.images[0].url
    return None
Deprecated as of June 8, 2015. Use self.status.images. Return thumbnail url of current playing item.
Below is the the instruction that describes the task: ### Input: Deprecated as of June 8, 2015. Use self.status.images. Return thumbnail url of current playing item. ### Response: def thumbnail(self): """ Deprecated as of June 8, 2015. Use self.status.images. Return thumbnail url of current playing item. """ if not self.status: return None images = self.status.images return images[0].url if images else None
def runner(self):
    """
    Run the necessary methods in the correct order
    """
    logging.info('Starting {} analysis pipeline'.format(self.analysistype))
    # Initialise the GenObject
    for sample in self.runmetadata.samples:
        setattr(sample, self.analysistype, GenObject())
        try:
            sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus]
        except KeyError:
            # Genus not present in the PointFinder organism map: record
            # "not determined" rather than failing the whole pipeline.
            sample[self.analysistype].pointfindergenus = 'ND'
    # Run the raw read mapping
    PointSipping(inputobject=self, cutoff=self.cutoff)
    # Create FASTA files from the raw read matches
    self.fasta()
    # Run PointFinder on the FASTA files
    self.run_pointfinder()
    # Create summary reports of the PointFinder outputs
    self.parse_pointfinder()
Run the necessary methods in the correct order
Below is the the instruction that describes the task: ### Input: Run the necessary methods in the correct order ### Response: def runner(self): """ Run the necessary methods in the correct order """ logging.info('Starting {} analysis pipeline'.format(self.analysistype)) # Initialise the GenObject for sample in self.runmetadata.samples: setattr(sample, self.analysistype, GenObject()) try: sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus] except KeyError: sample[self.analysistype].pointfindergenus = 'ND' # Run the raw read mapping PointSipping(inputobject=self, cutoff=self.cutoff) # Create FASTA files from the raw read matcves self.fasta() # Run PointFinder on the FASTA files self.run_pointfinder() # Create summary reports of the PointFinder outputs self.parse_pointfinder()
def log(self, url=None, credentials=None, do_verify_certificate=True):
    """
    Wrapper for the other log methods, decide which one based on the URL
    parameter.

    Dispatches to log_file for file:// URLs, log_post for http(s):// URLs,
    and log_stdout otherwise. Falls back to self.url when no URL is given.
    """
    target = self.url if url is None else url
    if target.startswith("file://"):
        self.log_file(target)
    elif target.startswith(("http://", "https://")):
        self.log_post(target, credentials, do_verify_certificate)
    else:
        self.log_stdout()
Wrapper for the other log methods, decide which one based on the URL parameter.
Below is the the instruction that describes the task: ### Input: Wrapper for the other log methods, decide which one based on the URL parameter. ### Response: def log(self, url=None, credentials=None, do_verify_certificate=True): """ Wrapper for the other log methods, decide which one based on the URL parameter. """ if url is None: url = self.url if re.match("file://", url): self.log_file(url) elif re.match("https://", url) or re.match("http://", url): self.log_post(url, credentials, do_verify_certificate) else: self.log_stdout()
def fork_pty():
    '''This implements a substitute for the forkpty system call. This
    should be more portable than the pty.fork() function. Specifically,
    this should work on Solaris.

    Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method
    to resolve the issue with Python's pty.fork() not supporting Solaris,
    particularly ssh. Based on patch to posixmodule.c authored by Noah
    Spurrier::

        http://mail.python.org/pipermail/python-dev/2003-May/035281.html

    Returns (pid, parent_fd): in the parent, pid is the child's pid and
    parent_fd is the master side of the pty; in the child, pid is 0.
    '''
    parent_fd, child_fd = os.openpty()
    if parent_fd < 0 or child_fd < 0:
        raise OSError("os.openpty() failed")
    pid = os.fork()
    if pid == CHILD:
        # Child.
        # NOTE(review): parent_fd is closed here yet still returned below;
        # the child should not use the returned fd — confirm callers agree.
        os.close(parent_fd)
        pty_make_controlling_tty(child_fd)
        # Wire the slave end of the pty to the child's stdio.
        os.dup2(child_fd, STDIN_FILENO)
        os.dup2(child_fd, STDOUT_FILENO)
        os.dup2(child_fd, STDERR_FILENO)
    else:
        # Parent.
        os.close(child_fd)
    return pid, parent_fd
This implements a substitute for the forkpty system call. This should be more portable than the pty.fork() function. Specifically, this should work on Solaris. Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to resolve the issue with Python's pty.fork() not supporting Solaris, particularly ssh. Based on patch to posixmodule.c authored by Noah Spurrier:: http://mail.python.org/pipermail/python-dev/2003-May/035281.html
Below is the the instruction that describes the task: ### Input: This implements a substitute for the forkpty system call. This should be more portable than the pty.fork() function. Specifically, this should work on Solaris. Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to resolve the issue with Python's pty.fork() not supporting Solaris, particularly ssh. Based on patch to posixmodule.c authored by Noah Spurrier:: http://mail.python.org/pipermail/python-dev/2003-May/035281.html ### Response: def fork_pty(): '''This implements a substitute for the forkpty system call. This should be more portable than the pty.fork() function. Specifically, this should work on Solaris. Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to resolve the issue with Python's pty.fork() not supporting Solaris, particularly ssh. Based on patch to posixmodule.c authored by Noah Spurrier:: http://mail.python.org/pipermail/python-dev/2003-May/035281.html ''' parent_fd, child_fd = os.openpty() if parent_fd < 0 or child_fd < 0: raise OSError("os.openpty() failed") pid = os.fork() if pid == CHILD: # Child. os.close(parent_fd) pty_make_controlling_tty(child_fd) os.dup2(child_fd, STDIN_FILENO) os.dup2(child_fd, STDOUT_FILENO) os.dup2(child_fd, STDERR_FILENO) else: # Parent. os.close(child_fd) return pid, parent_fd
def createExperimentArgs():
    """Run the basic probability of false positives experiment.

    Builds the argument tuples for each (n, a) combination that is worth
    running, pointing at the sdr_calculations2 binary.
    """
    experiment_args = []
    # for n in [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300,
    #           2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900]:
    for n in [1500, 1700, 1900, 2100]:
        for a in [128]:
            # Some parameter combinations are just not worth running!
            worth_running = ((a == 64 and n <= 1500)
                             or (a == 128 and n <= 1900)
                             or a == 256)
            if worth_running:
                out_csv = "results_errorbars/temp_{}_{}.csv".format(n, a)
                experiment_args.append(
                    ("./sdr_calculations2", out_csv, "200000",
                     str(n), str(a), "0"))
    return experiment_args
Run the basic probability of false positives experiment.
Below is the the instruction that describes the task: ### Input: Run the basic probability of false positives experiment. ### Response: def createExperimentArgs(): """Run the basic probability of false positives experiment.""" experimentArguments = [] # for n in [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300, # 2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900]: for n in [1500, 1700, 1900, 2100]: for a in [128]: # Some parameter combinations are just not worth running! if ( a==64 and n<=1500 ) or ( a==128 and n<= 1900 ) or ( a==256 ): experimentArguments.append( ("./sdr_calculations2", "results_errorbars/temp_"+str(n)+"_"+str(a)+".csv", "200000", str(n), str(a), "0"), ) return experimentArguments
def fromfits(infile, hdu = 0, verbose = True):
    """
    Factory function that reads a FITS file and returns a f2nimage object.
    Use hdu to specify which HDU you want (primary = 0)

    Args:
        infile: path to the FITS file.
        hdu: HDU index to read (0 = primary).
        verbose: if True, print shape/BITPIX/dtype diagnostics.
    """
    pixelarray, hdr = ft.getdata(infile, hdu, header=True)
    # Transpose so the first axis becomes x — assumes a 2-D image HDU;
    # TODO confirm behaviour for higher-dimensional data.
    pixelarray = np.asarray(pixelarray).transpose()
    #print pixelarray
    pixelarrayshape = pixelarray.shape
    if verbose :
        print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1])
        print "Input file BITPIX : %s" % (hdr["BITPIX"])
    pixelarrayshape = np.asarray(pixelarrayshape)
    if verbose :
        print "Internal array type :", pixelarray.dtype.name
    return f2nimage(pixelarray, verbose = verbose)
Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0)
Below is the the instruction that describes the task: ### Input: Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) ### Response: def fromfits(infile, hdu = 0, verbose = True): """ Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) """ pixelarray, hdr = ft.getdata(infile, hdu, header=True) pixelarray = np.asarray(pixelarray).transpose() #print pixelarray pixelarrayshape = pixelarray.shape if verbose : print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]) print "Input file BITPIX : %s" % (hdr["BITPIX"]) pixelarrayshape = np.asarray(pixelarrayshape) if verbose : print "Internal array type :", pixelarray.dtype.name return f2nimage(pixelarray, verbose = verbose)
def is_valid_bibtex(reference):
    """
    Use pybtex to validate that a reference is in proper BibTeX format

    Args:
        reference: A String reference in BibTeX format.

    Returns:
        Boolean indicating if reference is valid bibtex.
    """
    # Strip non-ASCII characters before parsing; pybtex has trouble with
    # some unicode input.
    stream = StringIO(remove_non_ascii(reference))
    parser = bibtex.Parser()
    errors.set_strict_mode(False)
    parsed = parser.parse_stream(stream)
    return len(parsed.entries) > 0
Use pybtex to validate that a reference is in proper BibTeX format Args: reference: A String reference in BibTeX format. Returns: Boolean indicating if reference is valid bibtex.
Below is the the instruction that describes the task: ### Input: Use pybtex to validate that a reference is in proper BibTeX format Args: reference: A String reference in BibTeX format. Returns: Boolean indicating if reference is valid bibtex. ### Response: def is_valid_bibtex(reference): """ Use pybtex to validate that a reference is in proper BibTeX format Args: reference: A String reference in BibTeX format. Returns: Boolean indicating if reference is valid bibtex. """ # str is necessary since pybtex seems to have an issue with unicode. The # filter expression removes all non-ASCII characters. sio = StringIO(remove_non_ascii(reference)) parser = bibtex.Parser() errors.set_strict_mode(False) bib_data = parser.parse_stream(sio) return len(bib_data.entries) > 0
def get_prepopulated_value(field, instance):
    """
    Returns preliminary value based on `populate_from`.

    Args:
        field: the slug field; ``field.populate_from`` is either a callable
            taking the instance, or the name of an instance attribute or
            zero-argument method.
        instance: the model instance the value is read from.

    Returns:
        The prepopulated value.
    """
    if callable(field.populate_from):
        # AutoSlugField(populate_from=lambda instance: ...)
        return field.populate_from(instance)
    # AutoSlugField(populate_from='foo')
    attr = getattr(instance, field.populate_from)
    # Bug fix: the old `callable(attr) and attr() or attr` returned the
    # bound method itself whenever attr() evaluated to a falsy value
    # ('' / 0 / None); a conditional expression handles that correctly.
    return attr() if callable(attr) else attr
Returns preliminary value based on `populate_from`.
Below is the the instruction that describes the task: ### Input: Returns preliminary value based on `populate_from`. ### Response: def get_prepopulated_value(field, instance): """ Returns preliminary value based on `populate_from`. """ if hasattr(field.populate_from, '__call__'): # AutoSlugField(populate_from=lambda instance: ...) return field.populate_from(instance) else: # AutoSlugField(populate_from='foo') attr = getattr(instance, field.populate_from) return callable(attr) and attr() or attr
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values
    """
    # First entry is the number of design conditions that follow.
    count = int(vals[0])
    pos = 1
    for _ in range(count):
        obj = DesignCondition()
        # Each condition consumes exactly obj.field_count entries.
        obj.read(vals[pos:pos + obj.field_count])
        self.add_design_condition(obj)
        pos += obj.field_count
Read values. Args: vals (list): list of strings representing values
Below is the the instruction that describes the task: ### Input: Read values. Args: vals (list): list of strings representing values ### Response: def read(self, vals): """Read values. Args: vals (list): list of strings representing values """ i = 0 count = int(vals[i]) i += 1 for _ in range(count): obj = DesignCondition() obj.read(vals[i:i + obj.field_count]) self.add_design_condition(obj) i += obj.field_count
def fail(self, group, message):
    '''Mark the particular job as failed, with the provided type, and a
    more specific message. By `type`, we mean some phrase that might be
    one of several categorical modes of failure. The `message` is
    something more job-specific, like perhaps a traceback.

    This method should __not__ be used to note that a job has been dropped
    or has failed in a transient way. This method __should__ be used to
    note that a job has something really wrong with it that must be
    remedied.

    The motivation behind the `type` is so that similar errors can be
    grouped together. Optionally, updated data can be provided for the job.
    A job in any state can be marked as failed. If it has been given to a
    worker as a job, then its subsequent requests to heartbeat or complete
    that job will fail. Failed jobs are kept until they are canceled or
    completed.

    __Returns__ the id of the failed job if successful, or `False` on
    failure.'''
    # `Logger.warn` is a deprecated alias; use `warning` instead.
    logger.warning('Failing %s (%s): %s', self.jid, group, message)
    # The redis-side `fail` call returns a falsy value on failure, which
    # we normalize to False.
    return self.client('fail', self.jid, self.client.worker_name,
                       group, message, json.dumps(self.data)) or False
Mark the particular job as failed, with the provided type, and a more specific message. By `type`, we mean some phrase that might be one of several categorical modes of failure. The `message` is something more job-specific, like perhaps a traceback. This method should __not__ be used to note that a job has been dropped or has failed in a transient way. This method __should__ be used to note that a job has something really wrong with it that must be remedied. The motivation behind the `type` is so that similar errors can be grouped together. Optionally, updated data can be provided for the job. A job in any state can be marked as failed. If it has been given to a worker as a job, then its subsequent requests to heartbeat or complete that job will fail. Failed jobs are kept until they are canceled or completed. __Returns__ the id of the failed job if successful, or `False` on failure.
Below is the the instruction that describes the task: ### Input: Mark the particular job as failed, with the provided type, and a more specific message. By `type`, we mean some phrase that might be one of several categorical modes of failure. The `message` is something more job-specific, like perhaps a traceback. This method should __not__ be used to note that a job has been dropped or has failed in a transient way. This method __should__ be used to note that a job has something really wrong with it that must be remedied. The motivation behind the `type` is so that similar errors can be grouped together. Optionally, updated data can be provided for the job. A job in any state can be marked as failed. If it has been given to a worker as a job, then its subsequent requests to heartbeat or complete that job will fail. Failed jobs are kept until they are canceled or completed. __Returns__ the id of the failed job if successful, or `False` on failure. ### Response: def fail(self, group, message): '''Mark the particular job as failed, with the provided type, and a more specific message. By `type`, we mean some phrase that might be one of several categorical modes of failure. The `message` is something more job-specific, like perhaps a traceback. This method should __not__ be used to note that a job has been dropped or has failed in a transient way. This method __should__ be used to note that a job has something really wrong with it that must be remedied. The motivation behind the `type` is so that similar errors can be grouped together. Optionally, updated data can be provided for the job. A job in any state can be marked as failed. If it has been given to a worker as a job, then its subsequent requests to heartbeat or complete that job will fail. Failed jobs are kept until they are canceled or completed. 
__Returns__ the id of the failed job if successful, or `False` on failure.''' logger.warn('Failing %s (%s): %s', self.jid, group, message) return self.client('fail', self.jid, self.client.worker_name, group, message, json.dumps(self.data)) or False
def init_db_conn(connection_name, HOSTS=None):
    """
    Initialize an Elasticsearch connection for each connection string
    defined in the configuration file.

    (Docstring previously said "redis"; the code builds an Elasticsearch
    client.)

    Args:
        connection_name: key under which the client is registered in
            el_pool.connections.
        HOSTS: host definitions passed straight to the Elasticsearch
            client constructor.
    """
    el = elasticsearch.Elasticsearch(hosts=HOSTS)
    el_pool.connections[connection_name] = ElasticSearchClient(el)
Initialize a redis connection by each connection string defined in the configuration file
Below is the the instruction that describes the task: ### Input: Initialize a redis connection by each connection string defined in the configuration file ### Response: def init_db_conn(connection_name, HOSTS=None): """ Initialize a redis connection by each connection string defined in the configuration file """ el = elasticsearch.Elasticsearch(hosts=HOSTS) el_pool.connections[connection_name] = ElasticSearchClient(el)
def QueueResponse(self, response, timestamp=None):
    """Queues the message on the flow's state.

    When no timestamp is given, the flow's frozen timestamp is used.
    """
    ts = self.frozen_timestamp if timestamp is None else timestamp
    self.response_queue.append((response, ts))
Queues the message on the flow's state.
Below is the instruction that describes the task: ### Input: Queues the message on the flow's state. ### Response: def QueueResponse(self, response, timestamp=None): """Queues the message on the flow's state.""" if timestamp is None: timestamp = self.frozen_timestamp self.response_queue.append((response, timestamp))
def called_with(self, *args, **kwargs):
    """Return True if the spy was called with the specified args/kwargs.
    Otherwise raise VerificationError.
    """
    expected = Call(*args, **kwargs)
    if expected not in calls(self.spy):
        raise VerificationError(
            "expected %s to be called with %s, but it wasn't" % (
                self.spy, expected.formatted_args))
    return True
Return True if the spy was called with the specified args/kwargs. Otherwise raise VerificationError.
Below is the the instruction that describes the task: ### Input: Return True if the spy was called with the specified args/kwargs. Otherwise raise VerificationError. ### Response: def called_with(self, *args, **kwargs): """Return True if the spy was called with the specified args/kwargs. Otherwise raise VerificationError. """ expected_call = Call(*args, **kwargs) if expected_call in calls(self.spy): return True raise VerificationError( "expected %s to be called with %s, but it wasn't" % ( self.spy, expected_call.formatted_args))
def modify(self, **kwargs):
    """We need to implement the custom exclusive parameter check.

    Validates mutually exclusive keyword arguments before delegating to
    the base class implementation.
    """
    self._check_exclusive_parameters(**kwargs)
    return super(Rule, self)._modify(**kwargs)
We need to implement the custom exclusive parameter check.
Below is the instruction that describes the task: ### Input: We need to implement the custom exclusive parameter check. ### Response: def modify(self, **kwargs): """We need to implement the custom exclusive parameter check.""" self._check_exclusive_parameters(**kwargs) return super(Rule, self)._modify(**kwargs)
def split_low_tag(tag):
    '''
    Take a low tag and split it back into the low dict that it came from.

    The tag is four fields joined by the '_|-' separator; raises
    ValueError (via unpacking) if the tag does not have exactly four.
    '''
    state, ident, name, fun = tag.split('_|-')
    return {
        'state': state,
        '__id__': ident,
        'name': name,
        'fun': fun,
    }
Take a low tag and split it back into the low dict that it came from
Below is the the instruction that describes the task: ### Input: Take a low tag and split it back into the low dict that it came from ### Response: def split_low_tag(tag): ''' Take a low tag and split it back into the low dict that it came from ''' state, id_, name, fun = tag.split('_|-') return {'state': state, '__id__': id_, 'name': name, 'fun': fun}
def _on_client_latency_changed(self, data): """Handle client latency changed.""" self._clients.get(data.get('id')).update_latency(data)
Handle client latency changed.
Below is the instruction that describes the task: ### Input: Handle client latency changed. ### Response: def _on_client_latency_changed(self, data): """Handle client latency changed.""" self._clients.get(data.get('id')).update_latency(data)
def prepare_token_request(self, token_url, authorization_response=None,
        redirect_url=None, state=None, body='', **kwargs):
    """Prepare a token creation request.

    Note that these requests usually require client authentication, either
    by including client_id or a set of provider specific authentication
    credentials.

    :param token_url: Provider token creation endpoint URL.
    :param authorization_response: The full redirection URL string, i.e.
        the location to which the user was redirected after successful
        authorization. Used to mine credentials needed to obtain a token
        in this step, such as authorization code.
    :param redirect_url: The redirect_url supplied with the authorization
        request (if there was one).
    :param state: The state supplied with the authorization request.
    :param body: Existing request body (URL encoded string) to embed
        parameters into. This may contain extra parameters. Default ''.
    :param kwargs: Additional parameters to included in the request.
    :returns: The prepared request tuple with (url, headers, body).
    """
    # Refuse to send credentials over plain HTTP.
    if not is_secure_transport(token_url):
        raise InsecureTransportError()

    state = state or self.state
    if authorization_response:
        # Extract the authorization code (and validate state) from the
        # redirect URL the user came back with.
        self.parse_request_uri_response(
            authorization_response, state=state)
    self.redirect_url = redirect_url or self.redirect_url
    body = self.prepare_request_body(body=body,
            redirect_uri=self.redirect_url, **kwargs)
    return token_url, FORM_ENC_HEADERS, body
Prepare a token creation request. Note that these requests usually require client authentication, either by including client_id or a set of provider specific authentication credentials. :param token_url: Provider token creation endpoint URL. :param authorization_response: The full redirection URL string, i.e. the location to which the user was redirected after successfull authorization. Used to mine credentials needed to obtain a token in this step, such as authorization code. :param redirect_url: The redirect_url supplied with the authorization request (if there was one). :param state: :param body: Existing request body (URL encoded string) to embed parameters into. This may contain extra paramters. Default ''. :param kwargs: Additional parameters to included in the request. :returns: The prepared request tuple with (url, headers, body).
Below is the the instruction that describes the task: ### Input: Prepare a token creation request. Note that these requests usually require client authentication, either by including client_id or a set of provider specific authentication credentials. :param token_url: Provider token creation endpoint URL. :param authorization_response: The full redirection URL string, i.e. the location to which the user was redirected after successfull authorization. Used to mine credentials needed to obtain a token in this step, such as authorization code. :param redirect_url: The redirect_url supplied with the authorization request (if there was one). :param state: :param body: Existing request body (URL encoded string) to embed parameters into. This may contain extra paramters. Default ''. :param kwargs: Additional parameters to included in the request. :returns: The prepared request tuple with (url, headers, body). ### Response: def prepare_token_request(self, token_url, authorization_response=None, redirect_url=None, state=None, body='', **kwargs): """Prepare a token creation request. Note that these requests usually require client authentication, either by including client_id or a set of provider specific authentication credentials. :param token_url: Provider token creation endpoint URL. :param authorization_response: The full redirection URL string, i.e. the location to which the user was redirected after successfull authorization. Used to mine credentials needed to obtain a token in this step, such as authorization code. :param redirect_url: The redirect_url supplied with the authorization request (if there was one). :param state: :param body: Existing request body (URL encoded string) to embed parameters into. This may contain extra paramters. Default ''. :param kwargs: Additional parameters to included in the request. :returns: The prepared request tuple with (url, headers, body). 
""" if not is_secure_transport(token_url): raise InsecureTransportError() state = state or self.state if authorization_response: self.parse_request_uri_response( authorization_response, state=state) self.redirect_url = redirect_url or self.redirect_url body = self.prepare_request_body(body=body, redirect_uri=self.redirect_url, **kwargs) return token_url, FORM_ENC_HEADERS, body
async def start_serving(self, address=None, sockets=None, **kw):
    """create the server endpoint.

    Creates one datagram transport per pre-created socket when ``sockets``
    is given, otherwise a single transport bound to ``address`` (a
    ``(host, port)`` tuple), and records the resulting server object.

    :param address: local ``(host, port)`` tuple to bind to.
    :param sockets: iterable of pre-created sockets (takes precedence).
    :param kw: NOTE(review): accepted but unused — confirm intentional.
    :raises RuntimeError: if already serving, or if neither ``sockets``
        nor a tuple ``address`` was supplied.
    """
    if self._server:
        raise RuntimeError('Already serving')
    server = DGServer(self._loop)
    loop = self._loop
    if sockets:
        # One transport per supplied socket.
        for sock in sockets:
            transport, _ = await loop.create_datagram_endpoint(
                self.create_protocol, sock=sock)
            server.transports.append(transport)
    elif isinstance(address, tuple):
        transport, _ = await loop.create_datagram_endpoint(
            self.create_protocol, local_addr=address)
        server.transports.append(transport)
    else:
        raise RuntimeError('sockets or address must be supplied')
    self._set_server(server)
create the server endpoint.
Below is the the instruction that describes the task: ### Input: create the server endpoint. ### Response: async def start_serving(self, address=None, sockets=None, **kw): """create the server endpoint. """ if self._server: raise RuntimeError('Already serving') server = DGServer(self._loop) loop = self._loop if sockets: for sock in sockets: transport, _ = await loop.create_datagram_endpoint( self.create_protocol, sock=sock) server.transports.append(transport) elif isinstance(address, tuple): transport, _ = await loop.create_datagram_endpoint( self.create_protocol, local_addr=address) server.transports.append(transport) else: raise RuntimeError('sockets or address must be supplied') self._set_server(server)
def image_activation_profiles(self):
    """
    :class:`~zhmcclient.ActivationProfileManager`: Access to the
    :term:`Image Activation Profiles <Image Activation Profile>` in this
    CPC.
    """
    # Lazily construct the manager on first access, then reuse it.
    if not self._image_activation_profiles:
        manager = ActivationProfileManager(self, profile_type='image')
        self._image_activation_profiles = manager
    return self._image_activation_profiles
:class:`~zhmcclient.ActivationProfileManager`: Access to the :term:`Image Activation Profiles <Image Activation Profile>` in this CPC.
Below is the the instruction that describes the task: ### Input: :class:`~zhmcclient.ActivationProfileManager`: Access to the :term:`Image Activation Profiles <Image Activation Profile>` in this CPC. ### Response: def image_activation_profiles(self): """ :class:`~zhmcclient.ActivationProfileManager`: Access to the :term:`Image Activation Profiles <Image Activation Profile>` in this CPC. """ # We do here some lazy loading. if not self._image_activation_profiles: self._image_activation_profiles = \ ActivationProfileManager(self, profile_type='image') return self._image_activation_profiles
def reset():
    """Resets Logger to its initial state"""
    # Restore every piece of class-level state to its defaults.
    Logger.fatal_warnings = False
    Logger.journal = []
    Logger._verbosity = 2
    Logger._last_checkpoint = 0
    Logger._ignored_codes = set()
    Logger._ignored_domains = set()
Resets Logger to its initial state
Below is the instruction that describes the task: ### Input: Resets Logger to its initial state ### Response: def reset(): """Resets Logger to its initial state""" Logger.journal = [] Logger.fatal_warnings = False Logger._ignored_codes = set() Logger._ignored_domains = set() Logger._verbosity = 2 Logger._last_checkpoint = 0
def get_formatter(columns):
    """Build a click option callback that parses a comma-separated
    column list into a new ColumnFormatter.

    :param columns: a list of Column instances
    """
    known = {column.name: column for column in columns}

    def validate(ctx, param, value):
        """Click validation function."""
        if value == '':
            raise click.BadParameter('At least one column is required.')
        formatter = ColumnFormatter()
        for name in (part.strip() for part in value.split(',')):
            try:
                # Prefer a predefined column of the same name.
                formatter.add_column(known[name])
            except KeyError:
                # Unknown names become ad-hoc columns; dots in the name
                # define the lookup path.
                formatter.add_column(Column(name, name.split('.')))
        return formatter

    return validate
This function returns a callback to use with click options. The returned function parses a comma-separated value and returns a new ColumnFormatter. :param columns: a list of Column instances
Below is the the instruction that describes the task: ### Input: This function returns a callback to use with click options. The returned function parses a comma-separated value and returns a new ColumnFormatter. :param columns: a list of Column instances ### Response: def get_formatter(columns): """This function returns a callback to use with click options. The returned function parses a comma-separated value and returns a new ColumnFormatter. :param columns: a list of Column instances """ column_map = dict((column.name, column) for column in columns) def validate(ctx, param, value): """Click validation function.""" if value == '': raise click.BadParameter('At least one column is required.') formatter = ColumnFormatter() for column in [col.strip() for col in value.split(',')]: if column in column_map: formatter.add_column(column_map[column]) else: formatter.add_column(Column(column, column.split('.'))) return formatter return validate
def parse_contexts(contexts):
    """
    Convert a contexts JSON to an Elasticsearch-compatible list of
    key-value pairs, grouping the inner data objects by their (fixed)
    schema name.

    For example, the JSON

    {
      "data": [
        {"data": {"unique": true},
         "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"},
        {"data": {"value": 1},
         "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"},
        {"data": {"value": 2},
         "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"}
      ],
      "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0"
    }

    would become

    [
      ("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]),
      ("context_com_acme_unduplicated_1", [{"unique": true}])
    ]
    """
    grouped = {}
    for context in json.loads(contexts)['data']:
        schema = fix_schema("contexts", context['schema'])
        # setdefault groups all inner payloads sharing a schema key.
        grouped.setdefault(schema, []).append(context['data'])
    return list(grouped.items())
Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs For example, the JSON { "data": [ { "data": { "unique": true }, "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0" }, { "data": { "value": 1 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" }, { "data": { "value": 2 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" } ], "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0" } would become [ ("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]), ("context_com_acme_unduplicated_1", [{"unique": true}]) ]
Below is the the instruction that describes the task: ### Input: Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs For example, the JSON { "data": [ { "data": { "unique": true }, "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0" }, { "data": { "value": 1 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" }, { "data": { "value": 2 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" } ], "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0" } would become [ ("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]), ("context_com_acme_unduplicated_1", [{"unique": true}]) ] ### Response: def parse_contexts(contexts): """ Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs For example, the JSON { "data": [ { "data": { "unique": true }, "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0" }, { "data": { "value": 1 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" }, { "data": { "value": 2 }, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0" } ], "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0" } would become [ ("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]), ("context_com_acme_unduplicated_1", [{"unique": true}]) ] """ my_json = json.loads(contexts) data = my_json['data'] distinct_contexts = {} for context in data: schema = fix_schema("contexts", context['schema']) inner_data = context['data'] if schema not in distinct_contexts: distinct_contexts[schema] = [inner_data] else: distinct_contexts[schema].append(inner_data) output = [] for key in distinct_contexts: output.append((key, distinct_contexts[key])) return output
def init(options, use_sigterm_handler=True): """ Must be called just after registration, before anything else """ # pylint: disable-msg=W0613 global _AUTH, _OPTIONS if isinstance(options, dict): _OPTIONS = DEFAULT_OPTIONS.copy() _OPTIONS.update(options) else: for optname, optvalue in DEFAULT_OPTIONS.iteritems(): if hasattr(options, optname): _OPTIONS[optname] = getattr(options, optname) else: _OPTIONS[optname] = optvalue if _OPTIONS['testmethods']: def fortytwo(request): "test GET method" return 42 def ping(request): "test POST method" return request.payload_params() register(fortytwo, 'GET') register(ping, 'POST') if _OPTIONS['auth_basic_file']: _AUTH = HttpAuthentication(_OPTIONS['auth_basic_file'], realm = _OPTIONS['auth_basic']).parse_file() for name, cmd in _COMMANDS.iteritems(): if cmd.safe_init: LOG.info("safe_init: %r", name) cmd.safe_init(_OPTIONS) if use_sigterm_handler: # signal.signal(signal.SIGHUP, lambda *x: None) # XXX signal.signal(signal.SIGTERM, sigterm_handler) signal.signal(signal.SIGINT, sigterm_handler)
Must be called just after registration, before anything else
Below is the the instruction that describes the task: ### Input: Must be called just after registration, before anything else ### Response: def init(options, use_sigterm_handler=True): """ Must be called just after registration, before anything else """ # pylint: disable-msg=W0613 global _AUTH, _OPTIONS if isinstance(options, dict): _OPTIONS = DEFAULT_OPTIONS.copy() _OPTIONS.update(options) else: for optname, optvalue in DEFAULT_OPTIONS.iteritems(): if hasattr(options, optname): _OPTIONS[optname] = getattr(options, optname) else: _OPTIONS[optname] = optvalue if _OPTIONS['testmethods']: def fortytwo(request): "test GET method" return 42 def ping(request): "test POST method" return request.payload_params() register(fortytwo, 'GET') register(ping, 'POST') if _OPTIONS['auth_basic_file']: _AUTH = HttpAuthentication(_OPTIONS['auth_basic_file'], realm = _OPTIONS['auth_basic']).parse_file() for name, cmd in _COMMANDS.iteritems(): if cmd.safe_init: LOG.info("safe_init: %r", name) cmd.safe_init(_OPTIONS) if use_sigterm_handler: # signal.signal(signal.SIGHUP, lambda *x: None) # XXX signal.signal(signal.SIGTERM, sigterm_handler) signal.signal(signal.SIGINT, sigterm_handler)
def get_rating(self):
    """get_rating()
    Returns the weighted average rating."""
    # No votes or a zero score means there is nothing to average yet.
    if self.votes and self.score:
        return float(self.score) / (self.votes + self.field.weight)
    return 0
get_rating() Returns the weighted average rating.
Below is the the instruction that describes the task: ### Input: get_rating() Returns the weighted average rating. ### Response: def get_rating(self): """get_rating() Returns the weighted average rating.""" if not (self.votes and self.score): return 0 return float(self.score)/(self.votes+self.field.weight)
def lastElementChild(self) -> Optional[AbstractNode]:
    """Last Element child node.

    If this node has no element child, return None.
    """
    # Walk the children back-to-front and return the first element hit.
    candidates = reversed(self.childNodes)  # type: ignore
    return next(
        (node for node in candidates if node.nodeType == Node.ELEMENT_NODE),
        None,
    )
Last Element child node. If this node has no element child, return None.
Below is the the instruction that describes the task: ### Input: Last Element child node. If this node has no element child, return None. ### Response: def lastElementChild(self) -> Optional[AbstractNode]: """Last Element child node. If this node has no element child, return None. """ for child in reversed(self.childNodes): # type: ignore if child.nodeType == Node.ELEMENT_NODE: return child return None
def hdr(data, filename):
    """
    write ENVI header files

    Parameters
    ----------
    data: str or dict
        the file or dictionary to get the info from
    filename: str
        the HDR file to write

    Returns
    -------
    """
    # Accept an already-built HDRobject as-is; anything else is passed
    # to the HDRobject constructor first.
    if isinstance(data, HDRobject):
        header = data
    else:
        header = HDRobject(data)
    header.write(filename)
write ENVI header files Parameters ---------- data: str or dict the file or dictionary to get the info from filename: str the HDR file to write Returns -------
Below is the the instruction that describes the task: ### Input: write ENVI header files Parameters ---------- data: str or dict the file or dictionary to get the info from filename: str the HDR file to write Returns ------- ### Response: def hdr(data, filename): """ write ENVI header files Parameters ---------- data: str or dict the file or dictionary to get the info from filename: str the HDR file to write Returns ------- """ hdrobj = data if isinstance(data, HDRobject) else HDRobject(data) hdrobj.write(filename)
def get_stats(self):
    """Return a string describing the stats"""
    # Total error count is every deletion, insertion and mismatch.
    total_errors = (self.deletions['total'] + self.insertions['total']
                    + self.mismatches)
    rows = [
        "ALIGNMENT_COUNT\t" + str(self.alignment_count),
        "ALIGNMENT_BASES\t" + str(self.alignment_length),
        "ANY_ERROR\t" + str(total_errors),
        "MISMATCHES\t" + str(self.mismatches),
        "ANY_DELETION\t" + str(self.deletions['total']),
        "COMPLETE_DELETION\t" + str(self.deletions['specific']),
        "HOMOPOLYMER_DELETION\t" + str(self.deletions['homopolymer']),
        "ANY_INSERTION\t" + str(self.insertions['total']),
        "COMPLETE_INSERTION\t" + str(self.insertions['specific']),
        "HOMOPOLYMER_INSERTION\t" + str(self.insertions['homopolymer']),
    ]
    # Each row is newline-terminated, matching the tab-separated format.
    return "".join(row + "\n" for row in rows)
Return a string describing the stats
Below is the the instruction that describes the task: ### Input: Return a string describing the stats ### Response: def get_stats(self): """Return a string describing the stats""" ostr = '' errtotal = self.deletions['total']+self.insertions['total']+self.mismatches ostr += "ALIGNMENT_COUNT\t"+str(self.alignment_count)+"\n" ostr += "ALIGNMENT_BASES\t"+str(self.alignment_length)+"\n" ostr += "ANY_ERROR\t"+str(errtotal)+"\n" ostr += "MISMATCHES\t"+str(self.mismatches)+"\n" ostr += "ANY_DELETION\t"+str(self.deletions['total'])+"\n" ostr += "COMPLETE_DELETION\t"+str(self.deletions['specific'])+"\n" ostr += "HOMOPOLYMER_DELETION\t"+str(self.deletions['homopolymer'])+"\n" ostr += "ANY_INSERTION\t"+str(self.insertions['total'])+"\n" ostr += "COMPLETE_INSERTION\t"+str(self.insertions['specific'])+"\n" ostr += "HOMOPOLYMER_INSERTION\t"+str(self.insertions['homopolymer'])+"\n" return ostr
def _calc_input_estimates(keyvals, get_retriever):
    """Calculate estimations of input file sizes for disk usage approximation.

    These are current dominated by fastq/BAM sizes, so estimate based on that.
    """
    # Pair each key with its estimated size, keeping only truthy sizes.
    sizes = ((key, _calc_file_size(val, 0, get_retriever))
             for key, val in keyvals.items())
    return {key: size for key, size in sizes if size}
Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that.
Below is the the instruction that describes the task: ### Input: Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that. ### Response: def _calc_input_estimates(keyvals, get_retriever): """Calculate estimations of input file sizes for disk usage approximation. These are current dominated by fastq/BAM sizes, so estimate based on that. """ out = {} for key, val in keyvals.items(): size = _calc_file_size(val, 0, get_retriever) if size: out[key] = size return out
def op_and(self, *elements):
    """Update the ``Expression`` by joining the specified additional
    ``elements`` using an "AND" ``Operator``

    Args:
        *elements (BaseExpression): The ``Expression`` and/or
            ``Constraint`` elements which the "AND" ``Operator``
            applies to.

    Returns:
        Expression: ``self`` or related ``Expression``.
    """
    # ';' is the "AND" operator token; append it, then every operand.
    joined = self.add_operator(Operator(';'))
    for extra in elements:
        joined.add_element(extra)
    return joined
Update the ``Expression`` by joining the specified additional ``elements`` using an "AND" ``Operator`` Args: *elements (BaseExpression): The ``Expression`` and/or ``Constraint`` elements which the "AND" ``Operator`` applies to. Returns: Expression: ``self`` or related ``Expression``.
Below is the the instruction that describes the task: ### Input: Update the ``Expression`` by joining the specified additional ``elements`` using an "AND" ``Operator`` Args: *elements (BaseExpression): The ``Expression`` and/or ``Constraint`` elements which the "AND" ``Operator`` applies to. Returns: Expression: ``self`` or related ``Expression``. ### Response: def op_and(self, *elements): """Update the ``Expression`` by joining the specified additional ``elements`` using an "AND" ``Operator`` Args: *elements (BaseExpression): The ``Expression`` and/or ``Constraint`` elements which the "AND" ``Operator`` applies to. Returns: Expression: ``self`` or related ``Expression``. """ expression = self.add_operator(Operator(';')) for element in elements: expression.add_element(element) return expression
def Main():
    """The main program function.

    Parses the command line, selects an output writer (file or stdout),
    scans the source for supported file systems and hashes every file
    found, reporting progress and errors on stdout.

    Returns:
      bool: True if successful or False if not.
    """
    argument_parser = argparse.ArgumentParser(description=(
        'Calculates a message digest hash for every file in a directory or '
        'storage media image.'))

    argument_parser.add_argument(
        '--output_file', '--output-file', dest='output_file', action='store',
        metavar='source.hashes', default=None, help=(
            'path of the output file, default is to output to stdout.'))

    argument_parser.add_argument(
        'source', nargs='?', action='store', metavar='image.raw',
        default=None, help='path of the directory or storage media image.')

    options = argument_parser.parse_args()

    # The source is positional but effectively required.
    if not options.source:
        print('Source value is missing.')
        print('')
        argument_parser.print_help()
        print('')
        return False

    logging.basicConfig(
        level=logging.INFO, format='[%(levelname)s] %(message)s')

    # Fall back to stdout when no output file was requested.
    if options.output_file:
        output_writer = FileOutputWriter(options.output_file)
    else:
        output_writer = StdoutWriter()

    try:
        output_writer.Open()
    except IOError as exception:
        print('Unable to open output writer with error: {0!s}.'.format(
            exception))
        print('')
        return False

    return_value = True
    # The mediator handles interactive volume/partition selection.
    mediator = command_line.CLIVolumeScannerMediator()
    recursive_hasher = RecursiveHasher(mediator=mediator)

    try:
        base_path_specs = recursive_hasher.GetBasePathSpecs(options.source)
        if not base_path_specs:
            print('No supported file system found in source.')
            print('')
            return False

        recursive_hasher.CalculateHashes(base_path_specs, output_writer)

        print('')
        print('Completed.')

    # Scanner failures and user aborts both produce a False result but
    # are reported differently.
    except errors.ScannerError as exception:
        return_value = False

        print('')
        print('[ERROR] {0!s}'.format(exception))

    except errors.UserAbort as exception:
        return_value = False

        print('')
        print('Aborted.')

    output_writer.Close()

    return return_value
The main program function. Returns: bool: True if successful or False if not.
Below is the the instruction that describes the task: ### Input: The main program function. Returns: bool: True if successful or False if not. ### Response: def Main(): """The main program function. Returns: bool: True if successful or False if not. """ argument_parser = argparse.ArgumentParser(description=( 'Calculates a message digest hash for every file in a directory or ' 'storage media image.')) argument_parser.add_argument( '--output_file', '--output-file', dest='output_file', action='store', metavar='source.hashes', default=None, help=( 'path of the output file, default is to output to stdout.')) argument_parser.add_argument( 'source', nargs='?', action='store', metavar='image.raw', default=None, help='path of the directory or storage media image.') options = argument_parser.parse_args() if not options.source: print('Source value is missing.') print('') argument_parser.print_help() print('') return False logging.basicConfig( level=logging.INFO, format='[%(levelname)s] %(message)s') if options.output_file: output_writer = FileOutputWriter(options.output_file) else: output_writer = StdoutWriter() try: output_writer.Open() except IOError as exception: print('Unable to open output writer with error: {0!s}.'.format( exception)) print('') return False return_value = True mediator = command_line.CLIVolumeScannerMediator() recursive_hasher = RecursiveHasher(mediator=mediator) try: base_path_specs = recursive_hasher.GetBasePathSpecs(options.source) if not base_path_specs: print('No supported file system found in source.') print('') return False recursive_hasher.CalculateHashes(base_path_specs, output_writer) print('') print('Completed.') except errors.ScannerError as exception: return_value = False print('') print('[ERROR] {0!s}'.format(exception)) except errors.UserAbort as exception: return_value = False print('') print('Aborted.') output_writer.Close() return return_value
def _eight_byte_real_to_float(value): """ Convert a number from GDSII 8 byte real format to float. Parameters ---------- value : string The GDSII binary string representation of the number. Returns ------- out : float The number represented by ``value``. """ short1, short2, long3 = struct.unpack('>HHL', value) exponent = (short1 & 0x7f00) // 256 - 64 mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 + long3) / 72057594037927936.0 if short1 & 0x8000: return -mantissa * 16.**exponent return mantissa * 16.**exponent
Convert a number from GDSII 8 byte real format to float. Parameters ---------- value : string The GDSII binary string representation of the number. Returns ------- out : float The number represented by ``value``.
Below is the the instruction that describes the task: ### Input: Convert a number from GDSII 8 byte real format to float. Parameters ---------- value : string The GDSII binary string representation of the number. Returns ------- out : float The number represented by ``value``. ### Response: def _eight_byte_real_to_float(value): """ Convert a number from GDSII 8 byte real format to float. Parameters ---------- value : string The GDSII binary string representation of the number. Returns ------- out : float The number represented by ``value``. """ short1, short2, long3 = struct.unpack('>HHL', value) exponent = (short1 & 0x7f00) // 256 - 64 mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 + long3) / 72057594037927936.0 if short1 & 0x8000: return -mantissa * 16.**exponent return mantissa * 16.**exponent
def search_notficant_for_facets(self, **kwargs):  # noqa: E501
    """Lists the values of one or more facets over the customer's notificants  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_notficant_for_facets(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param FacetsSearchRequestContainer body:
    :return: ResponseContainerFacetsResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths return whatever the *_with_http_info
    # helper produces (a request thread or the response data, resp.).
    return self.search_notficant_for_facets_with_http_info(**kwargs)  # noqa: E501
Lists the values of one or more facets over the customer's notificants # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_notficant_for_facets(async_req=True) >>> result = thread.get() :param async_req bool :param FacetsSearchRequestContainer body: :return: ResponseContainerFacetsResponseContainer If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Lists the values of one or more facets over the customer's notificants # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_notficant_for_facets(async_req=True) >>> result = thread.get() :param async_req bool :param FacetsSearchRequestContainer body: :return: ResponseContainerFacetsResponseContainer If the method is called asynchronously, returns the request thread. ### Response: def search_notficant_for_facets(self, **kwargs): # noqa: E501 """Lists the values of one or more facets over the customer's notificants # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_notficant_for_facets(async_req=True) >>> result = thread.get() :param async_req bool :param FacetsSearchRequestContainer body: :return: ResponseContainerFacetsResponseContainer If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_notficant_for_facets_with_http_info(**kwargs) # noqa: E501 else: (data) = self.search_notficant_for_facets_with_http_info(**kwargs) # noqa: E501 return data
def clearText(self, keepFocus=False):
    """Clear the text in the field"""
    # Focus is normally dropped along with the text unless the caller
    # explicitly asks to keep it.
    self.focus = keepFocus
    self.text = ''
    self._updateImage()
Clear the text in the field
Below is the the instruction that describes the task: ### Input: Clear the text in the field ### Response: def clearText(self, keepFocus=False): """Clear the text in the field""" self.text = '' self.focus = keepFocus self._updateImage()
def search_for_devices_by_serial_number(self, sn):
    """ Returns a list of device objects that match the serial number
        in param 'sn'. This will match partial serial numbers. """
    import re
    pattern = re.compile(sn)
    found = []
    for device in self.get_all_devices_in_portal():
        try:
            # re.match anchors at the start, so prefixes match too.
            if pattern.match(device['sn']):
                found.append(device)
        except TypeError as err:
            # A non-string serial (e.g. None) is reported, not fatal.
            print("Problem checking device {!r}: {!r}".format(
                device['info']['description']['name'], str(err)))
    return found
Returns a list of device objects that match the serial number in param 'sn'. This will match partial serial numbers.
Below is the the instruction that describes the task: ### Input: Returns a list of device objects that match the serial number in param 'sn'. This will match partial serial numbers. ### Response: def search_for_devices_by_serial_number(self, sn): """ Returns a list of device objects that match the serial number in param 'sn'. This will match partial serial numbers. """ import re sn_search = re.compile(sn) matches = [] for dev_o in self.get_all_devices_in_portal(): # print("Checking {0}".format(dev_o['sn'])) try: if sn_search.match(dev_o['sn']): matches.append(dev_o) except TypeError as err: print("Problem checking device {!r}: {!r}".format( dev_o['info']['description']['name'], str(err))) return matches
def search(cls, session, queries, out_type):
    """Search for a record given a domain.

    Args:
        session (requests.sessions.Session): Authenticated session.
        queries (helpscout.models.Domain or iter): The queries for the
            domain. If a ``Domain`` object is provided, it will simply be
            returned. Otherwise, a ``Domain`` object will be generated
            from the complex queries. In this case, the queries should
            conform to the interface in
            :func:`helpscout.domain.Domain.from_tuple`.
        out_type (helpscout.BaseModel): The type of record to output.
            This should be provided by child classes, by calling super.

    Returns:
        RequestPaginator(output_type=helpscout.BaseModel): Results
            iterator of the ``out_type`` that is defined.
    """
    cls._check_implements('search')
    domain = cls.get_search_domain(queries)
    endpoint = '/search/%s.json' % cls.__endpoint__
    return cls(
        endpoint,
        data={'query': str(domain)},
        session=session,
        out_type=out_type,
    )
Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined.
Below is the the instruction that describes the task: ### Input: Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined. ### Response: def search(cls, session, queries, out_type): """Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined. """ cls._check_implements('search') domain = cls.get_search_domain(queries) return cls( '/search/%s.json' % cls.__endpoint__, data={'query': str(domain)}, session=session, out_type=out_type, )
def chunks(arr, size):
    """Splits a list into chunks

    :param arr: list to split
    :type arr: :class:`list`
    :param size: number of elements in each chunk
    :type size: :class:`int`
    :return: generator object
    :rtype: :class:`generator`
    """
    # _range is the project's py2/py3 range alias; slicing past the end
    # is safe, so the final chunk may be shorter than ``size``.
    for offset in _range(0, len(arr), size):
        chunk = arr[offset:offset + size]
        yield chunk
Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator`
Below is the the instruction that describes the task: ### Input: Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator` ### Response: def chunks(arr, size): """Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator` """ for i in _range(0, len(arr), size): yield arr[i:i+size]
def __stop(self):  # pragma: no cover
    """Stops the background engine."""
    # Already stopped (or never started): nothing to do.
    if not self._listener_thread:
        return
    logger.info('Stopping listener')
    # Signal the receiver to exit, then wait for the thread to finish
    # before dropping both references.
    self._celery_receiver.should_stop = True
    self._listener_thread.join()
    self._listener_thread = None
    self._celery_receiver = None
Stops the background engine.
Below is the the instruction that describes the task: ### Input: Stops the background engine. ### Response: def __stop(self): # pragma: no cover """Stops the background engine.""" if not self._listener_thread: return logger.info('Stopping listener') self._celery_receiver.should_stop = True self._listener_thread.join() self._listener_thread = self._celery_receiver = None
def define_points_grid(self): """ This is experimental code that could be used in the spatialDomainNoGrid section to build a grid of points on which to generate the solution. However, the current development plan (as of 27 Jan 2015) is to have the end user supply the list of points where they want a solution (and/or for it to be provided in a more automated way by GRASS GIS). But because this (untested) code may still be useful, it will remain as its own function here. It used to be in f2d.py. """ # Grid making step # In this case, an output at different (x,y), e.g., on a grid, is desired # First, see if there is a need for a grid, and then make it # latlon arrays must have a pre-set grid if self.latlon == False: # Warn that any existing grid will be overwritten try: self.dx if self.Quiet == False: print("dx and dy being overwritten -- supply a full grid") except: try: self.dy if self.Quiet == False: print("dx and dy being overwritten -- supply a full grid") except: pass # Boundaries n = np.max(self.y) + self.alpha s = np.min(self.y) - self.alpha w = np.min(self.x) + self.alpha e = np.max(self.x) - self.alpha # Grid spacing dxprelim = self.alpha/50. # x or y nx = np.ceil((e-w)/dxprelim) ny = np.ceil((n-s)/dxprelim) dx = (e-w) / nx dy = (n-s) / ny self.dx = self.dy = (dx+dy)/2. # Average of these to create a # square grid for more compatibility self.xw = np.linspace(w, e, nx) self.yw = np.linspace(s, n, ny) else: print("Lat/lon xw and yw must be pre-set: grid will not be square") print("and may run into issues with poles, so to ensure the proper") print("output points are chosen, the end user should do this.") sys.exit()
This is experimental code that could be used in the spatialDomainNoGrid section to build a grid of points on which to generate the solution. However, the current development plan (as of 27 Jan 2015) is to have the end user supply the list of points where they want a solution (and/or for it to be provided in a more automated way by GRASS GIS). But because this (untested) code may still be useful, it will remain as its own function here. It used to be in f2d.py.
Below is the the instruction that describes the task:
### Input:
This is experimental code that could be used in the spatialDomainNoGrid section to build a grid of points on which to generate the solution. However, the current development plan (as of 27 Jan 2015) is to have the end user supply the list of points where they want a solution (and/or for it to be provided in a more automated way by GRASS GIS). But because this (untested) code may still be useful, it will remain as its own function here. It used to be in f2d.py.
### Response:
def define_points_grid(self):
    """
    This is experimental code that could be used in the spatialDomainNoGrid
    section to build a grid of points on which to generate the solution.
    However, the current development plan (as of 27 Jan 2015) is to have the
    end user supply the list of points where they want a solution (and/or
    for it to be provided in a more automated way by GRASS GIS). But because
    this (untested) code may still be useful, it will remain as its own
    function here.
    It used to be in f2d.py.
    """
    # Grid making step
    # In this case, an output at different (x,y), e.g., on a grid, is desired
    # First, see if there is a need for a grid, and then make it
    # latlon arrays must have a pre-set grid
    if self.latlon == False:
        # Warn that any existing grid will be overwritten
        try:
            self.dx
            if self.Quiet == False:
                print("dx and dy being overwritten -- supply a full grid")
        except:
            try:
                self.dy
                if self.Quiet == False:
                    print("dx and dy being overwritten -- supply a full grid")
            except:
                pass
        # Boundaries
        n = np.max(self.y) + self.alpha
        s = np.min(self.y) - self.alpha
        w = np.min(self.x) + self.alpha
        e = np.max(self.x) - self.alpha
        # Grid spacing
        dxprelim = self.alpha/50. # x or y
        nx = np.ceil((e-w)/dxprelim)
        ny = np.ceil((n-s)/dxprelim)
        dx = (e-w) / nx
        dy = (n-s) / ny
        self.dx = self.dy = (dx+dy)/2. # Average of these to create a
                                       # square grid for more compatibility
        self.xw = np.linspace(w, e, nx)
        self.yw = np.linspace(s, n, ny)
    else:
        print("Lat/lon xw and yw must be pre-set: grid will not be square")
        print("and may run into issues with poles, so to ensure the proper")
        print("output points are chosen, the end user should do this.")
        sys.exit()
def make_pmml_pipeline(obj, active_fields = None, target_fields = None): """Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline. Parameters: ---------- obj: BaseEstimator The object. active_fields: list of strings, optional Feature names. If missing, "x1", "x2", .., "xn" are assumed. target_fields: list of strings, optional Label name(s). If missing, "y" is assumed. """ steps = _filter_steps(_get_steps(obj)) pipeline = PMMLPipeline(steps) if active_fields is not None: pipeline.active_fields = numpy.asarray(active_fields) if target_fields is not None: pipeline.target_fields = numpy.asarray(target_fields) return pipeline
Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline. Parameters: ---------- obj: BaseEstimator The object. active_fields: list of strings, optional Feature names. If missing, "x1", "x2", .., "xn" are assumed. target_fields: list of strings, optional Label name(s). If missing, "y" is assumed.
Below is the the instruction that describes the task: ### Input: Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline. Parameters: ---------- obj: BaseEstimator The object. active_fields: list of strings, optional Feature names. If missing, "x1", "x2", .., "xn" are assumed. target_fields: list of strings, optional Label name(s). If missing, "y" is assumed. ### Response: def make_pmml_pipeline(obj, active_fields = None, target_fields = None): """Translates a regular Scikit-Learn estimator or pipeline to a PMML pipeline. Parameters: ---------- obj: BaseEstimator The object. active_fields: list of strings, optional Feature names. If missing, "x1", "x2", .., "xn" are assumed. target_fields: list of strings, optional Label name(s). If missing, "y" is assumed. """ steps = _filter_steps(_get_steps(obj)) pipeline = PMMLPipeline(steps) if active_fields is not None: pipeline.active_fields = numpy.asarray(active_fields) if target_fields is not None: pipeline.target_fields = numpy.asarray(target_fields) return pipeline
def parse_requests_response(response, **kwargs): """Build a ContentDisposition from a requests (PyPI) response. """ return parse_headers( response.headers.get('content-disposition'), response.url, **kwargs)
Build a ContentDisposition from a requests (PyPI) response.
Below is the the instruction that describes the task: ### Input: Build a ContentDisposition from a requests (PyPI) response. ### Response: def parse_requests_response(response, **kwargs): """Build a ContentDisposition from a requests (PyPI) response. """ return parse_headers( response.headers.get('content-disposition'), response.url, **kwargs)
def message_checksum(msg): '''calculate a 8-bit checksum of the key fields of a message, so we can detect incompatible XML changes''' from .mavcrc import x25crc crc = x25crc() crc.accumulate_str(msg.name + ' ') # in order to allow for extensions the crc does not include # any field extensions crc_end = msg.base_fields() for i in range(crc_end): f = msg.ordered_fields[i] crc.accumulate_str(f.type + ' ') crc.accumulate_str(f.name + ' ') if f.array_length: crc.accumulate([f.array_length]) return (crc.crc&0xFF) ^ (crc.crc>>8)
calculate a 8-bit checksum of the key fields of a message, so we can detect incompatible XML changes
Below is the the instruction that describes the task: ### Input: calculate a 8-bit checksum of the key fields of a message, so we can detect incompatible XML changes ### Response: def message_checksum(msg): '''calculate a 8-bit checksum of the key fields of a message, so we can detect incompatible XML changes''' from .mavcrc import x25crc crc = x25crc() crc.accumulate_str(msg.name + ' ') # in order to allow for extensions the crc does not include # any field extensions crc_end = msg.base_fields() for i in range(crc_end): f = msg.ordered_fields[i] crc.accumulate_str(f.type + ' ') crc.accumulate_str(f.name + ' ') if f.array_length: crc.accumulate([f.array_length]) return (crc.crc&0xFF) ^ (crc.crc>>8)
def _load_client_cert_chain(keychain, *paths): """ Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. """ # Ok, the strategy. # # This relies on knowing that macOS will not give you a SecIdentityRef # unless you have imported a key into a keychain. This is a somewhat # artificial limitation of macOS (for example, it doesn't necessarily # affect iOS), but there is nothing inside Security.framework that lets you # get a SecIdentityRef without having a key in a keychain. # # So the policy here is we take all the files and iterate them in order. # Each one will use SecItemImport to have one or more objects loaded from # it. We will also point at a keychain that macOS can use to work with the # private key. # # Once we have all the objects, we'll check what we actually have. If we # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, # we'll take the first certificate (which we assume to be our leaf) and # ask the keychain to give us a SecIdentityRef with that cert's associated # key. # # We'll then return a CFArray containing the trust chain: one # SecIdentityRef and then zero-or-more SecCertificateRef objects. The # responsibility for freeing this CFArray will be with the caller. This # CFArray must remain alive for the entire connection, so in practice it # will be stored with a single SSLSocket, along with the reference to the # keychain. certificates = [] identities = [] # Filter out bad paths. paths = (path for path in paths if path) try: for file_path in paths: new_identities, new_certs = _load_items_from_file( keychain, file_path ) identities.extend(new_identities) certificates.extend(new_certs) # Ok, we have everything. The question is: do we have an identity? If # not, we want to grab one from the first cert we have. 
if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) # We now want to release the original certificate, as we no longer # need it. CoreFoundation.CFRelease(certificates.pop(0)) # We now need to build a new CFArray that holds the trust chain. trust_chain = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) for item in itertools.chain(identities, certificates): # ArrayAppendValue does a CFRetain on the item. That's fine, # because the finally block will release our other refs to them. CoreFoundation.CFArrayAppendValue(trust_chain, item) return trust_chain finally: for obj in itertools.chain(identities, certificates): CoreFoundation.CFRelease(obj)
Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain.
Below is the the instruction that describes the task: ### Input: Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. ### Response: def _load_client_cert_chain(keychain, *paths): """ Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. """ # Ok, the strategy. # # This relies on knowing that macOS will not give you a SecIdentityRef # unless you have imported a key into a keychain. This is a somewhat # artificial limitation of macOS (for example, it doesn't necessarily # affect iOS), but there is nothing inside Security.framework that lets you # get a SecIdentityRef without having a key in a keychain. # # So the policy here is we take all the files and iterate them in order. # Each one will use SecItemImport to have one or more objects loaded from # it. We will also point at a keychain that macOS can use to work with the # private key. # # Once we have all the objects, we'll check what we actually have. If we # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, # we'll take the first certificate (which we assume to be our leaf) and # ask the keychain to give us a SecIdentityRef with that cert's associated # key. # # We'll then return a CFArray containing the trust chain: one # SecIdentityRef and then zero-or-more SecCertificateRef objects. The # responsibility for freeing this CFArray will be with the caller. This # CFArray must remain alive for the entire connection, so in practice it # will be stored with a single SSLSocket, along with the reference to the # keychain. certificates = [] identities = [] # Filter out bad paths. 
paths = (path for path in paths if path) try: for file_path in paths: new_identities, new_certs = _load_items_from_file( keychain, file_path ) identities.extend(new_identities) certificates.extend(new_certs) # Ok, we have everything. The question is: do we have an identity? If # not, we want to grab one from the first cert we have. if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) # We now want to release the original certificate, as we no longer # need it. CoreFoundation.CFRelease(certificates.pop(0)) # We now need to build a new CFArray that holds the trust chain. trust_chain = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) for item in itertools.chain(identities, certificates): # ArrayAppendValue does a CFRetain on the item. That's fine, # because the finally block will release our other refs to them. CoreFoundation.CFArrayAppendValue(trust_chain, item) return trust_chain finally: for obj in itertools.chain(identities, certificates): CoreFoundation.CFRelease(obj)
def supported_currencies(self, project='moneywagon', level="full"): """ Returns a list of all currencies that are supported by the passed in project. and support level. Support level can be: "block", "transaction", "address" or "full". """ ret = [] if project == 'multiexplorer-wallet': for currency, data in self.sorted_crypto_data: if not data.get("bip44_coin_type"): continue if len(data.get('services', {}).get("push_tx", [])) < 1: continue if len(data.get('services', {}).get("historical_transactions", [])) < 1: continue if len(data.get('services', {}).get("single_transaction", [])) < 1: continue if len(data.get('services', {}).get("unspent_outputs", [])) < 1: continue ret.append(currency) altcore_tx = self.supported_currencies('altcore', level="transaction") return [x for x in ret if x in altcore_tx] for symbol, data in self.sorted_crypto_data: if symbol == '': # template continue if self.is_all_supported(data, project, level): ret.append(symbol) return ret
Returns a list of all currencies that are supported by the passed in project. and support level. Support level can be: "block", "transaction", "address" or "full".
Below is the the instruction that describes the task: ### Input: Returns a list of all currencies that are supported by the passed in project. and support level. Support level can be: "block", "transaction", "address" or "full". ### Response: def supported_currencies(self, project='moneywagon', level="full"): """ Returns a list of all currencies that are supported by the passed in project. and support level. Support level can be: "block", "transaction", "address" or "full". """ ret = [] if project == 'multiexplorer-wallet': for currency, data in self.sorted_crypto_data: if not data.get("bip44_coin_type"): continue if len(data.get('services', {}).get("push_tx", [])) < 1: continue if len(data.get('services', {}).get("historical_transactions", [])) < 1: continue if len(data.get('services', {}).get("single_transaction", [])) < 1: continue if len(data.get('services', {}).get("unspent_outputs", [])) < 1: continue ret.append(currency) altcore_tx = self.supported_currencies('altcore', level="transaction") return [x for x in ret if x in altcore_tx] for symbol, data in self.sorted_crypto_data: if symbol == '': # template continue if self.is_all_supported(data, project, level): ret.append(symbol) return ret
def estimate_tuning(input_file): '''Load an audio file and estimate tuning (in cents)''' print('Loading ', input_file) y, sr = librosa.load(input_file) print('Separating harmonic component ... ') y_harm = librosa.effects.harmonic(y) print('Estimating tuning ... ') # Just track the pitches associated with high magnitude tuning = librosa.estimate_tuning(y=y_harm, sr=sr) print('{:+0.2f} cents'.format(100 * tuning))
Load an audio file and estimate tuning (in cents)
Below is the the instruction that describes the task: ### Input: Load an audio file and estimate tuning (in cents) ### Response: def estimate_tuning(input_file): '''Load an audio file and estimate tuning (in cents)''' print('Loading ', input_file) y, sr = librosa.load(input_file) print('Separating harmonic component ... ') y_harm = librosa.effects.harmonic(y) print('Estimating tuning ... ') # Just track the pitches associated with high magnitude tuning = librosa.estimate_tuning(y=y_harm, sr=sr) print('{:+0.2f} cents'.format(100 * tuning))
def has_parent_families(self, family_id): """Tests if the ``Family`` has any parents. arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the family has parents, ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.has_parent_bins if self._catalog_session is not None: return self._catalog_session.has_parent_catalogs(catalog_id=family_id) return self._hierarchy_session.has_parents(id_=family_id)
Tests if the ``Family`` has any parents. arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the family has parents, ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Tests if the ``Family`` has any parents. arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the family has parents, ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def has_parent_families(self, family_id): """Tests if the ``Family`` has any parents. arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the family has parents, ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.has_parent_bins if self._catalog_session is not None: return self._catalog_session.has_parent_catalogs(catalog_id=family_id) return self._hierarchy_session.has_parents(id_=family_id)
def pause(self, id, when=None): # pylint: disable=invalid-name,redefined-builtin """Pause a running result. :param id: Result ID as an int. :param when: Must be string `end-of-test` or `end-of-loop`. """ return self.service.post(self.base+str(id)+'/pause/', params={'when': when})
Pause a running result. :param id: Result ID as an int. :param when: Must be string `end-of-test` or `end-of-loop`.
Below is the the instruction that describes the task: ### Input: Pause a running result. :param id: Result ID as an int. :param when: Must be string `end-of-test` or `end-of-loop`. ### Response: def pause(self, id, when=None): # pylint: disable=invalid-name,redefined-builtin """Pause a running result. :param id: Result ID as an int. :param when: Must be string `end-of-test` or `end-of-loop`. """ return self.service.post(self.base+str(id)+'/pause/', params={'when': when})
def __prune_search_template(self, extract_as_keys, search_template): """ Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns """ data = { "extract_as_keys": extract_as_keys, "search_template": search_template } failure_message = "Failed to prune a search template" return self._get_success_json(self._post_json( 'v1/search_templates/prune-to-extract-as', data, failure_message=failure_message))['data']
Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns
Below is the the instruction that describes the task: ### Input: Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns ### Response: def __prune_search_template(self, extract_as_keys, search_template): """ Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns """ data = { "extract_as_keys": extract_as_keys, "search_template": search_template } failure_message = "Failed to prune a search template" return self._get_success_json(self._post_json( 'v1/search_templates/prune-to-extract-as', data, failure_message=failure_message))['data']
def run_upload_db(filename=None): """ Uploads your local database to the server. You can create a local dump with ``fab export_db`` first. In order to import the database on the server you still need to SSH into the server. Usage:: fab prod run_upload_db fab prod run_upload_db:filename=foobar.dump """ if not filename: filename = settings.DB_DUMP_FILENAME if env.key_filename: ssh = settings.PROJECT_NAME else: ssh = '{0}@{1}'.format(env.user, env.host_string) local('scp {0} {1}:{3}'.format( filename, ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')))
Uploads your local database to the server. You can create a local dump with ``fab export_db`` first. In order to import the database on the server you still need to SSH into the server. Usage:: fab prod run_upload_db fab prod run_upload_db:filename=foobar.dump
Below is the the instruction that describes the task: ### Input: Uploads your local database to the server. You can create a local dump with ``fab export_db`` first. In order to import the database on the server you still need to SSH into the server. Usage:: fab prod run_upload_db fab prod run_upload_db:filename=foobar.dump ### Response: def run_upload_db(filename=None): """ Uploads your local database to the server. You can create a local dump with ``fab export_db`` first. In order to import the database on the server you still need to SSH into the server. Usage:: fab prod run_upload_db fab prod run_upload_db:filename=foobar.dump """ if not filename: filename = settings.DB_DUMP_FILENAME if env.key_filename: ssh = settings.PROJECT_NAME else: ssh = '{0}@{1}'.format(env.user, env.host_string) local('scp {0} {1}:{3}'.format( filename, ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')))
def debug_video_writer_factory(output_dir): """Creates a VideoWriter for debug videos.""" if FLAGS.disable_ffmpeg: return common_video.IndividualFrameWriter(output_dir) else: output_path = os.path.join(output_dir, "video.avi") return common_video.WholeVideoWriter( fps=10, output_path=output_path, file_format="avi" )
Creates a VideoWriter for debug videos.
Below is the the instruction that describes the task: ### Input: Creates a VideoWriter for debug videos. ### Response: def debug_video_writer_factory(output_dir): """Creates a VideoWriter for debug videos.""" if FLAGS.disable_ffmpeg: return common_video.IndividualFrameWriter(output_dir) else: output_path = os.path.join(output_dir, "video.avi") return common_video.WholeVideoWriter( fps=10, output_path=output_path, file_format="avi" )
def security_rule_delete(security_rule, security_group, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Delete a security rule within a specified security group. :param name: The name of the security rule to delete. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup ''' result = False netconn = __utils__['azurearm.get_client']('network', **kwargs) try: secrule = netconn.security_rules.delete( network_security_group_name=security_group, resource_group_name=resource_group, security_rule_name=security_rule ) secrule.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) return result
.. versionadded:: 2019.2.0 Delete a security rule within a specified security group. :param name: The name of the security rule to delete. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup
Below is the the instruction that describes the task: ### Input: .. versionadded:: 2019.2.0 Delete a security rule within a specified security group. :param name: The name of the security rule to delete. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup ### Response: def security_rule_delete(security_rule, security_group, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Delete a security rule within a specified security group. :param name: The name of the security rule to delete. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup ''' result = False netconn = __utils__['azurearm.get_client']('network', **kwargs) try: secrule = netconn.security_rules.delete( network_security_group_name=security_group, resource_group_name=resource_group, security_rule_name=security_rule ) secrule.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) return result
def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed')
Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals.
Below is the the instruction that describes the task: ### Input: Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. ### Response: def parse(self, s): """ Parses a :py:class:`~luigi.date_interval.DateInterval` from the input. see :py:mod:`luigi.date_interval` for details on the parsing of DateIntervals. """ # TODO: can we use xml.utils.iso8601 or something similar? from luigi import date_interval as d for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]: i = cls.parse(s) if i: return i raise ValueError('Invalid date interval - could not be parsed')
def set(self, value): """Set the value for this measurement, with some sanity checks.""" if self.is_value_set: # While we want to *allow* re-setting previously set measurements, we'd # rather promote the use of multidimensional measurements instead of # discarding data, so we make this somewhat chatty. _LOG.warning( 'Overriding previous measurement %s value of %s with %s, the old ' 'value will be lost. Use a dimensioned measurement if you need to ' 'save multiple values.', self.name, self.stored_value, value) if value is None: _LOG.warning('Measurement %s is set to None', self.name) self.stored_value = value self._cached_value = data.convert_to_base_types(value) self.is_value_set = True
Set the value for this measurement, with some sanity checks.
Below is the the instruction that describes the task: ### Input: Set the value for this measurement, with some sanity checks. ### Response: def set(self, value): """Set the value for this measurement, with some sanity checks.""" if self.is_value_set: # While we want to *allow* re-setting previously set measurements, we'd # rather promote the use of multidimensional measurements instead of # discarding data, so we make this somewhat chatty. _LOG.warning( 'Overriding previous measurement %s value of %s with %s, the old ' 'value will be lost. Use a dimensioned measurement if you need to ' 'save multiple values.', self.name, self.stored_value, value) if value is None: _LOG.warning('Measurement %s is set to None', self.name) self.stored_value = value self._cached_value = data.convert_to_base_types(value) self.is_value_set = True
def add_query(self, sql, auto_begin=True, bindings=None, abridge_sql_log=False): """Add a query to the current transaction. A thin wrapper around ConnectionManager.add_query. :param str sql: The SQL query to add :param bool auto_begin: If set and there is no transaction in progress, begin a new one. :param Optional[List[object]]: An optional list of bindings for the query. :param bool abridge_sql_log: If set, limit the raw sql logged to 512 characters """ return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
Add a query to the current transaction. A thin wrapper around ConnectionManager.add_query. :param str sql: The SQL query to add :param bool auto_begin: If set and there is no transaction in progress, begin a new one. :param Optional[List[object]]: An optional list of bindings for the query. :param bool abridge_sql_log: If set, limit the raw sql logged to 512 characters
Below is the the instruction that describes the task: ### Input: Add a query to the current transaction. A thin wrapper around ConnectionManager.add_query. :param str sql: The SQL query to add :param bool auto_begin: If set and there is no transaction in progress, begin a new one. :param Optional[List[object]]: An optional list of bindings for the query. :param bool abridge_sql_log: If set, limit the raw sql logged to 512 characters ### Response: def add_query(self, sql, auto_begin=True, bindings=None, abridge_sql_log=False): """Add a query to the current transaction. A thin wrapper around ConnectionManager.add_query. :param str sql: The SQL query to add :param bool auto_begin: If set and there is no transaction in progress, begin a new one. :param Optional[List[object]]: An optional list of bindings for the query. :param bool abridge_sql_log: If set, limit the raw sql logged to 512 characters """ return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
def _update_secrets(self): '''update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base. ''' self.secrets = read_client_secrets() if self.secrets is not None: if "registry" in self.secrets: if "base" in self.secrets['registry']: self.base = self.secrets['registry']['base'] self._update_base()
update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base.
Below is the the instruction that describes the task: ### Input: update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base. ### Response: def _update_secrets(self): '''update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base. ''' self.secrets = read_client_secrets() if self.secrets is not None: if "registry" in self.secrets: if "base" in self.secrets['registry']: self.base = self.secrets['registry']['base'] self._update_base()
def get_public_key_info(self): """ Analyze the public key information we have in our scriptSig. Returns {'status': true, 'type': 'singlesig' | 'multisig', 'public_keys': [...], 'num_sigs': ...} on success Returns {'error': ...} on error """ script_parts = virtualchain.btc_script_deserialize(base64.b64decode(self.sig)) if len(script_parts) < 2: return {'error': 'Signature script does not appear to encode any public keys'} if len(script_parts) == 2: # possibly p2pkh pubkey = script_parts[1].encode('hex') try: pubkey_object = virtualchain.ecdsalib.ecdsa_public_key(pubkey) except: return {'error': 'Could not instantiate public key {}'.format(pubkey)} if virtualchain.address_reencode(pubkey_object.address()) != virtualchain.address_reencode(self.address): return {'error': 'Public key does not match owner address {}'.format(self.address)} return {'status': True, 'type': 'singlesig', 'public_keys': [pubkey], 'num_sigs': 1} else: # possibly p2sh multisig. redeem_script = script_parts[-1] if virtualchain.address_reencode(virtualchain.btc_make_p2sh_address(redeem_script)) != virtualchain.address_reencode(self.address): return {'error': 'Multisig redeem script does not match owner address {}'.format(self.address)} m, pubkey_hexes = virtualchain.parse_multisig_redeemscript(redeem_script) for pkh in pubkey_hexes: try: virtualchain.ecdsalib.ecdsa_public_key(pkh) except: return {'error': 'Invalid public key string in multisig script'} return {'status': True, 'type': 'multisig', 'public_keys': pubkey_hexes, 'num_sigs': m}
Analyze the public key information we have in our scriptSig. Returns {'status': true, 'type': 'singlesig' | 'multisig', 'public_keys': [...], 'num_sigs': ...} on success Returns {'error': ...} on error
Below is the the instruction that describes the task: ### Input: Analyze the public key information we have in our scriptSig. Returns {'status': true, 'type': 'singlesig' | 'multisig', 'public_keys': [...], 'num_sigs': ...} on success Returns {'error': ...} on error ### Response: def get_public_key_info(self): """ Analyze the public key information we have in our scriptSig. Returns {'status': true, 'type': 'singlesig' | 'multisig', 'public_keys': [...], 'num_sigs': ...} on success Returns {'error': ...} on error """ script_parts = virtualchain.btc_script_deserialize(base64.b64decode(self.sig)) if len(script_parts) < 2: return {'error': 'Signature script does not appear to encode any public keys'} if len(script_parts) == 2: # possibly p2pkh pubkey = script_parts[1].encode('hex') try: pubkey_object = virtualchain.ecdsalib.ecdsa_public_key(pubkey) except: return {'error': 'Could not instantiate public key {}'.format(pubkey)} if virtualchain.address_reencode(pubkey_object.address()) != virtualchain.address_reencode(self.address): return {'error': 'Public key does not match owner address {}'.format(self.address)} return {'status': True, 'type': 'singlesig', 'public_keys': [pubkey], 'num_sigs': 1} else: # possibly p2sh multisig. redeem_script = script_parts[-1] if virtualchain.address_reencode(virtualchain.btc_make_p2sh_address(redeem_script)) != virtualchain.address_reencode(self.address): return {'error': 'Multisig redeem script does not match owner address {}'.format(self.address)} m, pubkey_hexes = virtualchain.parse_multisig_redeemscript(redeem_script) for pkh in pubkey_hexes: try: virtualchain.ecdsalib.ecdsa_public_key(pkh) except: return {'error': 'Invalid public key string in multisig script'} return {'status': True, 'type': 'multisig', 'public_keys': pubkey_hexes, 'num_sigs': m}
def replace_from_url(self, photo, url, **kwds): """ Endpoint: /photo/<id>replace.json Import a photo from the specified URL to replace an existing photo. """ result = self._client.post("/photo/%s/replace.json" % self._extract_id(photo), photo=url, **kwds)["result"] return Photo(self._client, result)
Endpoint: /photo/<id>replace.json Import a photo from the specified URL to replace an existing photo.
Below is the the instruction that describes the task: ### Input: Endpoint: /photo/<id>replace.json Import a photo from the specified URL to replace an existing photo. ### Response: def replace_from_url(self, photo, url, **kwds): """ Endpoint: /photo/<id>replace.json Import a photo from the specified URL to replace an existing photo. """ result = self._client.post("/photo/%s/replace.json" % self._extract_id(photo), photo=url, **kwds)["result"] return Photo(self._client, result)
def reset_calibrators(self, parameter): """ Reset all calibrators for the specified parameter to their original MDB value. """ req = mdb_pb2.ChangeParameterRequest() req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS calib_info = req.defaultCalibrator url = '/mdb/{}/{}/parameters/{}'.format( self._instance, self._processor, parameter) response = self._client.post_proto(url, data=req.SerializeToString())
Reset all calibrators for the specified parameter to their original MDB value.
Below is the the instruction that describes the task: ### Input: Reset all calibrators for the specified parameter to their original MDB value. ### Response: def reset_calibrators(self, parameter): """ Reset all calibrators for the specified parameter to their original MDB value. """ req = mdb_pb2.ChangeParameterRequest() req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS calib_info = req.defaultCalibrator url = '/mdb/{}/{}/parameters/{}'.format( self._instance, self._processor, parameter) response = self._client.post_proto(url, data=req.SerializeToString())
def prepare(args): """ %prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly. """ from operator import itemgetter from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(prepare.__doc__) p.add_option("--rearray_lib", default=None, help="name of the rearrayed library [default: %default]") p.add_option("--orig_lib_file", help="fasta file containing reads from the original libraries [default: %default]") g = OptionGroup(p, "Optional parameters") g.add_option("--output_folder", default="to_assemble", help="output folder to write the FASTA files to [default: %default]") p.add_option_group(g) opts, args = p.parse_args(args) if not opts.rearray_lib or not opts.orig_lib_file: logging.error("Please specify the required parameters") sys.exit(not p.print_help()) rearraylib, origlibfile = opts.rearray_lib, opts.orig_lib_file if not op.isfile(origlibfile): logging.error("Original library reads file `{0}` does not exist!".format(origlibfile)) sys.exit() lookuptblfile = rearraylib + '.lookup' logging.debug(lookuptblfile) if not op.isfile(lookuptblfile): logging.error("Lookup table file `{0}` does not exist!".format(lookuptblfile)) sys.exit() rearraylibfile = rearraylib + '.fasta' logging.debug(rearraylibfile) if not op.isfile(rearraylibfile): logging.error("Rearrayed library reads file `{0}` does not exist!".format(rearraylibfile)) sys.exit() origlibFasta = Fasta(origlibfile) 
rearraylibFasta = Fasta(rearraylibfile) origlibids = [o for o in origlibFasta.iterkeys_ordered()] rearraylibids = [r for r in rearraylibFasta.iterkeys_ordered()] if not op.isdir(opts.output_folder): logging.warning("Output directory `{0}` missing. Creating it now...".format(opts.output_folder)) os.makedirs(opts.output_folder) logfile = rearraylib + '.log' log = open(logfile, 'w') fp = open(lookuptblfile, 'r') for row in fp: origprefix, rearrayprefix = itemgetter(0,3)(row.split('\t')) libpair = origprefix + '_' + rearrayprefix outfile = opts.output_folder + '/' + libpair + '.fasta' ofp = open(outfile, 'w') for o in origlibids: if re.match(origprefix, o): SeqIO.write(origlibFasta[o], ofp, 'fasta') for r in rearraylibids: if re.match(rearrayprefix, r): SeqIO.write(rearraylibFasta[r], ofp, 'fasta') ofp.close() print(outfile, file=log) log.close() logging.debug('Wrote log file `{0}`'.format(logfile))
%prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly.
Below is the the instruction that describes the task: ### Input: %prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly. ### Response: def prepare(args): """ %prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly. 
""" from operator import itemgetter from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(prepare.__doc__) p.add_option("--rearray_lib", default=None, help="name of the rearrayed library [default: %default]") p.add_option("--orig_lib_file", help="fasta file containing reads from the original libraries [default: %default]") g = OptionGroup(p, "Optional parameters") g.add_option("--output_folder", default="to_assemble", help="output folder to write the FASTA files to [default: %default]") p.add_option_group(g) opts, args = p.parse_args(args) if not opts.rearray_lib or not opts.orig_lib_file: logging.error("Please specify the required parameters") sys.exit(not p.print_help()) rearraylib, origlibfile = opts.rearray_lib, opts.orig_lib_file if not op.isfile(origlibfile): logging.error("Original library reads file `{0}` does not exist!".format(origlibfile)) sys.exit() lookuptblfile = rearraylib + '.lookup' logging.debug(lookuptblfile) if not op.isfile(lookuptblfile): logging.error("Lookup table file `{0}` does not exist!".format(lookuptblfile)) sys.exit() rearraylibfile = rearraylib + '.fasta' logging.debug(rearraylibfile) if not op.isfile(rearraylibfile): logging.error("Rearrayed library reads file `{0}` does not exist!".format(rearraylibfile)) sys.exit() origlibFasta = Fasta(origlibfile) rearraylibFasta = Fasta(rearraylibfile) origlibids = [o for o in origlibFasta.iterkeys_ordered()] rearraylibids = [r for r in rearraylibFasta.iterkeys_ordered()] if not op.isdir(opts.output_folder): logging.warning("Output directory `{0}` missing. 
Creating it now...".format(opts.output_folder)) os.makedirs(opts.output_folder) logfile = rearraylib + '.log' log = open(logfile, 'w') fp = open(lookuptblfile, 'r') for row in fp: origprefix, rearrayprefix = itemgetter(0,3)(row.split('\t')) libpair = origprefix + '_' + rearrayprefix outfile = opts.output_folder + '/' + libpair + '.fasta' ofp = open(outfile, 'w') for o in origlibids: if re.match(origprefix, o): SeqIO.write(origlibFasta[o], ofp, 'fasta') for r in rearraylibids: if re.match(rearrayprefix, r): SeqIO.write(rearraylibFasta[r], ofp, 'fasta') ofp.close() print(outfile, file=log) log.close() logging.debug('Wrote log file `{0}`'.format(logfile))
def mod_hostname(hostname): ''' Modify hostname .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) CLI Example: .. code-block:: bash salt '*' network.mod_hostname master.saltstack.com ''' # # SunOS tested on SmartOS and OmniOS (Solaris 10 compatible) # Oracle Solaris 11 uses smf, currently not supported # # /etc/nodename is the hostname only, not fqdn # /etc/defaultdomain is the domain # /etc/hosts should have both fqdn and hostname entries # if hostname is None: return False hostname_cmd = salt.utils.path.which('hostnamectl') or salt.utils.path.which('hostname') if salt.utils.platform.is_sunos(): uname_cmd = '/usr/bin/uname' if salt.utils.platform.is_smartos() else salt.utils.path.which('uname') check_hostname_cmd = salt.utils.path.which('check-hostname') # Grab the old hostname so we know which hostname to change and then # change the hostname using the hostname command if hostname_cmd.endswith('hostnamectl'): result = __salt__['cmd.run_all']('{0} status'.format(hostname_cmd)) if 0 == result['retcode']: out = result['stdout'] for line in out.splitlines(): line = line.split(':') if 'Static hostname' in line[0]: o_hostname = line[1].strip() else: log.debug('%s was unable to get hostname', hostname_cmd) o_hostname = __salt__['network.get_hostname']() elif not salt.utils.platform.is_sunos(): # don't run hostname -f because -f is not supported on all platforms o_hostname = socket.getfqdn() else: # output: Hostname core OK: fully qualified as core.acheron.be o_hostname = __salt__['cmd.run'](check_hostname_cmd).split(' ')[-1] if hostname_cmd.endswith('hostnamectl'): result = __salt__['cmd.run_all']('{0} set-hostname {1}'.format( hostname_cmd, hostname, )) if result['retcode'] != 0: log.debug('%s was unable to set hostname. 
Error: %s', hostname_cmd, result['stderr']) return False elif not salt.utils.platform.is_sunos(): __salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname)) else: __salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0])) # Modify the /etc/hosts file to replace the old hostname with the # new hostname with salt.utils.files.fopen('/etc/hosts', 'r') as fp_: host_c = [salt.utils.stringutils.to_unicode(_l) for _l in fp_.readlines()] with salt.utils.files.fopen('/etc/hosts', 'w') as fh_: for host in host_c: host = host.split() try: host[host.index(o_hostname)] = hostname if salt.utils.platform.is_sunos(): # also set a copy of the hostname host[host.index(o_hostname.split('.')[0])] = hostname.split('.')[0] except ValueError: pass fh_.write(salt.utils.stringutils.to_str('\t'.join(host) + '\n')) # Modify the /etc/sysconfig/network configuration file to set the # new hostname if __grains__['os_family'] == 'RedHat': with salt.utils.files.fopen('/etc/sysconfig/network', 'r') as fp_: network_c = [salt.utils.stringutils.to_unicode(_l) for _l in fp_.readlines()] with salt.utils.files.fopen('/etc/sysconfig/network', 'w') as fh_: for net in network_c: if net.startswith('HOSTNAME'): old_hostname = net.split('=', 1)[1].rstrip() quote_type = salt.utils.stringutils.is_quoted(old_hostname) fh_.write(salt.utils.stringutils.to_str( 'HOSTNAME={1}{0}{1}\n'.format( salt.utils.stringutils.dequote(hostname), quote_type))) else: fh_.write(salt.utils.stringutils.to_str(net)) elif __grains__['os_family'] in ('Debian', 'NILinuxRT'): with salt.utils.files.fopen('/etc/hostname', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str(hostname + '\n')) if __grains__['lsb_distrib_id'] == 'nilrt': str_hostname = salt.utils.stringutils.to_str(hostname) nirtcfg_cmd = '/usr/local/natinst/bin/nirtcfg' nirtcfg_cmd += ' --set section=SystemSettings,token=\'Host_Name\',value=\'{0}\''.format(str_hostname) if __salt__['cmd.run_all'](nirtcfg_cmd)['retcode'] != 0: raise 
CommandExecutionError('Couldn\'t set hostname to: {0}\n'.format(str_hostname)) elif __grains__['os_family'] == 'OpenBSD': with salt.utils.files.fopen('/etc/myname', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str(hostname + '\n')) # Update /etc/nodename and /etc/defaultdomain on SunOS if salt.utils.platform.is_sunos(): with salt.utils.files.fopen('/etc/nodename', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str( hostname.split('.')[0] + '\n') ) with salt.utils.files.fopen('/etc/defaultdomain', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str( ".".join(hostname.split('.')[1:]) + '\n') ) return True
Modify hostname .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) CLI Example: .. code-block:: bash salt '*' network.mod_hostname master.saltstack.com
Below is the the instruction that describes the task: ### Input: Modify hostname .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) CLI Example: .. code-block:: bash salt '*' network.mod_hostname master.saltstack.com ### Response: def mod_hostname(hostname): ''' Modify hostname .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) CLI Example: .. code-block:: bash salt '*' network.mod_hostname master.saltstack.com ''' # # SunOS tested on SmartOS and OmniOS (Solaris 10 compatible) # Oracle Solaris 11 uses smf, currently not supported # # /etc/nodename is the hostname only, not fqdn # /etc/defaultdomain is the domain # /etc/hosts should have both fqdn and hostname entries # if hostname is None: return False hostname_cmd = salt.utils.path.which('hostnamectl') or salt.utils.path.which('hostname') if salt.utils.platform.is_sunos(): uname_cmd = '/usr/bin/uname' if salt.utils.platform.is_smartos() else salt.utils.path.which('uname') check_hostname_cmd = salt.utils.path.which('check-hostname') # Grab the old hostname so we know which hostname to change and then # change the hostname using the hostname command if hostname_cmd.endswith('hostnamectl'): result = __salt__['cmd.run_all']('{0} status'.format(hostname_cmd)) if 0 == result['retcode']: out = result['stdout'] for line in out.splitlines(): line = line.split(':') if 'Static hostname' in line[0]: o_hostname = line[1].strip() else: log.debug('%s was unable to get hostname', hostname_cmd) o_hostname = __salt__['network.get_hostname']() elif not salt.utils.platform.is_sunos(): # don't run hostname -f because -f is not supported on all platforms o_hostname = socket.getfqdn() else: # output: Hostname core OK: fully qualified as core.acheron.be o_hostname = __salt__['cmd.run'](check_hostname_cmd).split(' ')[-1] if hostname_cmd.endswith('hostnamectl'): result = __salt__['cmd.run_all']('{0} set-hostname {1}'.format( hostname_cmd, hostname, )) if result['retcode'] != 
0: log.debug('%s was unable to set hostname. Error: %s', hostname_cmd, result['stderr']) return False elif not salt.utils.platform.is_sunos(): __salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname)) else: __salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0])) # Modify the /etc/hosts file to replace the old hostname with the # new hostname with salt.utils.files.fopen('/etc/hosts', 'r') as fp_: host_c = [salt.utils.stringutils.to_unicode(_l) for _l in fp_.readlines()] with salt.utils.files.fopen('/etc/hosts', 'w') as fh_: for host in host_c: host = host.split() try: host[host.index(o_hostname)] = hostname if salt.utils.platform.is_sunos(): # also set a copy of the hostname host[host.index(o_hostname.split('.')[0])] = hostname.split('.')[0] except ValueError: pass fh_.write(salt.utils.stringutils.to_str('\t'.join(host) + '\n')) # Modify the /etc/sysconfig/network configuration file to set the # new hostname if __grains__['os_family'] == 'RedHat': with salt.utils.files.fopen('/etc/sysconfig/network', 'r') as fp_: network_c = [salt.utils.stringutils.to_unicode(_l) for _l in fp_.readlines()] with salt.utils.files.fopen('/etc/sysconfig/network', 'w') as fh_: for net in network_c: if net.startswith('HOSTNAME'): old_hostname = net.split('=', 1)[1].rstrip() quote_type = salt.utils.stringutils.is_quoted(old_hostname) fh_.write(salt.utils.stringutils.to_str( 'HOSTNAME={1}{0}{1}\n'.format( salt.utils.stringutils.dequote(hostname), quote_type))) else: fh_.write(salt.utils.stringutils.to_str(net)) elif __grains__['os_family'] in ('Debian', 'NILinuxRT'): with salt.utils.files.fopen('/etc/hostname', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str(hostname + '\n')) if __grains__['lsb_distrib_id'] == 'nilrt': str_hostname = salt.utils.stringutils.to_str(hostname) nirtcfg_cmd = '/usr/local/natinst/bin/nirtcfg' nirtcfg_cmd += ' --set section=SystemSettings,token=\'Host_Name\',value=\'{0}\''.format(str_hostname) if 
__salt__['cmd.run_all'](nirtcfg_cmd)['retcode'] != 0: raise CommandExecutionError('Couldn\'t set hostname to: {0}\n'.format(str_hostname)) elif __grains__['os_family'] == 'OpenBSD': with salt.utils.files.fopen('/etc/myname', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str(hostname + '\n')) # Update /etc/nodename and /etc/defaultdomain on SunOS if salt.utils.platform.is_sunos(): with salt.utils.files.fopen('/etc/nodename', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str( hostname.split('.')[0] + '\n') ) with salt.utils.files.fopen('/etc/defaultdomain', 'w') as fh_: fh_.write(salt.utils.stringutils.to_str( ".".join(hostname.split('.')[1:]) + '\n') ) return True
def _maybe_expand_trailing_dim(observed_time_series_tensor): """Ensures `observed_time_series_tensor` has a trailing dimension of size 1. The `tfd.LinearGaussianStateSpaceModel` Distribution has event shape of `[num_timesteps, observation_size]`, but canonical BSTS models are univariate, so their observation_size is always `1`. The extra trailing dimension gets annoying, so this method allows arguments with or without the extra dimension. There is no ambiguity except in the trivial special case where `num_timesteps = 1`; this can be avoided by specifying any unit-length series in the explicit `[num_timesteps, 1]` style. Most users should not call this method directly, and instead call `canonicalize_observed_time_series_with_mask`, which handles converting to `Tensor` and specifying an optional missingness mask. Args: observed_time_series_tensor: `Tensor` of shape `batch_shape + [num_timesteps, 1]` or `batch_shape + [num_timesteps]`, where `num_timesteps > 1`. Returns: expanded_time_series: `Tensor` of shape `batch_shape + [num_timesteps, 1]`. """ with tf.compat.v1.name_scope( 'maybe_expand_trailing_dim', values=[observed_time_series_tensor]): if (observed_time_series_tensor.shape.ndims is not None and tf.compat.dimension_value( observed_time_series_tensor.shape[-1]) is not None): expanded_time_series = ( observed_time_series_tensor if observed_time_series_tensor.shape[-1] == 1 else observed_time_series_tensor[..., tf.newaxis]) else: expanded_time_series = tf.cond( pred=tf.equal(tf.shape(input=observed_time_series_tensor)[-1], 1), true_fn=lambda: observed_time_series_tensor, false_fn=lambda: observed_time_series_tensor[..., tf.newaxis]) return expanded_time_series
Ensures `observed_time_series_tensor` has a trailing dimension of size 1. The `tfd.LinearGaussianStateSpaceModel` Distribution has event shape of `[num_timesteps, observation_size]`, but canonical BSTS models are univariate, so their observation_size is always `1`. The extra trailing dimension gets annoying, so this method allows arguments with or without the extra dimension. There is no ambiguity except in the trivial special case where `num_timesteps = 1`; this can be avoided by specifying any unit-length series in the explicit `[num_timesteps, 1]` style. Most users should not call this method directly, and instead call `canonicalize_observed_time_series_with_mask`, which handles converting to `Tensor` and specifying an optional missingness mask. Args: observed_time_series_tensor: `Tensor` of shape `batch_shape + [num_timesteps, 1]` or `batch_shape + [num_timesteps]`, where `num_timesteps > 1`. Returns: expanded_time_series: `Tensor` of shape `batch_shape + [num_timesteps, 1]`.
Below is the the instruction that describes the task: ### Input: Ensures `observed_time_series_tensor` has a trailing dimension of size 1. The `tfd.LinearGaussianStateSpaceModel` Distribution has event shape of `[num_timesteps, observation_size]`, but canonical BSTS models are univariate, so their observation_size is always `1`. The extra trailing dimension gets annoying, so this method allows arguments with or without the extra dimension. There is no ambiguity except in the trivial special case where `num_timesteps = 1`; this can be avoided by specifying any unit-length series in the explicit `[num_timesteps, 1]` style. Most users should not call this method directly, and instead call `canonicalize_observed_time_series_with_mask`, which handles converting to `Tensor` and specifying an optional missingness mask. Args: observed_time_series_tensor: `Tensor` of shape `batch_shape + [num_timesteps, 1]` or `batch_shape + [num_timesteps]`, where `num_timesteps > 1`. Returns: expanded_time_series: `Tensor` of shape `batch_shape + [num_timesteps, 1]`. ### Response: def _maybe_expand_trailing_dim(observed_time_series_tensor): """Ensures `observed_time_series_tensor` has a trailing dimension of size 1. The `tfd.LinearGaussianStateSpaceModel` Distribution has event shape of `[num_timesteps, observation_size]`, but canonical BSTS models are univariate, so their observation_size is always `1`. The extra trailing dimension gets annoying, so this method allows arguments with or without the extra dimension. There is no ambiguity except in the trivial special case where `num_timesteps = 1`; this can be avoided by specifying any unit-length series in the explicit `[num_timesteps, 1]` style. Most users should not call this method directly, and instead call `canonicalize_observed_time_series_with_mask`, which handles converting to `Tensor` and specifying an optional missingness mask. 
Args: observed_time_series_tensor: `Tensor` of shape `batch_shape + [num_timesteps, 1]` or `batch_shape + [num_timesteps]`, where `num_timesteps > 1`. Returns: expanded_time_series: `Tensor` of shape `batch_shape + [num_timesteps, 1]`. """ with tf.compat.v1.name_scope( 'maybe_expand_trailing_dim', values=[observed_time_series_tensor]): if (observed_time_series_tensor.shape.ndims is not None and tf.compat.dimension_value( observed_time_series_tensor.shape[-1]) is not None): expanded_time_series = ( observed_time_series_tensor if observed_time_series_tensor.shape[-1] == 1 else observed_time_series_tensor[..., tf.newaxis]) else: expanded_time_series = tf.cond( pred=tf.equal(tf.shape(input=observed_time_series_tensor)[-1], 1), true_fn=lambda: observed_time_series_tensor, false_fn=lambda: observed_time_series_tensor[..., tf.newaxis]) return expanded_time_series
def save(self): """ Easy save(insert or update) for db models """ try: if self.exists() is False: self.db.session.add(self) # self.db.session.merge(self) self.db.session.commit() except (Exception, BaseException) as error: if current_app.config['DEBUG']: raise error return None
Easy save(insert or update) for db models
Below is the the instruction that describes the task: ### Input: Easy save(insert or update) for db models ### Response: def save(self): """ Easy save(insert or update) for db models """ try: if self.exists() is False: self.db.session.add(self) # self.db.session.merge(self) self.db.session.commit() except (Exception, BaseException) as error: if current_app.config['DEBUG']: raise error return None
def select_one_album(albums): """Display the albums returned by search api. :params albums: API['result']['albums'] :return: a Album object. """ if len(albums) == 1: select_i = 0 else: table = PrettyTable(['Sequence', 'Album Name', 'Artist Name']) for i, album in enumerate(albums, 1): table.add_row([i, album['name'], album['artist']['name']]) click.echo(table) select_i = click.prompt('Select one album', type=int, default=1) while select_i < 1 or select_i > len(albums): select_i = click.prompt('Error Select! Select Again', type=int) album_id = albums[select_i-1]['id'] album_name = albums[select_i-1]['name'] album = Album(album_id, album_name) return album
Display the albums returned by search api. :params albums: API['result']['albums'] :return: a Album object.
Below is the the instruction that describes the task: ### Input: Display the albums returned by search api. :params albums: API['result']['albums'] :return: a Album object. ### Response: def select_one_album(albums): """Display the albums returned by search api. :params albums: API['result']['albums'] :return: a Album object. """ if len(albums) == 1: select_i = 0 else: table = PrettyTable(['Sequence', 'Album Name', 'Artist Name']) for i, album in enumerate(albums, 1): table.add_row([i, album['name'], album['artist']['name']]) click.echo(table) select_i = click.prompt('Select one album', type=int, default=1) while select_i < 1 or select_i > len(albums): select_i = click.prompt('Error Select! Select Again', type=int) album_id = albums[select_i-1]['id'] album_name = albums[select_i-1]['name'] album = Album(album_id, album_name) return album
def preloop(self): """adds the banner to the preloop""" lines = textwrap.dedent(self.banner).split("\n") for line in lines: Console._print("BLUE", "", line)
adds the banner to the preloop
Below is the the instruction that describes the task: ### Input: adds the banner to the preloop ### Response: def preloop(self): """adds the banner to the preloop""" lines = textwrap.dedent(self.banner).split("\n") for line in lines: Console._print("BLUE", "", line)
def join_path(a, *p): """Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows.""" path = a for b in p: if len(b) == 0: continue if b.startswith('/'): path += b[1:] elif path == '' or path.endswith('/'): path += b else: path += '/' + b # END for each path token to add return path
Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows.
Below is the the instruction that describes the task: ### Input: Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows. ### Response: def join_path(a, *p): """Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows.""" path = a for b in p: if len(b) == 0: continue if b.startswith('/'): path += b[1:] elif path == '' or path.endswith('/'): path += b else: path += '/' + b # END for each path token to add return path
def bisect_left(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid return lo
Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
Below is the the instruction that describes the task: ### Input: Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. ### Response: def bisect_left(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid return lo
def evaluate(self, m): """Search for comments.""" g = m.groupdict() if g["strings"]: self.line_num += g['strings'].count('\n') elif g["code"]: self.line_num += g["code"].count('\n') else: if g['block']: self.evaluate_block(g) elif self.stylesheets != CSS: if g['start'] is None: self.evaluate_inline_tail(g) else: self.evaluate_inline(g) self.line_num += g['comments'].count('\n')
Search for comments.
Below is the the instruction that describes the task: ### Input: Search for comments. ### Response: def evaluate(self, m): """Search for comments.""" g = m.groupdict() if g["strings"]: self.line_num += g['strings'].count('\n') elif g["code"]: self.line_num += g["code"].count('\n') else: if g['block']: self.evaluate_block(g) elif self.stylesheets != CSS: if g['start'] is None: self.evaluate_inline_tail(g) else: self.evaluate_inline(g) self.line_num += g['comments'].count('\n')
def _write_recordio(f, data): """Writes a single data point as a RecordIO record to the given file.""" length = len(data) f.write(struct.pack('I', _kmagic)) f.write(struct.pack('I', length)) pad = (((length + 3) >> 2) << 2) - length f.write(data) f.write(padding[pad])
Writes a single data point as a RecordIO record to the given file.
Below is the the instruction that describes the task: ### Input: Writes a single data point as a RecordIO record to the given file. ### Response: def _write_recordio(f, data): """Writes a single data point as a RecordIO record to the given file.""" length = len(data) f.write(struct.pack('I', _kmagic)) f.write(struct.pack('I', length)) pad = (((length + 3) >> 2) << 2) - length f.write(data) f.write(padding[pad])
def read_graph(filename, directed=False, weighted=False, default_weight=None): """Read a graph from a text file :param filename: plain text file. All numbers are separated by space. Starts with a line containing n (#vertices) and m (#edges). Then m lines follow, for each edge. Vertices are numbered from 0 to n-1. Line for unweighted edge u,v contains two integers u, v. Line for weighted edge u,v contains three integers u, v, w[u,v]. :param directed: true for a directed graph, false for undirected :param weighted: true for an edge weighted graph :returns: graph in listlist format, possibly followed by weight matrix :complexity: O(n + m) for unweighted graph, :math:`O(n^2)` for weighted graph """ with open(filename, 'r') as f: while True: line = f.readline() # ignore leading comments if line[0] != '#': break nb_nodes, nb_edges = tuple(map(int, line.split())) graph = [[] for u in range(nb_nodes)] if weighted: weight = [[default_weight] * nb_nodes for v in range(nb_nodes)] for v in range(nb_nodes): weight[v][v] = 0 for _ in range(nb_edges): u, v, w = readtab(f, int) graph[u].append(v) weight[u][v] = w if not directed: graph[v].append(u) weight[v][u] = w return graph, weight else: for _ in range(nb_edges): # si le fichier contient des poids, ils seront ignorés u, v = readtab(f, int)[:2] graph[u].append(v) if not directed: graph[v].append(u) return graph
Read a graph from a text file :param filename: plain text file. All numbers are separated by space. Starts with a line containing n (#vertices) and m (#edges). Then m lines follow, for each edge. Vertices are numbered from 0 to n-1. Line for unweighted edge u,v contains two integers u, v. Line for weighted edge u,v contains three integers u, v, w[u,v]. :param directed: true for a directed graph, false for undirected :param weighted: true for an edge weighted graph :returns: graph in listlist format, possibly followed by weight matrix :complexity: O(n + m) for unweighted graph, :math:`O(n^2)` for weighted graph
Below is the the instruction that describes the task: ### Input: Read a graph from a text file :param filename: plain text file. All numbers are separated by space. Starts with a line containing n (#vertices) and m (#edges). Then m lines follow, for each edge. Vertices are numbered from 0 to n-1. Line for unweighted edge u,v contains two integers u, v. Line for weighted edge u,v contains three integers u, v, w[u,v]. :param directed: true for a directed graph, false for undirected :param weighted: true for an edge weighted graph :returns: graph in listlist format, possibly followed by weight matrix :complexity: O(n + m) for unweighted graph, :math:`O(n^2)` for weighted graph ### Response: def read_graph(filename, directed=False, weighted=False, default_weight=None): """Read a graph from a text file :param filename: plain text file. All numbers are separated by space. Starts with a line containing n (#vertices) and m (#edges). Then m lines follow, for each edge. Vertices are numbered from 0 to n-1. Line for unweighted edge u,v contains two integers u, v. Line for weighted edge u,v contains three integers u, v, w[u,v]. 
:param directed: true for a directed graph, false for undirected :param weighted: true for an edge weighted graph :returns: graph in listlist format, possibly followed by weight matrix :complexity: O(n + m) for unweighted graph, :math:`O(n^2)` for weighted graph """ with open(filename, 'r') as f: while True: line = f.readline() # ignore leading comments if line[0] != '#': break nb_nodes, nb_edges = tuple(map(int, line.split())) graph = [[] for u in range(nb_nodes)] if weighted: weight = [[default_weight] * nb_nodes for v in range(nb_nodes)] for v in range(nb_nodes): weight[v][v] = 0 for _ in range(nb_edges): u, v, w = readtab(f, int) graph[u].append(v) weight[u][v] = w if not directed: graph[v].append(u) weight[v][u] = w return graph, weight else: for _ in range(nb_edges): # si le fichier contient des poids, ils seront ignorés u, v = readtab(f, int)[:2] graph[u].append(v) if not directed: graph[v].append(u) return graph
def parse_rune_html(html: str, url: str) -> dict: """A function that returns a dict representation of the Runeforge.gg page for a specific champ Parameters ---------- html : str The string representation of the html obtained via a GET request url : str The URL for the runeforge page being parsed. Returns ------- dict Contains champ rune info described in ``RuneClient`` and ``AioRuneClient``. """ soup = BeautifulSoup(html, 'lxml') # The soup stuff champ = soup.find('h1', class_='champion-header--title').text title = soup.find('h2', class_='loadout-title').text description = soup.find('p').text # Names of the Rune trees p_tree, s_tree = [x.text for x in soup.find_all('h2', class_='rune-path--name')] # List of all the runes together all_runes = soup.find_all('a', class_='rune-name') # The keystone (duh) keystone = all_runes[0].text # Rest of the runes in the primary tree, sans keystone p_rest = [x.text for x in all_runes[1:4]] # The runes in the secondary tree s_rest = [x.text for x in all_runes[4:7]] return {'name': champ, 'title': title, 'description': description, 'url': url, 'runes': {'primary': {'name': p_tree, 'keystone': keystone, 'rest': p_rest}, 'secondary': {'name': s_tree, 'rest': s_rest}}}
A function that returns a dict representation of the Runeforge.gg page for a specific champ Parameters ---------- html : str The string representation of the html obtained via a GET request url : str The URL for the runeforge page being parsed. Returns ------- dict Contains champ rune info described in ``RuneClient`` and ``AioRuneClient``.
Below is the the instruction that describes the task: ### Input: A function that returns a dict representation of the Runeforge.gg page for a specific champ Parameters ---------- html : str The string representation of the html obtained via a GET request url : str The URL for the runeforge page being parsed. Returns ------- dict Contains champ rune info described in ``RuneClient`` and ``AioRuneClient``. ### Response: def parse_rune_html(html: str, url: str) -> dict: """A function that returns a dict representation of the Runeforge.gg page for a specific champ Parameters ---------- html : str The string representation of the html obtained via a GET request url : str The URL for the runeforge page being parsed. Returns ------- dict Contains champ rune info described in ``RuneClient`` and ``AioRuneClient``. """ soup = BeautifulSoup(html, 'lxml') # The soup stuff champ = soup.find('h1', class_='champion-header--title').text title = soup.find('h2', class_='loadout-title').text description = soup.find('p').text # Names of the Rune trees p_tree, s_tree = [x.text for x in soup.find_all('h2', class_='rune-path--name')] # List of all the runes together all_runes = soup.find_all('a', class_='rune-name') # The keystone (duh) keystone = all_runes[0].text # Rest of the runes in the primary tree, sans keystone p_rest = [x.text for x in all_runes[1:4]] # The runes in the secondary tree s_rest = [x.text for x in all_runes[4:7]] return {'name': champ, 'title': title, 'description': description, 'url': url, 'runes': {'primary': {'name': p_tree, 'keystone': keystone, 'rest': p_rest}, 'secondary': {'name': s_tree, 'rest': s_rest}}}
def setEditable(self, editable): """setter to _editable. apply changes while changing dtype. Raises: TypeError: if editable is not of type bool. Args: editable (bool): apply changes while changing dtype. """ if not isinstance(editable, bool): raise TypeError('Argument is not of type bool') self._editable = editable
setter to _editable. apply changes while changing dtype. Raises: TypeError: if editable is not of type bool. Args: editable (bool): apply changes while changing dtype.
Below is the the instruction that describes the task: ### Input: setter to _editable. apply changes while changing dtype. Raises: TypeError: if editable is not of type bool. Args: editable (bool): apply changes while changing dtype. ### Response: def setEditable(self, editable): """setter to _editable. apply changes while changing dtype. Raises: TypeError: if editable is not of type bool. Args: editable (bool): apply changes while changing dtype. """ if not isinstance(editable, bool): raise TypeError('Argument is not of type bool') self._editable = editable
def p_expression_lor(self, p): 'expression : expression LOR expression' p[0] = Lor(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
expression : expression LOR expression
Below is the the instruction that describes the task: ### Input: expression : expression LOR expression ### Response: def p_expression_lor(self, p): 'expression : expression LOR expression' p[0] = Lor(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def add_fields(store_name, field_names): """ A class-decorator that creates layout managers with a set of named fields. """ def decorate(cls): def _add(index, name): def _set_dir(self, value): getattr(self, store_name)[index] = value def _get_dir(self): return getattr(self, store_name)[index] setattr(cls, name, property(_get_dir, _set_dir)) for index, field_name in enumerate(field_names): _add(index, field_name) return cls return decorate
A class-decorator that creates layout managers with a set of named fields.
Below is the the instruction that describes the task: ### Input: A class-decorator that creates layout managers with a set of named fields. ### Response: def add_fields(store_name, field_names): """ A class-decorator that creates layout managers with a set of named fields. """ def decorate(cls): def _add(index, name): def _set_dir(self, value): getattr(self, store_name)[index] = value def _get_dir(self): return getattr(self, store_name)[index] setattr(cls, name, property(_get_dir, _set_dir)) for index, field_name in enumerate(field_names): _add(index, field_name) return cls return decorate
def get_worfklow_spec(self): """ Generates and caches the workflow spec package from BPMN diagrams that read from disk Returns: SpiffWorkflow Spec object. """ # TODO: convert from in-process to redis based caching if self.current.workflow_name not in self.workflow_spec_cache: # path = self.find_workflow_path() # spec_package = InMemoryPackager.package_in_memory(self.current.workflow_name, path) # spec = BpmnSerializer().deserialize_workflow_spec(spec_package) try: self.current.wf_object = BPMNWorkflow.objects.get(name=self.current.workflow_name) except ObjectDoesNotExist: self.current.wf_object = BPMNWorkflow.objects.get(name='not_found') self.current.task_data['non-existent-wf'] = self.current.workflow_name self.current.workflow_name = 'not_found' xml_content = self.current.wf_object.xml.body spec = ZopsSerializer().deserialize_workflow_spec(xml_content, self.current.workflow_name) spec.wf_id = self.current.wf_object.key self.workflow_spec_cache[self.current.workflow_name] = spec return self.workflow_spec_cache[self.current.workflow_name]
Generates and caches the workflow spec package from BPMN diagrams that read from disk Returns: SpiffWorkflow Spec object.
Below is the the instruction that describes the task: ### Input: Generates and caches the workflow spec package from BPMN diagrams that read from disk Returns: SpiffWorkflow Spec object. ### Response: def get_worfklow_spec(self): """ Generates and caches the workflow spec package from BPMN diagrams that read from disk Returns: SpiffWorkflow Spec object. """ # TODO: convert from in-process to redis based caching if self.current.workflow_name not in self.workflow_spec_cache: # path = self.find_workflow_path() # spec_package = InMemoryPackager.package_in_memory(self.current.workflow_name, path) # spec = BpmnSerializer().deserialize_workflow_spec(spec_package) try: self.current.wf_object = BPMNWorkflow.objects.get(name=self.current.workflow_name) except ObjectDoesNotExist: self.current.wf_object = BPMNWorkflow.objects.get(name='not_found') self.current.task_data['non-existent-wf'] = self.current.workflow_name self.current.workflow_name = 'not_found' xml_content = self.current.wf_object.xml.body spec = ZopsSerializer().deserialize_workflow_spec(xml_content, self.current.workflow_name) spec.wf_id = self.current.wf_object.key self.workflow_spec_cache[self.current.workflow_name] = spec return self.workflow_spec_cache[self.current.workflow_name]
def setDelta(self, delta): """ Sets the time delta for this widget to the inputed delta. :param delta | <datetime.timedelta> """ days = int(delta.days) secs = int(delta.total_seconds()) direction = 'from now' if secs < 0: direction = 'ago' if days and days % 365 == 0: number = days / 365 unit = 'year(s)' elif days and days % 30 == 0: number = days / 30 unit = 'month(s)' elif days and days % 7 == 0: number = days / 7 unit = 'week(s)' elif days: number = days unit = 'day(s)' elif secs % 3600 == 0: number = secs / 3600 unit = 'hour(s)' elif secs % 60 == 0: number = secs / 60 unit = 'minute(s)' else: number = secs unit = 'second(s)' self._numberSpinner.setValue(abs(int(number))) self._unitCombo.setCurrentIndex(self._unitCombo.findText(unit)) index = self._directionCombo.findText(direction) self._directionCombo.setCurrentIndex(index)
Sets the time delta for this widget to the inputed delta. :param delta | <datetime.timedelta>
Below is the the instruction that describes the task: ### Input: Sets the time delta for this widget to the inputed delta. :param delta | <datetime.timedelta> ### Response: def setDelta(self, delta): """ Sets the time delta for this widget to the inputed delta. :param delta | <datetime.timedelta> """ days = int(delta.days) secs = int(delta.total_seconds()) direction = 'from now' if secs < 0: direction = 'ago' if days and days % 365 == 0: number = days / 365 unit = 'year(s)' elif days and days % 30 == 0: number = days / 30 unit = 'month(s)' elif days and days % 7 == 0: number = days / 7 unit = 'week(s)' elif days: number = days unit = 'day(s)' elif secs % 3600 == 0: number = secs / 3600 unit = 'hour(s)' elif secs % 60 == 0: number = secs / 60 unit = 'minute(s)' else: number = secs unit = 'second(s)' self._numberSpinner.setValue(abs(int(number))) self._unitCombo.setCurrentIndex(self._unitCombo.findText(unit)) index = self._directionCombo.findText(direction) self._directionCombo.setCurrentIndex(index)
def _add_photo_to_album( self, photoId, album): """*add a photo with the given photo ID to a named album* **Key Arguments:** - ``photoId`` -- the ID of the photo to add to the album - ``album`` -- the name of the album to add the photo to **Return:** - None **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code """ self.log.info('starting the ``_add_photo_to_album`` method') try: response = requests.get( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.getList", "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') albumId = False for s in response.json()["photosets"]["photoset"]: if s["title"]["_content"].lower().strip() == album.strip(): albumId = s["id"] if albumId: try: response = requests.post( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.addPhoto", "photoset_id": albumId, "photo_id": photoId, "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') else: try: response = requests.post( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.create", "title": album, "primary_photo_id": photoId, "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') self.log.info('completed the ``_add_photo_to_album`` method') return None
*add a photo with the given photo ID to a named album* **Key Arguments:** - ``photoId`` -- the ID of the photo to add to the album - ``album`` -- the name of the album to add the photo to **Return:** - None **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code
Below is the the instruction that describes the task: ### Input: *add a photo with the given photo ID to a named album* **Key Arguments:** - ``photoId`` -- the ID of the photo to add to the album - ``album`` -- the name of the album to add the photo to **Return:** - None **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code ### Response: def _add_photo_to_album( self, photoId, album): """*add a photo with the given photo ID to a named album* **Key Arguments:** - ``photoId`` -- the ID of the photo to add to the album - ``album`` -- the name of the album to add the photo to **Return:** - None **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code """ self.log.info('starting the ``_add_photo_to_album`` method') try: response = requests.get( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.getList", "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') albumId = False for s in response.json()["photosets"]["photoset"]: if s["title"]["_content"].lower().strip() == album.strip(): albumId = s["id"] if albumId: try: response = requests.post( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.addPhoto", "photoset_id": albumId, "photo_id": photoId, "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') else: try: response = requests.post( url="https://api.flickr.com/services/rest/", params={ "method": "flickr.photosets.create", "title": album, "primary_photo_id": photoId, "format": "json", "nojsoncallback": "1", }, auth=self.auth, ) except requests.exceptions.RequestException: print('HTTP Request failed') self.log.info('completed the ``_add_photo_to_album`` method') return 
None
def make_zip(self, filename, files=None, path=None, clone=None, compress=True): """Create a Zip archive. Provide any of the following: files - A list of files path - A directory of .txt files clone - Copy any files from a zip archive not specified above Duplicate files will be ignored. The 'files' argument will be used first, then files found in the specified 'path', then in the specified 'clone' archive. """ if filename and os.path.exists(filename): raise IOError('File exists: %s'%filename) files = files or [] arcnames = [] if path and os.path.isdir(path): files += glob.glob(os.path.join(path, '*.txt')) if compress: compress_level = zipfile.ZIP_DEFLATED else: compress_level = zipfile.ZIP_STORED # Write files. self.log("Creating zip archive: %s"%filename) zf = zipfile.ZipFile(filename, 'a', compression=compress_level) for f in files: base = os.path.basename(f) if base in arcnames: self.log('... skipping: %s'%f) else: self.log('... adding: %s'%f) arcnames.append(base) zf.write(f, base) # Clone from existing zip archive. if clone and os.path.exists(clone): zc = zipfile.ZipFile(clone) for f in zc.namelist(): base = os.path.basename(f) if os.path.splitext(base)[-1] != '.txt': pass # self.log('... skipping from clone: %s'%f) elif base in arcnames: self.log('... skipping from clone: %s'%f) else: self.log('... adding from clone: %s'%f) arcnames.append(base) with zc.open(f) as i: data = i.read() zf.writestr(base, data) zf.close()
Create a Zip archive. Provide any of the following: files - A list of files path - A directory of .txt files clone - Copy any files from a zip archive not specified above Duplicate files will be ignored. The 'files' argument will be used first, then files found in the specified 'path', then in the specified 'clone' archive.
Below is the the instruction that describes the task: ### Input: Create a Zip archive. Provide any of the following: files - A list of files path - A directory of .txt files clone - Copy any files from a zip archive not specified above Duplicate files will be ignored. The 'files' argument will be used first, then files found in the specified 'path', then in the specified 'clone' archive. ### Response: def make_zip(self, filename, files=None, path=None, clone=None, compress=True): """Create a Zip archive. Provide any of the following: files - A list of files path - A directory of .txt files clone - Copy any files from a zip archive not specified above Duplicate files will be ignored. The 'files' argument will be used first, then files found in the specified 'path', then in the specified 'clone' archive. """ if filename and os.path.exists(filename): raise IOError('File exists: %s'%filename) files = files or [] arcnames = [] if path and os.path.isdir(path): files += glob.glob(os.path.join(path, '*.txt')) if compress: compress_level = zipfile.ZIP_DEFLATED else: compress_level = zipfile.ZIP_STORED # Write files. self.log("Creating zip archive: %s"%filename) zf = zipfile.ZipFile(filename, 'a', compression=compress_level) for f in files: base = os.path.basename(f) if base in arcnames: self.log('... skipping: %s'%f) else: self.log('... adding: %s'%f) arcnames.append(base) zf.write(f, base) # Clone from existing zip archive. if clone and os.path.exists(clone): zc = zipfile.ZipFile(clone) for f in zc.namelist(): base = os.path.basename(f) if os.path.splitext(base)[-1] != '.txt': pass # self.log('... skipping from clone: %s'%f) elif base in arcnames: self.log('... skipping from clone: %s'%f) else: self.log('... adding from clone: %s'%f) arcnames.append(base) with zc.open(f) as i: data = i.read() zf.writestr(base, data) zf.close()
def _check_etag(self, etag): """Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklet. One of them would return first and set the first value. Args: etag: etag from a GCS HTTP response. None if etag is not part of the response header. It could be None for example in the case of GCS composite file. Raises: ValueError: if two etags are not equal. """ if etag is None: return elif self._etag is None: self._etag = etag elif self._etag != etag: raise ValueError('File on GCS has changed while reading.')
Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklet. One of them would return first and set the first value. Args: etag: etag from a GCS HTTP response. None if etag is not part of the response header. It could be None for example in the case of GCS composite file. Raises: ValueError: if two etags are not equal.
Below is the the instruction that describes the task: ### Input: Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklet. One of them would return first and set the first value. Args: etag: etag from a GCS HTTP response. None if etag is not part of the response header. It could be None for example in the case of GCS composite file. Raises: ValueError: if two etags are not equal. ### Response: def _check_etag(self, etag): """Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklet. One of them would return first and set the first value. Args: etag: etag from a GCS HTTP response. None if etag is not part of the response header. It could be None for example in the case of GCS composite file. Raises: ValueError: if two etags are not equal. """ if etag is None: return elif self._etag is None: self._etag = etag elif self._etag != etag: raise ValueError('File on GCS has changed while reading.')
def output(self, to=None, *args, **kwargs): '''Outputs the set text''' to.write(str(self._value))
Outputs the set text
Below is the the instruction that describes the task: ### Input: Outputs the set text ### Response: def output(self, to=None, *args, **kwargs): '''Outputs the set text''' to.write(str(self._value))
def getVarianceComps(self, univariance=False): """ Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix] """ RV=sp.zeros((self.P,self.n_randEffs)) for term_i in range(self.n_randEffs): RV[:,term_i] = self.getTraitCovar(term_i).diagonal() if univariance: RV /= RV.sum(1)[:,sp.newaxis] return RV
Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix]
Below is the the instruction that describes the task: ### Input: Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix] ### Response: def getVarianceComps(self, univariance=False): """ Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix] """ RV=sp.zeros((self.P,self.n_randEffs)) for term_i in range(self.n_randEffs): RV[:,term_i] = self.getTraitCovar(term_i).diagonal() if univariance: RV /= RV.sum(1)[:,sp.newaxis] return RV
def check(self, request, user): """ check if the service is well configured :return: Boolean """ us = UserService.objects.get(user=user, name='ServiceWallabag') params = {'username': us.username, 'password': us.password, 'client_id': us.client_id, 'client_secret': us.client_secret} try: Wall.get_token(host=us.host, **params) return True except requests.exceptions.HTTPError as e: return e
check if the service is well configured :return: Boolean
Below is the the instruction that describes the task: ### Input: check if the service is well configured :return: Boolean ### Response: def check(self, request, user): """ check if the service is well configured :return: Boolean """ us = UserService.objects.get(user=user, name='ServiceWallabag') params = {'username': us.username, 'password': us.password, 'client_id': us.client_id, 'client_secret': us.client_secret} try: Wall.get_token(host=us.host, **params) return True except requests.exceptions.HTTPError as e: return e
def parse_json(self, req, name, field): """Pull a json value from the request.""" if not (req.body and is_json_request(req)): return core.missing json_data = req.json if json_data is None: return core.missing return core.get_value(json_data, name, field, allow_many_nested=True)
Pull a json value from the request.
Below is the the instruction that describes the task: ### Input: Pull a json value from the request. ### Response: def parse_json(self, req, name, field): """Pull a json value from the request.""" if not (req.body and is_json_request(req)): return core.missing json_data = req.json if json_data is None: return core.missing return core.get_value(json_data, name, field, allow_many_nested=True)
def _OnRtnNotice(self, pTradingNoticeInfo: CThostFtdcTradingNoticeInfoField): '''交易提醒''' msg = pTradingNoticeInfo.getFieldContent() if len(msg) > 0: threading.Thread(target=self.OnRtnNotice, args=(self, pTradingNoticeInfo.getSendTime(), msg)).start()
交易提醒
Below is the the instruction that describes the task: ### Input: 交易提醒 ### Response: def _OnRtnNotice(self, pTradingNoticeInfo: CThostFtdcTradingNoticeInfoField): '''交易提醒''' msg = pTradingNoticeInfo.getFieldContent() if len(msg) > 0: threading.Thread(target=self.OnRtnNotice, args=(self, pTradingNoticeInfo.getSendTime(), msg)).start()
def load(config_path, run_number=0, device='cuda:0'): """ Load a ModelConfig from filename """ model_config = ModelConfig.from_file(config_path, run_number, device=device) return model_config
Load a ModelConfig from filename
Below is the the instruction that describes the task: ### Input: Load a ModelConfig from filename ### Response: def load(config_path, run_number=0, device='cuda:0'): """ Load a ModelConfig from filename """ model_config = ModelConfig.from_file(config_path, run_number, device=device) return model_config
def set_current_limit(self, channel, value, unit='A'): '''Setting current limit Note: same limit for all channels. ''' dac_offset = self._ch_cal[channel]['DACI']['offset'] dac_gain = self._ch_cal[channel]['DACI']['gain'] if unit == 'raw': value = value elif unit == 'A': value = int((value - dac_offset) / dac_gain) elif unit == 'mA': value = int((value / 1000 - dac_offset) / dac_gain) else: raise TypeError("Invalid unit type.") DacMax5380._set_dac_value(self, channel, value)
Setting current limit Note: same limit for all channels.
Below is the the instruction that describes the task: ### Input: Setting current limit Note: same limit for all channels. ### Response: def set_current_limit(self, channel, value, unit='A'): '''Setting current limit Note: same limit for all channels. ''' dac_offset = self._ch_cal[channel]['DACI']['offset'] dac_gain = self._ch_cal[channel]['DACI']['gain'] if unit == 'raw': value = value elif unit == 'A': value = int((value - dac_offset) / dac_gain) elif unit == 'mA': value = int((value / 1000 - dac_offset) / dac_gain) else: raise TypeError("Invalid unit type.") DacMax5380._set_dac_value(self, channel, value)
def get(cls, resource_id=None, parent_id=None, grandparent_id=None): """"Retrieves the required resource.""" return cls._get(resource_id, parent_id, grandparent_id)
Retrieves the required resource.
Below is the the instruction that describes the task: ### Input: Retrieves the required resource. ### Response: def get(cls, resource_id=None, parent_id=None, grandparent_id=None): """"Retrieves the required resource.""" return cls._get(resource_id, parent_id, grandparent_id)
def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT): ''' This function can be used to build a python package representation of pyschema classes. One module is created per namespace in a package matching the namespace hierarchy. Args: classes: A collection of classes to build the package from target_folder: Root folder of the package parent_package: Prepended on all import statements in order to support absolute imports. parent_package is not used when building the package file structure indent: Indent level. Defaults to 4 spaces ''' PackageBuilder(target_folder, parent_package, indent).from_classes_with_refs(classes)
This function can be used to build a python package representation of pyschema classes. One module is created per namespace in a package matching the namespace hierarchy. Args: classes: A collection of classes to build the package from target_folder: Root folder of the package parent_package: Prepended on all import statements in order to support absolute imports. parent_package is not used when building the package file structure indent: Indent level. Defaults to 4 spaces
Below is the the instruction that describes the task: ### Input: This function can be used to build a python package representation of pyschema classes. One module is created per namespace in a package matching the namespace hierarchy. Args: classes: A collection of classes to build the package from target_folder: Root folder of the package parent_package: Prepended on all import statements in order to support absolute imports. parent_package is not used when building the package file structure indent: Indent level. Defaults to 4 spaces ### Response: def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT): ''' This function can be used to build a python package representation of pyschema classes. One module is created per namespace in a package matching the namespace hierarchy. Args: classes: A collection of classes to build the package from target_folder: Root folder of the package parent_package: Prepended on all import statements in order to support absolute imports. parent_package is not used when building the package file structure indent: Indent level. Defaults to 4 spaces ''' PackageBuilder(target_folder, parent_package, indent).from_classes_with_refs(classes)
def _get_user_dir(self): ''' Get the user's home directory. ''' try: # This should work in both Windows and Unix environments for envname in ['USERPROFILE', 'HOME']: user_dir = os.getenv(envname) if user_dir is not None: return user_dir except KeyboardInterrupt as e: raise e except Exception: pass return ''
Get the user's home directory.
Below is the the instruction that describes the task: ### Input: Get the user's home directory. ### Response: def _get_user_dir(self): ''' Get the user's home directory. ''' try: # This should work in both Windows and Unix environments for envname in ['USERPROFILE', 'HOME']: user_dir = os.getenv(envname) if user_dir is not None: return user_dir except KeyboardInterrupt as e: raise e except Exception: pass return ''
def ctrl_x(self, x, to=None):
    """
    Send a character to the currently active element with Ctrl held
    down. Pressing and releasing Ctrl is handled by this method.
    """
    keys = [Keys.CONTROL, x, Keys.CONTROL]

    # Work around a Selenium bug that shows up with Firefox on Windows
    # and with Chrome on Linux; reported at:
    #
    # https://code.google.com/p/selenium/issues/detail?id=7303
    #
    needs_pause = (self.firefox and self.windows) or (self.linux and self.chrome)
    if needs_pause:
        keys.append(Keys.PAUSE)

    if to is not None:
        self.send_keys(to, keys)
    else:
        ActionChains(self.driver) \
            .send_keys(keys) \
            .perform()
Sends a character to the currently active element with Ctrl pressed. This method takes care of pressing and releasing Ctrl.
Below is the the instruction that describes the task: ### Input: Sends a character to the currently active element with Ctrl pressed. This method takes care of pressing and releasing Ctrl. ### Response: def ctrl_x(self, x, to=None): """ Sends a character to the currently active element with Ctrl pressed. This method takes care of pressing and releasing Ctrl. """ seq = [Keys.CONTROL, x, Keys.CONTROL] # This works around a bug in Selenium that happens in FF on # Windows, and in Chrome on Linux. # # The bug was reported here: # # https://code.google.com/p/selenium/issues/detail?id=7303 # if (self.firefox and self.windows) or (self.linux and self.chrome): seq.append(Keys.PAUSE) if to is None: ActionChains(self.driver) \ .send_keys(seq) \ .perform() else: self.send_keys(to, seq)
def distinct(self, key):
    """Get a list of distinct values for `key` among all documents in
    the result set of this query.

    Raises :class:`TypeError` if `key` is not an instance of
    :class:`basestring` (:class:`str` in python 3).

    The :meth:`distinct` method obeys the
    :attr:`~pymongo.collection.Collection.read_preference` of the
    :class:`~pymongo.collection.Collection` instance on which
    :meth:`~pymongo.collection.Collection.find` was called.

    :Parameters:
      - `key`: name of key for which we want to get the distinct values

    .. seealso:: :meth:`pymongo.collection.Collection.distinct`
    """
    opts = {}
    spec = self.__spec
    if spec:
        opts["query"] = spec
    max_time = self.__max_time_ms
    if max_time is not None:
        opts['maxTimeMS'] = max_time
    comment = self.__comment
    if comment:
        opts['$comment'] = comment
    collation = self.__collation
    if collation is not None:
        opts['collation'] = collation
    return self.__collection.distinct(key, **opts)
Get a list of distinct values for `key` among all documents in the result set of this query. Raises :class:`TypeError` if `key` is not an instance of :class:`basestring` (:class:`str` in python 3). The :meth:`distinct` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `key`: name of key for which we want to get the distinct values .. seealso:: :meth:`pymongo.collection.Collection.distinct`
Below is the the instruction that describes the task: ### Input: Get a list of distinct values for `key` among all documents in the result set of this query. Raises :class:`TypeError` if `key` is not an instance of :class:`basestring` (:class:`str` in python 3). The :meth:`distinct` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `key`: name of key for which we want to get the distinct values .. seealso:: :meth:`pymongo.collection.Collection.distinct` ### Response: def distinct(self, key): """Get a list of distinct values for `key` among all documents in the result set of this query. Raises :class:`TypeError` if `key` is not an instance of :class:`basestring` (:class:`str` in python 3). The :meth:`distinct` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `key`: name of key for which we want to get the distinct values .. seealso:: :meth:`pymongo.collection.Collection.distinct` """ options = {} if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: options['maxTimeMS'] = self.__max_time_ms if self.__comment: options['$comment'] = self.__comment if self.__collation is not None: options['collation'] = self.__collation return self.__collection.distinct(key, **options)
def _dict_merge(dct, merge_dct): """Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 Arguments: dct: dict onto which the merge is executed merge_dct: dct merged into dct """ for k, v in merge_dct.items(): if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], collections.Mapping)): _dict_merge(dct[k], merge_dct[k]) else: dct[k] = merge_dct[k]
Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 Arguments: dct: dict onto which the merge is executed merge_dct: dct merged into dct
Below is the the instruction that describes the task: ### Input: Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 Arguments: dct: dict onto which the merge is executed merge_dct: dct merged into dct ### Response: def _dict_merge(dct, merge_dct): """Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9 Arguments: dct: dict onto which the merge is executed merge_dct: dct merged into dct """ for k, v in merge_dct.items(): if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], collections.Mapping)): _dict_merge(dct[k], merge_dct[k]) else: dct[k] = merge_dct[k]
def deepcopy_sqla_objects(
        startobjs: List[object],
        session: Session,
        flush: bool = True,
        debug: bool = False,
        debug_walk: bool = True,
        debug_rewrite_rel: bool = False,
        objmap: Dict[object, object] = None) -> None:
    """
    Copies the given SQLAlchemy ORM objects into a new session.

    The copy happens in three passes:

    1. Walk the ORM tree from every start object, copying each object found
       (via :func:`copy_sqla_object`, without relationships) and recording a
       mapping from each source object to its copy.

    2. Rewrite the relationships on every copy (via
       :func:`rewrite_relationships`) so copies point at copies rather than
       at their source-session originals.

    3. Add every copy to the destination session.

    Every copied class must therefore support an ``__init__`` call with no
    arguments (see :func:`copy_sqla_object`), since we cannot know the
    constructor arguments for arbitrary objects.

    Args:
        startobjs: SQLAlchemy ORM objects to copy
        session: destination SQLAlchemy :class:`Session` receiving the copies
        flush: flush the session at the end?
        debug: be verbose?
        debug_walk: be extra verbose while walking the ORM tree?
        debug_rewrite_rel: be extra verbose while rewriting relationships?
        objmap: optional pre-seeded source-to-destination object map (see
            :func:`rewrite_relationships`); usually ``None``.
    """
    if objmap is None:
        objmap = {}  # source object -> copied object
    if debug:
        log.debug("deepcopy_sqla_objects: pass 1: create new objects")

    # Pass 1: visit every object reachable from the start objects. We cannot
    # fix relationships yet, because we do not know where the "root" of the
    # PK tree is until the whole tree has been seen.
    visited = set()
    for root in startobjs:
        for src in walk_orm_tree(root, seen=visited, debug=debug_walk):
            if debug:
                log.debug("deepcopy_sqla_objects: copying {}", src)
            # Copy without PKs/FKs; relationships come in pass 2. The copy
            # is deliberately NOT added to the session here: querying
            # relationships can trigger an autoflush, and the copies are not
            # yet fit for insertion (their relationships are unset). See
            # also session.no_autoflush:
            # "sqlalchemy.exc.OperationalError: (raised as a result of Query-
            # invoked autoflush; consider using a session.no_autoflush block
            # if this flush is occurring prematurely)..."
            objmap[src] = copy_sqla_object(src, omit_pk=True, omit_fk=True)

    # Pass 2: point every copy's relationships at other copies.
    if debug:
        log.debug("deepcopy_sqla_objects: pass 2: set relationships")
    for src, dst in objmap.items():
        if debug:
            log.debug("deepcopy_sqla_objects: newobj: {}", dst)
        rewrite_relationships(src, dst, objmap, debug=debug_rewrite_rel)

    # Pass 3: the copies are now complete, so session insertion is safe.
    if debug:
        log.debug("deepcopy_sqla_objects: pass 3: insert into session")
    for dst in objmap.values():
        session.add(dst)

    if debug:
        log.debug("deepcopy_sqla_objects: done")
    if flush:
        session.flush()
Makes a copy of the specified SQLAlchemy ORM objects, inserting them into a new session. This function operates in several passes: 1. Walk the ORM tree through all objects and their relationships, copying every object thus found (via :func:`copy_sqla_object`, without their relationships), and building a map from each source-session object to its equivalent destination-session object. 2. Work through all the destination objects, rewriting their relationships (via :func:`rewrite_relationships`) so they relate to each other (rather than their source-session brethren). 3. Insert all the destination-session objects into the destination session. For this to succeed, every object must take an ``__init__`` call with no arguments (see :func:`copy_sqla_object`). (We can't specify the required ``args``/``kwargs``, since we are copying a tree of arbitrary objects.) Args: startobjs: SQLAlchemy ORM objects to copy session: destination SQLAlchemy :class:`Session` into which to insert the copies flush: flush the session when we've finished? debug: be verbose? debug_walk: be extra verbose when walking the ORM tree? debug_rewrite_rel: be extra verbose when rewriting relationships? objmap: starting object map from source-session to destination-session objects (see :func:`rewrite_relationships` for more detail); usually ``None`` to begin with.
Below is the the instruction that describes the task: ### Input: Makes a copy of the specified SQLAlchemy ORM objects, inserting them into a new session. This function operates in several passes: 1. Walk the ORM tree through all objects and their relationships, copying every object thus found (via :func:`copy_sqla_object`, without their relationships), and building a map from each source-session object to its equivalent destination-session object. 2. Work through all the destination objects, rewriting their relationships (via :func:`rewrite_relationships`) so they relate to each other (rather than their source-session brethren). 3. Insert all the destination-session objects into the destination session. For this to succeed, every object must take an ``__init__`` call with no arguments (see :func:`copy_sqla_object`). (We can't specify the required ``args``/``kwargs``, since we are copying a tree of arbitrary objects.) Args: startobjs: SQLAlchemy ORM objects to copy session: destination SQLAlchemy :class:`Session` into which to insert the copies flush: flush the session when we've finished? debug: be verbose? debug_walk: be extra verbose when walking the ORM tree? debug_rewrite_rel: be extra verbose when rewriting relationships? objmap: starting object map from source-session to destination-session objects (see :func:`rewrite_relationships` for more detail); usually ``None`` to begin with. ### Response: def deepcopy_sqla_objects( startobjs: List[object], session: Session, flush: bool = True, debug: bool = False, debug_walk: bool = True, debug_rewrite_rel: bool = False, objmap: Dict[object, object] = None) -> None: """ Makes a copy of the specified SQLAlchemy ORM objects, inserting them into a new session. This function operates in several passes: 1. 
Walk the ORM tree through all objects and their relationships, copying every object thus found (via :func:`copy_sqla_object`, without their relationships), and building a map from each source-session object to its equivalent destination-session object. 2. Work through all the destination objects, rewriting their relationships (via :func:`rewrite_relationships`) so they relate to each other (rather than their source-session brethren). 3. Insert all the destination-session objects into the destination session. For this to succeed, every object must take an ``__init__`` call with no arguments (see :func:`copy_sqla_object`). (We can't specify the required ``args``/``kwargs``, since we are copying a tree of arbitrary objects.) Args: startobjs: SQLAlchemy ORM objects to copy session: destination SQLAlchemy :class:`Session` into which to insert the copies flush: flush the session when we've finished? debug: be verbose? debug_walk: be extra verbose when walking the ORM tree? debug_rewrite_rel: be extra verbose when rewriting relationships? objmap: starting object map from source-session to destination-session objects (see :func:`rewrite_relationships` for more detail); usually ``None`` to begin with. """ if objmap is None: objmap = {} # keys = old objects, values = new objects if debug: log.debug("deepcopy_sqla_objects: pass 1: create new objects") # Pass 1: iterate through all objects. (Can't guarantee to get # relationships correct until we've done this, since we don't know whether # or where the "root" of the PK tree is.) seen = set() for startobj in startobjs: for oldobj in walk_orm_tree(startobj, seen=seen, debug=debug_walk): if debug: log.debug("deepcopy_sqla_objects: copying {}", oldobj) newobj = copy_sqla_object(oldobj, omit_pk=True, omit_fk=True) # Don't insert the new object into the session here; it may trigger # an autoflush as the relationships are queried, and the new # objects are not ready for insertion yet (as their relationships # aren't set). 
# Note also the session.no_autoflush option: # "sqlalchemy.exc.OperationalError: (raised as a result of Query- # invoked autoflush; consider using a session.no_autoflush block if # this flush is occurring prematurely)..." objmap[oldobj] = newobj # Pass 2: set all relationship properties. if debug: log.debug("deepcopy_sqla_objects: pass 2: set relationships") for oldobj, newobj in objmap.items(): if debug: log.debug("deepcopy_sqla_objects: newobj: {}", newobj) rewrite_relationships(oldobj, newobj, objmap, debug=debug_rewrite_rel) # Now we can do session insert. if debug: log.debug("deepcopy_sqla_objects: pass 3: insert into session") for newobj in objmap.values(): session.add(newobj) # Done if debug: log.debug("deepcopy_sqla_objects: done") if flush: session.flush()
def read_json(json_metadata):
    """
    Rebuild a metadata object from a JSON-formatted metadata file.

    :param json_metadata: Path and file name of JSON-formatted metadata
        object file
    :return: populated metadata object
    """
    with open(json_metadata) as metadata_handle:
        raw = json.load(metadata_handle)
    metadata = MetadataObject()
    # Dict values become nested GenObject categories; everything else is
    # assigned verbatim as a plain attribute.
    for key, value in raw.items():
        if isinstance(value, dict):
            setattr(metadata, key, GenObject(value))
        else:
            setattr(metadata, key, value)
    return metadata
Read the metadata object from file :param json_metadata: Path and file name of JSON-formatted metadata object file :return: metadata object
Below is the the instruction that describes the task: ### Input: Read the metadata object from file :param json_metadata: Path and file name of JSON-formatted metadata object file :return: metadata object ### Response: def read_json(json_metadata): """ Read the metadata object from file :param json_metadata: Path and file name of JSON-formatted metadata object file :return: metadata object """ # Load the metadata object from the file with open(json_metadata) as metadatareport: jsondata = json.load(metadatareport) # Create the metadata objects metadata = MetadataObject() # Initialise the metadata categories as GenObjects created using the appropriate key for attr in jsondata: if not isinstance(jsondata[attr], dict): setattr(metadata, attr, jsondata[attr]) else: setattr(metadata, attr, GenObject(jsondata[attr])) return metadata
def ltake(n: int, xs: Iterable[T]) -> List[T]:
    """
    Eagerly take the first *n* items of *xs*, returning them as a list.
    """
    return [item for item in take(n, xs)]
A non-lazy version of take.
Below is the the instruction that describes the task: ### Input: A non-lazy version of take. ### Response: def ltake(n: int, xs: Iterable[T]) -> List[T]: """ A non-lazy version of take. """ return list(take(n, xs))
def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a Route53 hosted zone.

    .. versionadded:: 2015.8.0

    CLI Example::

        salt myminion boto_route53.delete_zone example.org
    '''
    if region is None:
        region = 'universal'

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    hosted_zone = conn.get_zone(zone)
    if not hosted_zone:
        # Nothing to delete: the zone does not exist.
        return False
    conn.delete_hosted_zone(hosted_zone.id)
    return True
Delete a Route53 hosted zone. .. versionadded:: 2015.8.0 CLI Example:: salt myminion boto_route53.delete_zone example.org
Below is the the instruction that describes the task: ### Input: Delete a Route53 hosted zone. .. versionadded:: 2015.8.0 CLI Example:: salt myminion boto_route53.delete_zone example.org ### Response: def delete_zone(zone, region=None, key=None, keyid=None, profile=None): ''' Delete a Route53 hosted zone. .. versionadded:: 2015.8.0 CLI Example:: salt myminion boto_route53.delete_zone example.org ''' if region is None: region = 'universal' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) _zone = conn.get_zone(zone) if _zone: conn.delete_hosted_zone(_zone.id) return True return False
def flatten(cls, errors):
    """
    Recursively expand every `MultipleInvalid` in `errors` into its
    constituent errors, yielding a flat list of `Invalid`.

    :type errors: list[Invalid|MultipleInvalid]
    :rtype: list[Invalid]
    """
    flat = []
    for error in errors:
        if isinstance(error, MultipleInvalid):
            flat += cls.flatten(error.errors)
        else:
            flat.append(error)
    return flat
Unwind `MultipleErrors` to have a plain list of `Invalid` :type errors: list[Invalid|MultipleInvalid] :rtype: list[Invalid]
Below is the the instruction that describes the task: ### Input: Unwind `MultipleErrors` to have a plain list of `Invalid` :type errors: list[Invalid|MultipleInvalid] :rtype: list[Invalid] ### Response: def flatten(cls, errors): """ Unwind `MultipleErrors` to have a plain list of `Invalid` :type errors: list[Invalid|MultipleInvalid] :rtype: list[Invalid] """ ers = [] for e in errors: if isinstance(e, MultipleInvalid): ers.extend(cls.flatten(e.errors)) else: ers.append(e) return ers
def _evaluate(self):
    """Lazily retrieve and build User instances from returned data"""
    # Serve from the cache when it has already been populated.
    if self._elements:
        yield from self._elements
        return
    # Otherwise fetch each user on demand, caching as we go.
    for user_id in self.__user_ids:
        user = self._swimlane.users.get(id=user_id)
        self._elements.append(user)
        yield user
Lazily retrieve and build User instances from returned data
Below is the the instruction that describes the task: ### Input: Lazily retrieve and build User instances from returned data ### Response: def _evaluate(self): """Lazily retrieve and build User instances from returned data""" if self._elements: for element in self._elements: yield element else: for user_id in self.__user_ids: element = self._swimlane.users.get(id=user_id) self._elements.append(element) yield element