text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
    r"""Return a line-buffered file object whose writes go to the journal.

    Each newline-separated text string written to the returned file is sent
    to the journal as one log message. Because the file is line buffered,
    a message is actually transmitted once a newline character is written.

    >>> from systemd import journal
    >>> stream = journal.stream('myapp')           # doctest: +SKIP
    >>> res = stream.write('message...\n')         # doctest: +SKIP

    will produce the following message in the journal::

      PRIORITY=7
      SYSLOG_IDENTIFIER=myapp
      MESSAGE=message...

    If identifier is None, a suitable default based on sys.argv[0] will be
    used. This interface can be used conveniently with the print function:

    >>> from __future__ import print_function
    >>> stream = journal.stream()                  # doctest: +SKIP
    >>> print('message...', file=stream)           # doctest: +SKIP

    priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
    `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`,
    `LOG_DEBUG`.

    level_prefix is a boolean. If true, kernel-style log priority level
    prefixes (such as '<1>') are interpreted. See sd-daemon(3) for more
    information.
    """
    if identifier is None:
        # Fall back to the script name; 'python' when run as `python -c ...`
        # or when argv is empty/blank.
        argv0 = _sys.argv[0] if _sys.argv else None
        identifier = argv0 if argv0 and argv0 != '-c' else 'python'
    fd = stream_fd(identifier, priority, level_prefix)
    return _os.fdopen(fd, 'w', 1)
[ "def", "stream", "(", "identifier", "=", "None", ",", "priority", "=", "LOG_INFO", ",", "level_prefix", "=", "False", ")", ":", "if", "identifier", "is", "None", ":", "if", "not", "_sys", ".", "argv", "or", "not", "_sys", ".", "argv", "[", "0", "]", ...
37.714286
27.119048
def T_dependent_property_integral_over_T(self, T1, T2): r'''Method to calculate the integral of a property over temperature with respect to temperature, using a specified method. Methods found valid by `select_valid_methods` are attempted until a method succeeds. If no methods are valid and succeed, None is returned. Calls `calculate_integral_over_T` internally to perform the actual calculation. .. math:: \text{integral} = \int_{T_1}^{T_2} \frac{\text{property}}{T} \; dT Parameters ---------- T1 : float Lower limit of integration, [K] T2 : float Upper limit of integration, [K] method : str Method for which to find the integral Returns ------- integral : float Calculated integral of the property over the given range, [`units`] ''' Tavg = 0.5*(T1+T2) if self.method: # retest within range if self.test_method_validity(Tavg, self.method): try: return self.calculate_integral_over_T(T1, T2, self.method) except: # pragma: no cover pass sorted_valid_methods = self.select_valid_methods(Tavg) for method in sorted_valid_methods: try: return self.calculate_integral_over_T(T1, T2, method) except: pass return None
[ "def", "T_dependent_property_integral_over_T", "(", "self", ",", "T1", ",", "T2", ")", ":", "Tavg", "=", "0.5", "*", "(", "T1", "+", "T2", ")", "if", "self", ".", "method", ":", "# retest within range", "if", "self", ".", "test_method_validity", "(", "Tavg...
35.046512
22.627907
def _url_val(val, key, obj, **kwargs):
    """Resolve a value for `HyperlinksField`: ``URLFor`` instances are
    serialized against *key*/*obj*; anything else passes through unchanged.
    """
    return val.serialize(key, obj, **kwargs) if isinstance(val, URLFor) else val
[ "def", "_url_val", "(", "val", ",", "key", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "val", ",", "URLFor", ")", ":", "return", "val", ".", "serialize", "(", "key", ",", "obj", ",", "*", "*", "kwargs", ")", "else", ...
29.75
12.625
def hmean_int(a, a_min=5778, a_max=1149851):
    """Harmonic mean of *a* after clipping its values into
    ``[a_min, a_max]``; the result is rounded to the closest int.
    """
    from scipy.stats import hmean
    clipped = np.clip(a, a_min, a_max)
    return int(round(hmean(clipped)))
[ "def", "hmean_int", "(", "a", ",", "a_min", "=", "5778", ",", "a_max", "=", "1149851", ")", ":", "from", "scipy", ".", "stats", "import", "hmean", "return", "int", "(", "round", "(", "hmean", "(", "np", ".", "clip", "(", "a", ",", "a_min", ",", "...
39.2
5
def _create_regex_pattern_add_optional_spaces_to_word_characters(word): """Add the regex special characters (\s*) to allow optional spaces between the characters in a word. @param word: (string) the word to be inserted into a regex pattern. @return: string: the regex pattern for that word with optional spaces between all of its characters. """ new_word = u"" for ch in word: if ch.isspace(): new_word += ch else: new_word += ch + ur'\s*' return new_word
[ "def", "_create_regex_pattern_add_optional_spaces_to_word_characters", "(", "word", ")", ":", "new_word", "=", "u\"\"", "for", "ch", "in", "word", ":", "if", "ch", ".", "isspace", "(", ")", ":", "new_word", "+=", "ch", "else", ":", "new_word", "+=", "ch", "+...
37.928571
16.857143
def propose(self):
    """Use the trained model to propose a new pipeline.

    Returns:
        int:
            Index corresponding to pipeline to try in ``dpp_matrix``,
            or None when every possibility has already been tried.
    """
    candidates = self._get_candidates()
    if candidates is None:
        # _get_candidates() returns None once every pipeline has been tried
        return None
    # Score each untried candidate, then let the acquisition function
    # choose which index to run next.
    scores = self.predict(candidates)
    chosen = self._acquire(scores)
    return candidates[chosen]
[ "def", "propose", "(", "self", ")", ":", "# generate a list of all the untried candidate pipelines", "candidates", "=", "self", ".", "_get_candidates", "(", ")", "# get_candidates() returns None when every possibility has been tried", "if", "candidates", "is", "None", ":", "r...
34.75
21.55
def dist(ctx, devpi=False, egg=False, wheel=False, auto=True):
    """Distribute the project."""
    config.load()
    setup_cmd = ["python", "setup.py", "sdist"]

    if auto:
        # Eggs only make sense on Python 2; build a wheel whenever the
        # 'wheel' package is importable.
        egg = sys.version_info.major == 2
        try:
            import wheel as _
        except ImportError:
            wheel = False
        else:
            wheel = True

    if egg:
        setup_cmd.append("bdist_egg")
    if wheel:
        setup_cmd.append("bdist_wheel")

    ctx.run("invoke clean --all build --docs test check")
    ctx.run(' '.join(setup_cmd))
    if devpi:
        ctx.run("devpi upload dist/*")
[ "def", "dist", "(", "ctx", ",", "devpi", "=", "False", ",", "egg", "=", "False", ",", "wheel", "=", "False", ",", "auto", "=", "True", ")", ":", "config", ".", "load", "(", ")", "cmd", "=", "[", "\"python\"", ",", "\"setup.py\"", ",", "\"sdist\"", ...
25.652174
18.043478
def sentiment(symbol, type='daily', date=None, token='', version=''):
    '''Social sentiment data from StockTwits, either as a daily value or
    per minute for a given date.

    https://iexcloud.io/docs/api/#social-sentiment
    Continuous

    Args:
        symbol (string); Ticker to request
        type (string); 'daily' or 'minute'
        date (string); date in YYYYMMDD or datetime
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    if not date:
        # No date: the endpoint returns the current sentiment value
        return _getJson('stock/{symbol}/sentiment/{type}/'.format(symbol=symbol, type=type), token, version)
    date = _strOrDate(date)
    return _getJson('stock/{symbol}/sentiment/{type}/{date}'.format(symbol=symbol, type=type, date=date), token, version)
[ "def", "sentiment", "(", "symbol", ",", "type", "=", "'daily'", ",", "date", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "_raiseIfNotStr", "(", "symbol", ")", "if", "date", ":", "date", "=", "_strOrDate", "(", "date", ...
39.190476
29.285714
def losc_frame_urls(ifo, start_time, end_time):
    """ Get a list of urls to losc frame files

    Parameters
    ----------
    ifo: str
        The name of the IFO to find the information about.
    start_time: int
        The gps time in GPS seconds
    end_time: int
        The end time in GPS seconds

    Returns
    -------
    frame_files: list
        URLs of the 'gwf'-format files that span the requested times.
    """
    strain_entries = losc_frame_json(ifo, start_time, end_time)['strain']
    urls = []
    for entry in strain_entries:
        # only GWF frame files are of interest here
        if entry['format'] == 'gwf':
            urls.append(entry['url'])
    return urls
[ "def", "losc_frame_urls", "(", "ifo", ",", "start_time", ",", "end_time", ")", ":", "data", "=", "losc_frame_json", "(", "ifo", ",", "start_time", ",", "end_time", ")", "[", "'strain'", "]", "return", "[", "d", "[", "'url'", "]", "for", "d", "in", "dat...
28.3
20.35
def main(argString=None):
    """The main function.

    :param argString: the options.
    :type argString: list

    These are the steps of this module:

    1.  Prints the options.
    2.  Finds the overlapping markers between the three reference panels and
        the source panel (:py:func:`findOverlappingSNPsWithReference`).
    3.  Extract the required markers from all the data sets
        (:py:func:`extractSNPs`).
    4.  Renames the reference panel's marker names so that they are the same
        as the source panel (for all populations) (:py:func:`renameSNPs`).
    5.  Combines the three reference panels together
        (:py:func:`combinePlinkBinaryFiles`).
    6.  Compute the frequency of all the markers from the reference and the
        source panels (:py:func:`computeFrequency`).
    7.  Finds the markers to flip in the reference panel (when compared to
        the source panel) (:py:func:`findFlippedSNPs`).
    8.  Excludes the unflippable markers from the reference and the source
        panels (:py:func:`excludeSNPs`).
    9.  Flips the markers that need flipping in their reference panel
        (:py:func:`flipSNPs`).
    10. Combines the reference and the source panels
        (:py:func:`combinePlinkBinaryFiles`).
    11. Runs part of :py:mod:`pyGenClean.RelatedSamples.find_related_samples`
        on the combined data set (:py:func:`runRelatedness`).
    12. Creates the ``mds`` file from the combined data set and the result
        of previous step (:py:func:`createMDSFile`).
    13. Creates the population file (:py:func:`createPopulationFile`).
    14. Plots the ``mds`` values (:py:func:`plotMDS`).
    15. Finds the outliers of a given reference population
        (:py:func:`find_the_outliers`).
    16. If required, computes the Eigenvalues using smartpca
        (:py:func:`compute_eigenvalues`).
    17. If required, creates a scree plot from smartpca results
        (:py:func:`create_scree_plot`).

    """
    # Getting and checking the options
    args = parseArgs(argString)
    checkArgs(args)

    logger.info("Options used:")
    for key, value in vars(args).iteritems():
        logger.info(" --{} {}".format(key.replace("_", "-"), value))

    newBfile = None
    popNames = ["CEU", "YRI", "JPT-CHB"]
    referencePrefixes = [args.ceu_bfile, args.yri_bfile, args.jpt_chb_bfile]

    if not args.skip_ref_pops:
        # Find the markers shared between the source panel and the three
        # reference panels
        logger.info("Finding overlapping SNPs between reference and "
                    "source panels")
        findOverlappingSNPsWithReference(
            prefix=args.bfile,
            referencePrefixes=referencePrefixes,
            referencePopulations=popNames,
            outPrefix=args.out,
        )

        # Extract the required SNPs using Plink (reference panels)
        logger.info("Extracting overlapping SNPs from the reference panels")
        extractSNPs(
            snpToExtractFileNames=[
                args.out + ".{}_snp_to_extract".format(popName)
                for popName in popNames
            ],
            referencePrefixes=referencePrefixes,
            popNames=popNames,
            outPrefix=args.out + ".reference_panel",
            runSGE=args.sge,
            options=args,
        )

        # Extract the required SNPs using Plink (source panel)
        logger.info("Extracting overlapping SNPs from the source panel")
        extractSNPs(
            snpToExtractFileNames=[args.out + ".source_snp_to_extract"],
            referencePrefixes=[args.bfile],
            popNames=["ALL"],
            outPrefix=args.out + ".source_panel",
            runSGE=False,
            options=args,
        )

        # Rename the reference panels' markers so the names match the
        # source panel
        for pop in popNames:
            logger.info("Renaming reference panel's SNPs {} to match source "
                        "panel".format(pop))
            renameSNPs(
                inPrefix=args.out + ".reference_panel.{}".format(pop),
                updateFileName=args.out + ".{}_update_names".format(pop),
                outPrefix=args.out + ".reference_panel.{}.rename".format(pop),
            )

        # Combining the three reference panels into one
        logger.info("Combining the reference panels")
        combinePlinkBinaryFiles(
            prefixes=[
                args.out + ".reference_panel.{}.rename".format(pop)
                for pop in popNames
            ],
            outPrefix=args.out + ".reference_panel.ALL.rename",
        )

        # Computing the frequency (reference panel)
        logger.info("Computing reference panel frequencies")
        computeFrequency(
            prefix=args.out + ".reference_panel.ALL.rename",
            outPrefix=args.out + ".reference_panel.ALL.rename.frequency",
        )

        # Computing the frequency (source panel)
        logger.info("Computing source panel frequencies")
        computeFrequency(
            prefix=args.out + ".source_panel.ALL",
            outPrefix=args.out + ".source_panel.ALL.frequency",
        )

        # Finding the SNPs to flip and the unflippable SNPs to exclude,
        # by comparing the two frequency files
        logger.info("Finding SNPs to flip or to exclude from reference panel")
        findFlippedSNPs(
            frqFile1=args.out + ".reference_panel.ALL.rename.frequency.frq",
            frqFile2=args.out + ".source_panel.ALL.frequency.frq",
            outPrefix=args.out,
        )

        # Excluding unflippable SNPs (reference panel)
        logger.info("Excluding SNPs from reference panel")
        excludeSNPs(
            inPrefix=args.out + ".reference_panel.ALL.rename",
            outPrefix=args.out + ".reference_panel.ALL.rename.cleaned",
            exclusionFileName=args.out + ".snp_to_remove",
        )

        # Excluding unflippable SNPs (source panel)
        logger.info("Excluding SNPs from source panel")
        excludeSNPs(args.out + ".source_panel.ALL",
                    args.out + ".source_panel.ALL.cleaned",
                    args.out + ".snp_to_remove")

        # Flipping the SNPs that need to be flipped in the reference panel
        logger.info("Flipping SNPs in reference panel")
        flipSNPs(
            inPrefix=args.out + ".reference_panel.ALL.rename.cleaned",
            outPrefix=args.out + ".reference_panel.ALL.rename.cleaned.flipped",
            flipFileName=args.out + ".snp_to_flip_in_reference",
        )

        # Combining the reference and the source panels
        logger.info("Combining reference and source panels")
        combinePlinkBinaryFiles(
            prefixes=[args.out + ".reference_panel.ALL.rename.cleaned.flipped",
                      args.out + ".source_panel.ALL.cleaned"],
            outPrefix=args.out + ".final_dataset_for_genome",
        )

        # Running the relatedness step on the combined data set
        logger.info("Creating the genome file using Plink")
        newBfile = runRelatedness(
            inputPrefix=args.out + ".final_dataset_for_genome",
            outPrefix=args.out,
            options=args,
        )

    else:
        # Just run relatedness on the dataset
        newBfile = runRelatedness(
            inputPrefix=args.bfile,
            outPrefix=args.out,
            options=args,
        )

    # Creating the MDS file
    logger.info("Creating the MDS file using Plink")
    createMDSFile(
        nb_components=args.nb_components,
        inPrefix=newBfile,
        outPrefix=args.out + ".mds",
        genomeFileName=args.out + ".ibs.genome.genome",
    )

    if not args.skip_ref_pops:
        # Creating the population file (one label per fam file)
        logger.info("Creating a population file")
        famFiles = [
            args.out + ".reference_panel." + i + ".fam" for i in popNames
        ]
        famFiles.append(args.out + ".source_panel.ALL.fam")
        labels = popNames + ["SOURCE"]
        createPopulationFile(
            inputFiles=famFiles,
            labels=labels,
            outputFileName=args.out + ".population_file",
        )

        # Plot the MDS values
        logger.info("Creating the MDS plot")
        plotMDS(
            inputFileName=args.out + ".mds.mds",
            outPrefix=args.out + ".mds",
            populationFileName=args.out + ".population_file",
            options=args,
        )

        # Finding the outliers of the requested reference population
        logger.info("Finding the outliers")
        find_the_outliers(
            mds_file_name=args.out + ".mds.mds",
            population_file_name=args.out + ".population_file",
            ref_pop_name=args.outliers_of,
            multiplier=args.multiplier,
            out_prefix=args.out,
        )

    # Do we need to create a scree plot?
    if args.create_scree_plot:
        # Computing the eigenvalues using smartpca
        logger.info("Computing eigenvalues")
        compute_eigenvalues(
            in_prefix=args.out + ".ibs.pruned_data",
            out_prefix=args.out + ".smartpca",
        )

        logger.info("Creating scree plot")
        create_scree_plot(
            in_filename=args.out + ".smartpca.evec.txt",
            out_filename=args.out + ".smartpca.scree_plot.png",
            plot_title=args.scree_plot_title,
        )
[ "def", "main", "(", "argString", "=", "None", ")", ":", "# Getting and checking the options", "args", "=", "parseArgs", "(", "argString", ")", "checkArgs", "(", "args", ")", "logger", ".", "info", "(", "\"Options used:\"", ")", "for", "key", ",", "value", "i...
37.918803
19.542735
def is_fuse_exec(cmd):
    '''
    Returns true if the command passed is a fuse mountable application.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.is_fuse_exec sshfs
    '''
    cmd_path = salt.utils.path.which(cmd)

    if not cmd_path:
        # The command doesn't exist, so there is no point in running ldd on it
        return False
    if not salt.utils.path.which('ldd'):
        raise CommandNotFoundError('ldd')

    # A fuse-capable binary links against libfuse
    out = __salt__['cmd.run']('ldd {0}'.format(cmd_path), python_shell=False)
    return 'libfuse' in out
[ "def", "is_fuse_exec", "(", "cmd", ")", ":", "cmd_path", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "cmd", ")", "# No point in running ldd on a command that doesn't exist", "if", "not", "cmd_path", ":", "return", "False", "elif", "not", "salt", ...
25.85
23.55
def addCallSetFromName(self, sampleName):
    """
    Adds a CallSet for the specified sample name.
    """
    self.addCallSet(CallSet(self, sampleName))
[ "def", "addCallSetFromName", "(", "self", ",", "sampleName", ")", ":", "callSet", "=", "CallSet", "(", "self", ",", "sampleName", ")", "self", ".", "addCallSet", "(", "callSet", ")" ]
31.833333
4.166667
def notify(self, *args, **kwargs):
    "See signal"
    # 'loop' may be overridden per call; otherwise this instance's loop is used
    loop = kwargs.pop('loop', self.loop)
    notification = self.signal.prepare_notification(
        subscribers=self.subscribers, instance=self.instance, loop=loop)
    return notification.run(*args, **kwargs)
[ "def", "notify", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "loop", "=", "kwargs", ".", "pop", "(", "'loop'", ",", "self", ".", "loop", ")", "return", "self", ".", "signal", ".", "prepare_notification", "(", "subscribers", "=",...
42.333333
11
def discover(service="ssdp:all", timeout=1, retries=2, ipAddress="239.255.255.250", port=1900):
    """Discovers UPnP devices in the local network.

    Try to discover all devices in the local network which do support UPnP.
    The discovery process can fail for various reasons and it is recommended
    to do at least two discoveries, which you can specify with the
    ``retries`` parameter.

    :param service: the service type or list of service types of devices you
        look for
    :type service: str or list[str]
    :param float timeout: the socket timeout for each try
    :param int retries: how often should be a discovery request send
    :param str ipAddress: the multicast ip address to use
    :param int port: the port to use
    :return: a list of DiscoveryResponse objects or empty if no device was
        found
    :rtype: list[DiscoveryResponse]
    :raises ValueError: if ``service`` is neither a string nor a list

    .. seealso::

        :class:`~simpletr64.DiscoveryResponse`,
        :meth:`~simpletr64.Discover.discoverParticularHost`
    """
    # NOTE: this sets the process-wide default socket timeout.
    socket.setdefaulttimeout(timeout)

    if isinstance(service, str):
        services = [service]
    elif isinstance(service, list):
        services = service
    else:
        # Fix: previously an unsupported type fell through and crashed later
        # with a NameError on 'services'.
        raise ValueError("service must be a string or a list of strings")

    # Build one M-SEARCH request per requested service type
    messages = []
    for service in services:
        message = 'M-SEARCH * HTTP/1.1\r\nMX: 5\r\nMAN: "ssdp:discover"\r\nHOST: ' + \
                  ipAddress + ':' + str(port) + '\r\n'
        message += "ST: " + service + "\r\n\r\n"
        messages.append(message)

    # keyed by location URL, so duplicated responses are filtered out
    responses = {}

    for _ in range(retries):
        # setup the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)

            # noinspection PyAssignmentToLoopOrWithParameter
            for _ in range(2):
                # send the messages with different service types
                for message in messages:
                    # send message more often to make sure all devices will get it
                    sock.sendto(message.encode('utf-8'), (ipAddress, port))

            while True:
                try:
                    # read the message until timeout
                    data = sock.recv(1024)
                except socket.timeout:
                    break
                else:
                    # no time out, read the response data and create response object
                    response = DiscoveryResponse(data)
                    # filter duplicated responses
                    responses[response.location] = response
        finally:
            # Fix: the socket used to be leaked on every retry iteration.
            sock.close()

    # return a list of all responses
    return list(responses.values())
[ "def", "discover", "(", "service", "=", "\"ssdp:all\"", ",", "timeout", "=", "1", ",", "retries", "=", "2", ",", "ipAddress", "=", "\"239.255.255.250\"", ",", "port", "=", "1900", ")", ":", "socket", ".", "setdefaulttimeout", "(", "timeout", ")", "messages...
40.653846
26.371795
def sub_dsp_nodes(self):
    """
    Returns all sub-dispatcher nodes of the dispatcher.

    :return:
        All sub-dispatcher nodes of the dispatcher.
    :rtype: dict[str, dict]
    """
    sub_dispatchers = {}
    for node_id, attrs in self.nodes.items():
        if attrs['type'] == 'dispatcher':
            sub_dispatchers[node_id] = attrs
    return sub_dispatchers
[ "def", "sub_dsp_nodes", "(", "self", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "nodes", ".", "items", "(", ")", "if", "v", "[", "'type'", "]", "==", "'dispatcher'", "}" ]
27.545455
16.272727
def read_frames(file_path, frame_size, hop_size, start=0.0, end=float('inf'), buffer_size=5760000):
    """ Read an audio file frame by frame. The frames are yielded one after
    another.

    Args:
        file_path (str): Path to the file to read.
        frame_size (int): The number of samples per frame.
        hop_size (int): The number of samples between two frames.
        start (float): Start in seconds to read from.
        end (float): End in seconds to read to. ``inf`` means to the end of
            the file.
        buffer_size (int): Number of samples to load into memory at once and
            return as a single block. The exact number of loaded samples
            depends on the block-size of the audioread library. So it can be
            of x higher, where the x is typically 1024 or 4096.

    Returns:
        Generator: A generator yielding a tuple for every frame. The first
        item is the frame and the second a boolean indicating if it is the
        last frame.
    """
    carry = np.array([], dtype=np.float32)

    for raw_block in read_blocks(file_path, start=start, end=end, buffer_size=buffer_size):
        # Prepend the samples carried over from the previous block
        samples = np.concatenate([carry, raw_block])
        offset = 0

        # Emit every frame fully contained in this block
        while offset + frame_size < samples.size:
            yield samples[offset:offset + frame_size], False
            offset += hop_size

        # Keep the remainder for the next block
        carry = samples[offset:]

    if carry.size > 0:
        # Zero-pad the trailing partial frame up to frame_size and flag it
        # as the last frame
        carry = np.pad(
            carry,
            (0, frame_size - carry.size),
            mode='constant',
            constant_values=0
        )
        yield carry, True
[ "def", "read_frames", "(", "file_path", ",", "frame_size", ",", "hop_size", ",", "start", "=", "0.0", ",", "end", "=", "float", "(", "'inf'", ")", ",", "buffer_size", "=", "5760000", ")", ":", "rest_samples", "=", "np", ".", "array", "(", "[", "]", "...
39.142857
21.061224
def receive_accept(self, msg):
    '''
    Returns either an Accepted or Nack message in response. The Acceptor's
    state must be persisted to disk prior to transmitting the Accepted
    message.
    '''
    promised = self.promised_id
    if promised is not None and msg.proposal_id < promised:
        # The proposal is older than our current promise: refuse it
        return Nack(self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id)

    # Accept the proposal and record it as both promised and accepted
    self.promised_id = msg.proposal_id
    self.accepted_id = msg.proposal_id
    self.accepted_value = msg.proposal_value
    return Accepted(self.network_uid, msg.proposal_id, msg.proposal_value)
[ "def", "receive_accept", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "promised_id", "is", "None", "or", "msg", ".", "proposal_id", ">=", "self", ".", "promised_id", ":", "self", ".", "promised_id", "=", "msg", ".", "proposal_id", "self", ".", ...
51.416667
27.25
def get_repo_info(repo_name, profile='github', ignore_cache=False):
    '''
    Return information for a given repo.

    .. versionadded:: 2016.11.0

    repo_name
        The name of the repository.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    ignore_cache
        If ``True``, bypass the cached entry in ``__context__`` and query
        the API again.

    CLI Example:

    .. code-block:: bash

        salt myminion github.get_repo_info salt
        salt myminion github.get_repo_info salt profile='my-github-profile'
    '''
    org_name = _get_config_value(profile, 'org_name')
    # Cache key is scoped per organization and (case-insensitive) repo name
    key = "github.{0}:{1}:repo_info".format(
        _get_config_value(profile, 'org_name'),
        repo_name.lower()
    )

    # Results are cached in __context__ for the lifetime of the process
    # unless ignore_cache is set.
    if key not in __context__ or ignore_cache:
        client = _get_client(profile)
        try:
            repo = client.get_repo('/'.join([org_name, repo_name]))
            if not repo:
                return {}

            # client.get_repo can return a github.Repository.Repository object,
            # even if the repo is invalid. We need to catch the exception when
            # we try to perform actions on the repo object, rather than above
            # the if statement.
            ret = _repo_to_dict(repo)

            __context__[key] = ret
        except github.UnknownObjectException:
            raise CommandExecutionError(
                'The \'{0}\' repository under the \'{1}\' organization could not '
                'be found.'.format(
                    repo_name,
                    org_name
                )
            )
    return __context__[key]
[ "def", "get_repo_info", "(", "repo_name", ",", "profile", "=", "'github'", ",", "ignore_cache", "=", "False", ")", ":", "org_name", "=", "_get_config_value", "(", "profile", ",", "'org_name'", ")", "key", "=", "\"github.{0}:{1}:repo_info\"", ".", "format", "(", ...
30.367347
22.857143
def verify(self, signing_cert_str, cert_str):
    """
    Verifies if a certificate is valid and signed by a given certificate.

    :param signing_cert_str: This certificate will be used to verify the
                             signature. Must be a string representation of
                             the certificate. If you only have a file use
                             the method read_str_from_file to get a string
                             representation.
    :param cert_str: This certificate will be verified if it is correct.
                     Must be a string representation of the certificate.
                     If you only have a file use the method
                     read_str_from_file to get a string representation.
    :return: Valid, Message
             Valid = True if the certificate is valid, otherwise false.
             Message = Why the validation failed.
    """
    try:
        ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, signing_cert_str)
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)

        # Validity-window checks: reject certificates not yet valid or expired
        if self.certificate_not_valid_yet(ca_cert):
            return False, "CA certificate is not valid yet."

        if ca_cert.has_expired() == 1:
            return False, "CA certificate is expired."

        if cert.has_expired() == 1:
            return False, "The signed certificate is expired."

        if self.certificate_not_valid_yet(cert):
            return False, "The signed certificate is not valid yet."

        # NOTE(review): equal CNs are rejected — presumably to forbid
        # verifying a certificate against itself; confirm this is intended.
        if ca_cert.get_subject().CN == cert.get_subject().CN:
            return False, ("CN may not be equal for CA certificate and the "
                           "signed certificate.")

        cert_algorithm = cert.get_signature_algorithm()
        if six.PY3:
            # OpenSSL returns bytes on Python 3; the verify call below needs
            # a text algorithm name and a bytes certificate
            cert_algorithm = cert_algorithm.decode('ascii')
            cert_str = cert_str.encode('ascii')

        cert_crypto = saml2.cryptography.pki.load_pem_x509_certificate(
            cert_str)

        try:
            # Check the CA's signature over the to-be-signed portion
            crypto.verify(ca_cert, cert_crypto.signature,
                          cert_crypto.tbs_certificate_bytes, cert_algorithm)
            return True, "Signed certificate is valid and correctly signed by CA certificate."
        except crypto.Error as e:
            return False, "Certificate is incorrectly signed."
    except Exception as e:
        # Any parsing or unexpected failure is reported, never raised
        return False, "Certificate is not valid for an unknown reason. %s" % str(e)
[ "def", "verify", "(", "self", ",", "signing_cert_str", ",", "cert_str", ")", ":", "try", ":", "ca_cert", "=", "crypto", ".", "load_certificate", "(", "crypto", ".", "FILETYPE_PEM", ",", "signing_cert_str", ")", "cert", "=", "crypto", ".", "load_certificate", ...
48.421053
25.368421
def get_asset_admin_session(self, proxy=None):
    """Gets an asset administration session for creating, updating and
    deleting assets.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetAdminSession) - an
            ``AssetAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_asset_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_admin()`` is ``true``.*
    """
    # The admin session also needs a lookup session to resolve the assets
    # it operates on.
    lookup_session = self._provider_manager.get_asset_lookup_session(proxy)
    provider_session = self._provider_manager.get_asset_admin_session(proxy)
    return AssetAdminSession(provider_session, self._config_map, lookup_session)
[ "def", "get_asset_admin_session", "(", "self", ",", "proxy", "=", "None", ")", ":", "asset_lookup_session", "=", "self", ".", "_provider_manager", ".", "get_asset_lookup_session", "(", "proxy", ")", "return", "AssetAdminSession", "(", "self", ".", "_provider_manager...
46.333333
17.111111
def import_locations(self, data):
    """Parse `GNU miscfiles`_ cities data files.

    ``import_locations()`` returns a list containing :class:`City` objects.

    It expects data files in the same format that `GNU miscfiles`_ provides,
    that is::

        ID          : 1
        Type        : City
        Population  : 210700
        Size        :
        Name        : Aberdeen
         Country    : UK
         Region     : Scotland
        Location    : Earth
         Longitude  : -2.083
         Latitude   : 57.150
         Elevation  :
        Date        : 19961206
        Entered-By  : Rob.Hooft@EMBL-Heidelberg.DE
        //
        ID          : 2
        Type        : City
        Population  : 1950000
        Size        :
        Name        : Abidjan
         Country    : Ivory Coast
         Region     :
        Location    : Earth
         Longitude  : -3.867
         Latitude   : 5.333
         Elevation  :
        Date        : 19961206
        Entered-By  : Rob.Hooft@EMBL-Heidelberg.DE

    When processed by ``import_locations()`` will return ``list`` object in
    the following style::

        [City(1, "City", 210700, None, "Aberdeen", "UK", "Scotland", "Earth",
              -2.083, 57.15, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
              "Rob.Hooft@EMBL-Heidelberg.DE"),
         City(2, "City", 1950000, None, "Abidjan", "Ivory Coast", "", "Earth",
              -3.867, 5.333, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
              "Rob.Hooft@EMBL-Heidelberg.DE")])

    Args:
        data (iter): :abbr:`NOAA (National Oceanographic and Atmospheric
            Administration)` station data to read

    Returns:
        list: Places as ``City`` objects

    Raises:
        TypeError: Invalid value for data

    .. _GNU miscfiles: http://directory.fsf.org/project/miscfiles/
    """
    self._data = data
    # Accept an open file object, a pre-split list of records, or a filename
    if hasattr(data, 'read'):
        data = data.read().split('//\n')
    elif isinstance(data, list):
        pass
    elif isinstance(data, basestring):
        data = open(data).read().split('//\n')
    else:
        raise TypeError('Unable to handle data of type %r' % type(data))

    keys = ('identifier', 'ptype', 'population', 'size', 'name', 'country',
            'region', 'location', 'longitude', 'latitude', 'altitude',
            'date', 'entered')
    for record in data:
        # We truncate after splitting because the v1.4.2 datafile contains
        # a broken separator between 229 and 230 that would otherwise break
        # the import
        # NOTE(review): this rebinds `data`, shadowing the iterable being
        # looped over (safe — the iterator was already taken — but fragile).
        data = [i.split(':')[1].strip() for i in record.splitlines()[:13]]
        entries = dict(zip(keys, data))

        # Entry for Utrecht has the incorrect value of 0.000 for elevation.
        if entries['altitude'] == '0.000':
            logging.debug("Ignoring `0.000' value for elevation in %r "
                          'entry' % record)
            entries['altitude'] = ''
        # Empty fields become None; numeric fields are coerced
        for i in ('identifier', 'population', 'size', 'altitude'):
            entries[i] = int(entries[i]) if entries[i] else None
        for i in ('longitude', 'latitude'):
            entries[i] = float(entries[i]) if entries[i] else None
        entries['date'] = time.strptime(entries['date'], '%Y%m%d')
        self.append(City(**entries))
[ "def", "import_locations", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "data", "if", "hasattr", "(", "data", ",", "'read'", ")", ":", "data", "=", "data", ".", "read", "(", ")", ".", "split", "(", "'//\\n'", ")", "elif", "isinsta...
38.733333
20.077778
def import_phantom_module(xml_file): """ Insert a fake Python module to sys.modules, based on a XML file. The XML file is expected to conform to Pydocweb DTD. The fake module will contain dummy objects, which guarantee the following: - Docstrings are correct. - Class inheritance relationships are correct (if present in XML). - Function argspec is *NOT* correct (even if present in XML). Instead, the function signature is prepended to the function docstring. - Class attributes are *NOT* correct; instead, they are dummy objects. Parameters ---------- xml_file : str Name of an XML file to read """ import lxml.etree as etree object_cache = {} tree = etree.parse(xml_file) root = tree.getroot() # Sort items so that # - Base classes come before classes inherited from them # - Modules come before their contents all_nodes = dict([(n.attrib['id'], n) for n in root]) def _get_bases(node, recurse=False): bases = [x.attrib['ref'] for x in node.findall('base')] if recurse: j = 0 while True: try: b = bases[j] except IndexError: break if b in all_nodes: bases.extend(_get_bases(all_nodes[b])) j += 1 return bases type_index = ['module', 'class', 'callable', 'object'] def base_cmp(a, b): x = cmp(type_index.index(a.tag), type_index.index(b.tag)) if x != 0: return x if a.tag == 'class' and b.tag == 'class': a_bases = _get_bases(a, recurse=True) b_bases = _get_bases(b, recurse=True) x = cmp(len(a_bases), len(b_bases)) if x != 0: return x if a.attrib['id'] in b_bases: return -1 if b.attrib['id'] in a_bases: return 1 return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) nodes = root.getchildren() nodes.sort(base_cmp) # Create phantom items for node in nodes: name = node.attrib['id'] doc = (node.text or '').decode('string-escape') + "\n" if doc == "\n": doc = "" # create parent, if missing parent = name while True: parent = '.'.join(parent.split('.')[:-1]) if not parent: break if parent in object_cache: break obj = imp.new_module(parent) object_cache[parent] = obj 
sys.modules[parent] = obj # create object if node.tag == 'module': obj = imp.new_module(name) obj.__doc__ = doc sys.modules[name] = obj elif node.tag == 'class': bases = [object_cache[b] for b in _get_bases(node) if b in object_cache] bases.append(object) init = lambda self: None init.__doc__ = doc obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) obj.__name__ = name.split('.')[-1] elif node.tag == 'callable': funcname = node.attrib['id'].split('.')[-1] argspec = node.attrib.get('argspec') if argspec: argspec = re.sub('^[^(]*', '', argspec) doc = "%s%s\n\n%s" % (funcname, argspec, doc) obj = lambda: 0 obj.__argspec_is_invalid_ = True if sys.version_info[0] >= 3: obj.__name__ = funcname else: obj.__name__ = funcname obj.__name__ = name obj.__doc__ = doc if inspect.isclass(object_cache[parent]): obj.__objclass__ = object_cache[parent] else: class Dummy(object): pass obj = Dummy() obj.__name__ = name obj.__doc__ = doc if inspect.isclass(object_cache[parent]): obj.__get__ = lambda: None object_cache[name] = obj if parent: if inspect.ismodule(object_cache[parent]): obj.__module__ = parent setattr(object_cache[parent], name.split('.')[-1], obj) # Populate items for node in root: obj = object_cache.get(node.attrib['id']) if obj is None: continue for ref in node.findall('ref'): if node.tag == 'class': if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): setattr(obj, ref.attrib['name'], object_cache.get(ref.attrib['ref'])) else: setattr(obj, ref.attrib['name'], object_cache.get(ref.attrib['ref']))
[ "def", "import_phantom_module", "(", "xml_file", ")", ":", "import", "lxml", ".", "etree", "as", "etree", "object_cache", "=", "{", "}", "tree", "=", "etree", ".", "parse", "(", "xml_file", ")", "root", "=", "tree", ".", "getroot", "(", ")", "# Sort item...
34.231343
16.947761
def _grid_in_property(field_name, docstring, read_only=False, closed_only=False): """Create a GridIn property.""" def getter(self): if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 if field_name == 'length': return self._file.get(field_name, 0) return self._file.get(field_name, None) def setter(self, value): if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) self._file[field_name] = value if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " "can only be read after :meth:`close` " "has been called.") if not read_only and not closed_only: return property(getter, setter, doc=docstring) return property(getter, doc=docstring)
[ "def", "_grid_in_property", "(", "field_name", ",", "docstring", ",", "read_only", "=", "False", ",", "closed_only", "=", "False", ")", ":", "def", "getter", "(", "self", ")", ":", "if", "closed_only", "and", "not", "self", ".", "_closed", ":", "raise", ...
40.678571
16.857143
def advance_permutation(a, increasing=True, forward=True): """ Advance a list of unique, ordered elements in-place, lexicographically increasing or backward, by rightmost or leftmost digit. Returns False if the permutation wrapped around - i.e. went from lexicographically greatest to least, and True in all other cases. If the length of the list is N, then this function will repeat values after N! steps, and will return False exactly once. See also https://stackoverflow.com/a/34325140/43839 """ if not forward: a.reverse() cmp = operator.lt if increasing else operator.gt try: i = next(i for i in reversed(range(len(a) - 1)) if cmp(a[i], a[i + 1])) j = next(j for j in reversed(range(i + 1, len(a))) if cmp(a[i], a[j])) except StopIteration: # This is the lexicographically last permutation. if forward: a.reverse() return False a[i], a[j] = a[j], a[i] a[i + 1:] = reversed(a[i + 1:]) if not forward: a.reverse() return True
[ "def", "advance_permutation", "(", "a", ",", "increasing", "=", "True", ",", "forward", "=", "True", ")", ":", "if", "not", "forward", ":", "a", ".", "reverse", "(", ")", "cmp", "=", "operator", ".", "lt", "if", "increasing", "else", "operator", ".", ...
31.484848
24.515152
def _get_record_attrs(out_keys): """Check for records, a single key plus output attributes. """ if len(out_keys) == 1: attr = list(out_keys.keys())[0] if out_keys[attr]: return attr, out_keys[attr] return None, None
[ "def", "_get_record_attrs", "(", "out_keys", ")", ":", "if", "len", "(", "out_keys", ")", "==", "1", ":", "attr", "=", "list", "(", "out_keys", ".", "keys", "(", ")", ")", "[", "0", "]", "if", "out_keys", "[", "attr", "]", ":", "return", "attr", ...
31.5
7.125
def clean(self): """Delete all of the records""" # Deleting seems to be really weird and unrelable. self._session \ .query(Process) \ .filter(Process.d_vid == self._d_vid) \ .delete(synchronize_session='fetch') for r in self.records: self._session.delete(r) self._session.commit()
[ "def", "clean", "(", "self", ")", ":", "# Deleting seems to be really weird and unrelable.", "self", ".", "_session", ".", "query", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "self", ".", "_d_vid", ")", ".", "delete", "(", "synch...
27.615385
18
def json_changebase(obj, changer): """ Given a primitive compound Python object (i.e. a dict, string, int, or list) and a changer function that takes a primitive Python object as an argument, apply the changer function to the object and each sub-component. Return the newly-reencoded object. """ if isinstance(obj, (str, unicode)): return changer(obj) elif isinstance(obj, (int, long)) or obj is None: return obj elif isinstance(obj, list): return [json_changebase(x, changer) for x in obj] elif isinstance(obj, dict): return dict((x, json_changebase(obj[x], changer)) for x in obj) else: raise ValueError("Invalid object")
[ "def", "json_changebase", "(", "obj", ",", "changer", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "str", ",", "unicode", ")", ")", ":", "return", "changer", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "int", ",", "long", ")...
28.916667
19.416667
def display_element_selected(self, f): """Decorator routes Alexa Display.ElementSelected request to the wrapped view function. @ask.display_element_selected def eval_element(): return "", 200 The wrapped function is registered as the display_element_selected view function and renders the response for requests. Arguments: f {function} -- display_element_selected view function """ self._display_element_selected_func = f @wraps(f) def wrapper(*args, **kw): self._flask_view_func(*args, **kw) return f
[ "def", "display_element_selected", "(", "self", ",", "f", ")", ":", "self", ".", "_display_element_selected_func", "=", "f", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "self", ".", "_flask_view_func", ...
32.157895
19
def merge(self, other): """ Merge another object as needed. """ other.qualify() for n in ('name', 'qname', 'min', 'max', 'default', 'type', 'nillable', 'form_qualified',): if getattr(self, n) is not None: continue v = getattr(other, n) if v is None: continue setattr(self, n, v)
[ "def", "merge", "(", "self", ",", "other", ")", ":", "other", ".", "qualify", "(", ")", "for", "n", "in", "(", "'name'", ",", "'qname'", ",", "'min'", ",", "'max'", ",", "'default'", ",", "'type'", ",", "'nillable'", ",", "'form_qualified'", ",", ")"...
26.631579
10.736842
def write_file(self, what, filename, data): """Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file. """ log.info("writing %s to %s", what, filename) if sys.version_info >= (3,): data = data.encode("utf-8") if not self.dry_run: f = open(filename, 'wb') f.write(data) f.close()
[ "def", "write_file", "(", "self", ",", "what", ",", "filename", ",", "data", ")", ":", "log", ".", "info", "(", "\"writing %s to %s\"", ",", "what", ",", "filename", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "data", "=", ...
35.384615
12.538462
def make_iml4(R, iml_disagg, imtls=None, poes_disagg=(None,), curves=()): """ :returns: an ArrayWrapper over a 4D array of shape (N, R, M, P) """ if imtls is None: imtls = {imt: [iml] for imt, iml in iml_disagg.items()} N = len(curves) or 1 M = len(imtls) P = len(poes_disagg) arr = numpy.zeros((N, R, M, P)) imts = [from_string(imt) for imt in imtls] for m, imt in enumerate(imtls): imls = imtls[imt] for p, poe in enumerate(poes_disagg): for r in range(R): arr[:, r, m, p] = _imls(curves, poe, imt, imls, r) return ArrayWrapper(arr, dict(poes_disagg=poes_disagg, imts=imts))
[ "def", "make_iml4", "(", "R", ",", "iml_disagg", ",", "imtls", "=", "None", ",", "poes_disagg", "=", "(", "None", ",", ")", ",", "curves", "=", "(", ")", ")", ":", "if", "imtls", "is", "None", ":", "imtls", "=", "{", "imt", ":", "[", "iml", "]"...
38.588235
15.176471
def gen_data_files(src_dir): """ generates a list of files contained in the given directory (and its subdirectories) in the format required by the ``package_data`` parameter of the ``setuptools.setup`` function. Parameters ---------- src_dir : str (relative) path to the directory structure containing the files to be included in the package distribution Returns ------- fpaths : list(str) a list of file paths """ fpaths = [] base = os.path.dirname(src_dir) for root, dir, files in os.walk(src_dir): if len(files) != 0: for f in files: fpaths.append(os.path.relpath(os.path.join(root, f), base)) return fpaths
[ "def", "gen_data_files", "(", "src_dir", ")", ":", "fpaths", "=", "[", "]", "base", "=", "os", ".", "path", ".", "dirname", "(", "src_dir", ")", "for", "root", ",", "dir", ",", "files", "in", "os", ".", "walk", "(", "src_dir", ")", ":", "if", "le...
29.541667
20.125
def make_connector(self, app=None, bind=None): """Creates the connector for a given state and bind.""" return _EngineConnector(self, self.get_app(app), bind)
[ "def", "make_connector", "(", "self", ",", "app", "=", "None", ",", "bind", "=", "None", ")", ":", "return", "_EngineConnector", "(", "self", ",", "self", ".", "get_app", "(", "app", ")", ",", "bind", ")" ]
57
9.333333
def autodiscover(): """Import all `ddp` submodules from `settings.INSTALLED_APPS`.""" from django.utils.module_loading import autodiscover_modules from dddp.api import API autodiscover_modules('ddp', register_to=API) return API
[ "def", "autodiscover", "(", ")", ":", "from", "django", ".", "utils", ".", "module_loading", "import", "autodiscover_modules", "from", "dddp", ".", "api", "import", "API", "autodiscover_modules", "(", "'ddp'", ",", "register_to", "=", "API", ")", "return", "AP...
40.333333
15.166667
def compress_subproperties (properties): """ Combine all subproperties into their parent properties Requires: for every subproperty, there is a parent property. All features are explicitly expressed. This rule probably shouldn't be needed, but build-request.expand-no-defaults is being abused for unintended purposes and it needs help """ from .property import Property assert is_iterable_typed(properties, Property) result = [] matched_subs = set() all_subs = set() for p in properties: f = p.feature if not f.subfeature: subs = [x for x in properties if is_subfeature_of(p, x.feature)] if subs: matched_subs.update(subs) subvalues = '-'.join (sub.value for sub in subs) result.append(Property( p.feature, p.value + '-' + subvalues, p.condition)) else: result.append(p) else: all_subs.add(p) # TODO: this variables are used just for debugging. What's the overhead? assert all_subs == matched_subs return result
[ "def", "compress_subproperties", "(", "properties", ")", ":", "from", ".", "property", "import", "Property", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "result", "=", "[", "]", "matched_subs", "=", "set", "(", ")", "all_subs", "=",...
30.078947
20.210526
def to_json(self): """ Represented as a list of edges: dependent: index of child dep: dependency label governer: index of parent dependentgloss: gloss of parent governergloss: gloss of parent """ edges = [] for root in self.roots: edges.append({ 'governer': 0, 'dep': "root", 'dependent': root+1, 'governergloss': "root", 'dependentgloss': self.sentence[root].word, }) for gov, dependents in self.graph.items(): for dependent, dep in dependents: edges.append({ 'governer': gov+1, 'dep': dep, 'dependent': dependent+1, 'governergloss': self.sentence[gov].word, 'dependentgloss': self.sentence[dependent].word, }) return edges
[ "def", "to_json", "(", "self", ")", ":", "edges", "=", "[", "]", "for", "root", "in", "self", ".", "roots", ":", "edges", ".", "append", "(", "{", "'governer'", ":", "0", ",", "'dep'", ":", "\"root\"", ",", "'dependent'", ":", "root", "+", "1", "...
33.413793
11
def add_db_germline_flag(line): """Adds a DB flag for Germline filters, allowing downstream compatibility with PureCN. """ if line.startswith("#CHROM"): headers = ['##INFO=<ID=DB,Number=0,Type=Flag,Description="Likely germline variant">'] return "\n".join(headers) + "\n" + line elif line.startswith("#"): return line else: parts = line.split("\t") if parts[7].find("STATUS=Germline") >= 0: parts[7] += ";DB" return "\t".join(parts)
[ "def", "add_db_germline_flag", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "\"#CHROM\"", ")", ":", "headers", "=", "[", "'##INFO=<ID=DB,Number=0,Type=Flag,Description=\"Likely germline variant\">'", "]", "return", "\"\\n\"", ".", "join", "(", "headers...
38.461538
13.461538
def main(): '''Base58 encode or decode FILE, or standard input, to standard output.''' import sys import argparse stdout = buffer(sys.stdout) parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding') args = parser.parse_args() fun = { (False, False): b58encode, (False, True): b58encode_check, (True, False): b58decode, (True, True): b58decode_check }[(args.decode, args.check)] data = buffer(args.file).read().rstrip(b'\n') try: result = fun(data) except Exception as e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result)
[ "def", "main", "(", ")", ":", "import", "sys", "import", "argparse", "stdout", "=", "buffer", "(", "sys", ".", "stdout", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "main", ".", "__doc__", ")", "parser", ".", "add_argu...
24.214286
20.738095
def getNextNode(nodes,usednodes,parent): '''Get next node in a breadth-first traversal of nodes that have not been used yet''' for e in edges: if e.source==parent: if e.target in usednodes: x = e.target break elif e.target==parent: if e.source in usednoes: x = e.source break return x
[ "def", "getNextNode", "(", "nodes", ",", "usednodes", ",", "parent", ")", ":", "for", "e", "in", "edges", ":", "if", "e", ".", "source", "==", "parent", ":", "if", "e", ".", "target", "in", "usednodes", ":", "x", "=", "e", ".", "target", "break", ...
32.583333
15.583333
def register_on_machine_data_changed(self, callback): """Set the callback function to consume on machine data changed events. Callback receives a IMachineDataChangedEvent object. Returns the callback_id """ event_type = library.VBoxEventType.on_machine_data_changed return self.event_source.register_callback(callback, event_type)
[ "def", "register_on_machine_data_changed", "(", "self", ",", "callback", ")", ":", "event_type", "=", "library", ".", "VBoxEventType", ".", "on_machine_data_changed", "return", "self", ".", "event_source", ".", "register_callback", "(", "callback", ",", "event_type", ...
41.333333
20
def doublefork(pidfile, logfile, cwd, umask): # pragma: nocover '''Daemonize current process. After first fork we return to the shell and removing our self from controling terminal via `setsid`. After second fork we are not session leader any more and cant get controlling terminal when opening files.''' try: if os.fork(): os._exit(os.EX_OK) except OSError as e: sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror)) os.setsid() os.chdir(cwd) os.umask(umask) try: if os.fork(): os._exit(os.EX_OK) except OSError as e: sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror)) if logfile is not None: si = open('/dev/null') if six.PY2: so = open(logfile, 'a+', 0) else: so = io.open(logfile, 'ab+', 0) so = io.TextIOWrapper(so, write_through=True, encoding="utf-8") os.dup2(si.fileno(), 0) os.dup2(so.fileno(), 1) os.dup2(so.fileno(), 2) sys.stdin = si sys.stdout = sys.stderr = so with open(pidfile, 'w') as f: f.write(str(os.getpid()))
[ "def", "doublefork", "(", "pidfile", ",", "logfile", ",", "cwd", ",", "umask", ")", ":", "# pragma: nocover", "try", ":", "if", "os", ".", "fork", "(", ")", ":", "os", ".", "_exit", "(", "os", ".", "EX_OK", ")", "except", "OSError", "as", "e", ":",...
33.558824
17.558824
def EndVector(self, vectorNumElems): """EndVector writes data necessary to finish vector construction.""" self.assertNested() ## @cond FLATBUFFERS_INTERNAL self.nested = False ## @endcond # we already made space for this, so write without PrependUint32 self.PlaceUOffsetT(vectorNumElems) return self.Offset()
[ "def", "EndVector", "(", "self", ",", "vectorNumElems", ")", ":", "self", ".", "assertNested", "(", ")", "## @cond FLATBUFFERS_INTERNAL", "self", ".", "nested", "=", "False", "## @endcond", "# we already made space for this, so write without PrependUint32", "self", ".", ...
36.4
14
def cache_last_modified(request, *argz, **kwz): '''Last modification date for a cached page. Intended for usage in conditional views (@condition decorator).''' response, site, cachekey = kwz.get('_view_data') or initview(request) if not response: return None return response[1]
[ "def", "cache_last_modified", "(", "request", ",", "*", "argz", ",", "*", "*", "kwz", ")", ":", "response", ",", "site", ",", "cachekey", "=", "kwz", ".", "get", "(", "'_view_data'", ")", "or", "initview", "(", "request", ")", "if", "not", "response", ...
46.333333
17
def _sample_points(X, centers, oversampling_factor, random_state): r""" Sample points independently with probability .. math:: p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})} """ # re-implement evaluate_cost here, to avoid redundant computation distances = pairwise_distances(X, centers).min(1) ** 2 denom = distances.sum() p = oversampling_factor * distances / denom draws = random_state.uniform(size=len(p), chunks=p.chunks) picked = p > draws new_idxs, = da.where(picked) return new_idxs
[ "def", "_sample_points", "(", "X", ",", "centers", ",", "oversampling_factor", ",", "random_state", ")", ":", "# re-implement evaluate_cost here, to avoid redundant computation", "distances", "=", "pairwise_distances", "(", "X", ",", "centers", ")", ".", "min", "(", "...
27.45
23.4
def _parse_process_name(name_str): """Parses the process string and returns the process name and its directives Process strings my contain directive information with the following syntax:: proc_name={'directive':'val'} This method parses this string and returns the process name as a string and the directives information as a dictionary. Parameters ---------- name_str : str Raw string with process name and, potentially, directive information Returns ------- str Process name dict or None Process directives """ directives = None fields = name_str.split("=") process_name = fields[0] if len(fields) == 2: _directives = fields[1].replace("'", '"') try: directives = json.loads(_directives) except json.decoder.JSONDecodeError: raise eh.ProcessError( "Could not parse directives for process '{}'. The raw" " string is: {}\n" "Possible causes include:\n" "\t1. Spaces inside directives\n" "\t2. Missing '=' symbol before directives\n" "\t3. Missing quotes (' or \") around directives\n" "A valid example: process_name={{'cpus':'2'}}".format( process_name, name_str)) return process_name, directives
[ "def", "_parse_process_name", "(", "name_str", ")", ":", "directives", "=", "None", "fields", "=", "name_str", ".", "split", "(", "\"=\"", ")", "process_name", "=", "fields", "[", "0", "]", "if", "len", "(", "fields", ")", "==", "2", ":", "_directives", ...
32
21.021277
def logparse(*args, **kwargs): """ Parse access log on the terminal application. If list of files are given, parse each file. Otherwise, parse standard input. :param args: supporting functions after processed raw log line :type: list of callables :rtype: tuple of (statistics, key/value report) """ from clitool.cli import clistream from clitool.processor import SimpleDictReporter lst = [parse] + args reporter = SimpleDictReporter() stats = clistream(reporter, *lst, **kwargs) return stats, reporter.report()
[ "def", "logparse", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "clitool", ".", "cli", "import", "clistream", "from", "clitool", ".", "processor", "import", "SimpleDictReporter", "lst", "=", "[", "parse", "]", "+", "args", "reporter", "="...
34.3125
15.6875
def simple_callable(decorator): '''Decorator used to create consistent decorators. Consistent in the meaning that the wrapper do not have to care if the wrapped callable is a function or a method, it will always receive a valid callable. If the decorator is used with a function, the wrapper will receive the function itself, but if the decorator is used with a method, the wrapper will receive a bound method callable directly and the first argument (self) will be removed. This allows writing decorators behaving consistently with function and method. Note that when using reflect or annotate module functions, depth should be incremented by one. Example:: @decorator.simple_callable def mydecorator(original_function): def wrapper(callable, call, arguments): # processing return callable(call, arguments) return wrapper @mydecorator def myfunction(): pass ''' def meta_decorator(function): wrapper = decorator(function) if reflect.inside_class_definition(depth=2): def method_wrapper(*args, **kwargs): obj, args = args[0], args[1:] method = types.MethodType(function, obj, obj.__class__) return wrapper(method, *args, **kwargs) meta_wrapper = _function_mimicry(function, method_wrapper) else: def function_wrapper(*args, **kwargs): return wrapper(function, *args, **kwargs) meta_wrapper = _function_mimicry(function, function_wrapper) return meta_wrapper return meta_decorator
[ "def", "simple_callable", "(", "decorator", ")", ":", "def", "meta_decorator", "(", "function", ")", ":", "wrapper", "=", "decorator", "(", "function", ")", "if", "reflect", ".", "inside_class_definition", "(", "depth", "=", "2", ")", ":", "def", "method_wra...
30
23.345455
def receive(self): """ Returns a single request. Takes the first request from the list of requests and returns it. If the list is empty, None is returned. Returns: Response: If a new request is available a Request object is returned, otherwise None is returned. """ pickled_request = self._connection.connection.lpop(self._request_key) return pickle.loads(pickled_request) if pickled_request is not None else None
[ "def", "receive", "(", "self", ")", ":", "pickled_request", "=", "self", ".", "_connection", ".", "connection", ".", "lpop", "(", "self", ".", "_request_key", ")", "return", "pickle", ".", "loads", "(", "pickled_request", ")", "if", "pickled_request", "is", ...
41.166667
25.666667
def members(self): """ Children of the collection's item :rtype: [Collection] """ return list( [ self.children_class(child) for child in self.graph.subjects(RDF_NAMESPACES.DTS.parent, self.asNode()) ] )
[ "def", "members", "(", "self", ")", ":", "return", "list", "(", "[", "self", ".", "children_class", "(", "child", ")", "for", "child", "in", "self", ".", "graph", ".", "subjects", "(", "RDF_NAMESPACES", ".", "DTS", ".", "parent", ",", "self", ".", "a...
26.272727
21
def CaptureVariableInternal(self, value, depth, limits, can_enqueue=True): """Captures a single nameless object into Variable message. TODO(vlif): safely evaluate iterable types. TODO(vlif): safely call str(value) Args: value: data to capture depth: nested depth of dictionaries and vectors so far. limits: Per-object limits for capturing variable data. can_enqueue: allows referencing the object in variables table. Returns: Formatted captured data as per Variable proto. """ if depth == limits.max_depth: return {'varTableIndex': 0} # Buffer full. if value is None: self._total_size += 4 return {'value': 'None'} if isinstance(value, _PRIMITIVE_TYPES): r = _TrimString(repr(value), # Primitive type, always immutable. min(limits.max_value_len, self.max_size - self._total_size)) self._total_size += len(r) return {'value': r, 'type': type(value).__name__} if isinstance(value, _DATE_TYPES): r = str(value) # Safe to call str(). self._total_size += len(r) return {'value': r, 'type': 'datetime.'+ type(value).__name__} if isinstance(value, dict): # Do not use iteritems() here. If GC happens during iteration (which it # often can for dictionaries containing large variables), you will get a # RunTimeError exception. 
items = [(repr(k), v) for (k, v) in value.items()] return {'members': self.CaptureVariablesList(items, depth + 1, EMPTY_DICTIONARY, limits), 'type': 'dict'} if isinstance(value, _VECTOR_TYPES): fields = self.CaptureVariablesList( (('[%d]' % i, x) for i, x in enumerate(value)), depth + 1, EMPTY_COLLECTION, limits) return {'members': fields, 'type': type(value).__name__} if isinstance(value, types.FunctionType): self._total_size += len(value.__name__) # TODO(vlif): set value to func_name and type to 'function' return {'value': 'function ' + value.__name__} if isinstance(value, Exception): fields = self.CaptureVariablesList( (('[%d]' % i, x) for i, x in enumerate(value.args)), depth + 1, EMPTY_COLLECTION, limits) return {'members': fields, 'type': type(value).__name__} if can_enqueue: index = self._var_table_index.get(id(value)) if index is None: index = len(self._var_table) self._var_table_index[id(value)] = index self._var_table.append(value) self._total_size += 4 # number of characters to accommodate a number. return {'varTableIndex': index} for pretty_printer in CaptureCollector.pretty_printers: pretty_value = pretty_printer(value) if not pretty_value: continue fields, object_type = pretty_value return {'members': self.CaptureVariablesList(fields, depth + 1, OBJECT_HAS_NO_FIELDS, limits), 'type': object_type} if not hasattr(value, '__dict__'): # TODO(vlif): keep "value" empty and populate the "type" field instead. r = str(type(value)) self._total_size += len(r) return {'value': r} # Add an additional depth for the object itself items = value.__dict__.items() if six.PY3: # Make a list of the iterator in Python 3, to avoid 'dict changed size # during iteration' errors from GC happening in the middle. # Only limits.max_list_items + 1 items are copied, anything past that will # get ignored by CaptureVariablesList(). 
items = list(itertools.islice(items, limits.max_list_items + 1)) members = self.CaptureVariablesList(items, depth + 2, OBJECT_HAS_NO_FIELDS, limits) v = {'members': members} type_string = DetermineType(value) if type_string: v['type'] = type_string return v
[ "def", "CaptureVariableInternal", "(", "self", ",", "value", ",", "depth", ",", "limits", ",", "can_enqueue", "=", "True", ")", ":", "if", "depth", "==", "limits", ".", "max_depth", ":", "return", "{", "'varTableIndex'", ":", "0", "}", "# Buffer full.", "i...
37.490385
19.144231
def remove_index_from_handle(handle_with_index): ''' Returns index and handle separately, in a tuple. :handle_with_index: The handle string with an index (e.g. 500:prefix/suffix) :return: index and handle as a tuple. ''' split = handle_with_index.split(':') if len(split) == 2: split[0] = int(split[0]) return split elif len(split) == 1: return (None, handle_with_index) elif len(split) > 2: raise handleexceptions.HandleSyntaxError( msg='Too many colons', handle=handle_with_index, expected_syntax='index:prefix/suffix')
[ "def", "remove_index_from_handle", "(", "handle_with_index", ")", ":", "split", "=", "handle_with_index", ".", "split", "(", "':'", ")", "if", "len", "(", "split", ")", "==", "2", ":", "split", "[", "0", "]", "=", "int", "(", "split", "[", "0", "]", ...
30.8
15.3
def undisplayable_info(obj, html=False): "Generate helpful message regarding an undisplayable object" collate = '<tt>collate</tt>' if html else 'collate' info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)" if isinstance(obj, HoloMap): error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__ remedy = "Please call the %s method to generate a displayable object" % collate elif isinstance(obj, Layout): error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed." remedy = "Please call the %s method on the appropriate elements." % collate elif isinstance(obj, GridSpace): error = "GridSpace containing HoloMaps of Layouts cannot be displayed." remedy = "Please call the %s method on the appropriate elements." % collate if not html: return '\n'.join([error, remedy, info]) else: return "<center>{msg}</center>".format(msg=('<br>'.join( ['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
[ "def", "undisplayable_info", "(", "obj", ",", "html", "=", "False", ")", ":", "collate", "=", "'<tt>collate</tt>'", "if", "html", "else", "'collate'", "info", "=", "\"For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)\"", "if", "isinstanc...
53.35
28.75
def encode(self, value): ''' :param value: value to encode ''' encoded = strToBytes(value) + b'\x00' return Bits(bytes=encoded)
[ "def", "encode", "(", "self", ",", "value", ")", ":", "encoded", "=", "strToBytes", "(", "value", ")", "+", "b'\\x00'", "return", "Bits", "(", "bytes", "=", "encoded", ")" ]
27
14.666667
def purge(self, jid, node):
    """
    Delete all items from a node.

    :param jid: JID of the PubSub service
    :param node: Name of the PubSub node
    :type node: :class:`str`

    Requires :attr:`.xso.Feature.PURGE`.
    """
    # Build the owner request wrapping the purge element for `node`.
    request = pubsub_xso.OwnerRequest(
        pubsub_xso.OwnerPurge(node)
    )
    iq = aioxmpp.stanza.IQ(
        type_=aioxmpp.structs.IQType.SET,
        to=jid,
        payload=request,
    )
    yield from self.client.send(iq)
[ "def", "purge", "(", "self", ",", "jid", ",", "node", ")", ":", "iq", "=", "aioxmpp", ".", "stanza", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "structs", ".", "IQType", ".", "SET", ",", "to", "=", "jid", ",", "payload", "=", "pubsub_xso", "."...
24.090909
15.272727
def maybe_cast_to_datetime(value, dtype, errors='raise'):
    """ try to cast the array/value to a datetimelike dtype,
    converting float nan to iNaT

    Parameters
    ----------
    value : scalar or array-like
        The value(s) to coerce.
    dtype : np.dtype, str or None
        Target dtype; when None, datetime64/timedelta64 arrays are still
        normalized to nanosecond precision and other arrays may be
        inferred to a datetimelike.
    errors : str, default 'raise'
        Passed through to to_datetime / to_timedelta.
    """
    # Imported lazily; presumably to avoid a circular import at module
    # load time — TODO confirm.
    from pandas.core.tools.timedeltas import to_timedelta
    from pandas.core.tools.datetimes import to_datetime

    if dtype is not None:
        if isinstance(dtype, str):
            dtype = np.dtype(dtype)

        is_datetime64 = is_datetime64_dtype(dtype)
        is_datetime64tz = is_datetime64tz_dtype(dtype)
        is_timedelta64 = is_timedelta64_dtype(dtype)

        if is_datetime64 or is_datetime64tz or is_timedelta64:

            # Force the dtype if needed.
            msg = ("The '{dtype}' dtype has no unit. "
                   "Please pass in '{dtype}[ns]' instead.")

            if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
                # Unit-less 'datetime64' is rejected; 'datetime64[ns]' is
                # canonicalized to the shared _NS_DTYPE singleton.
                if dtype.name in ('datetime64', 'datetime64[ns]'):
                    if dtype.name == 'datetime64':
                        raise ValueError(msg.format(dtype=dtype.name))
                    dtype = _NS_DTYPE
                else:
                    raise TypeError("cannot convert datetimelike to "
                                    "dtype [{dtype}]".format(dtype=dtype))
            elif is_datetime64tz:

                # our NaT doesn't support tz's
                # this will coerce to DatetimeIndex with
                # a matching dtype below
                if is_scalar(value) and isna(value):
                    value = [value]

            elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
                # Same canonicalization for timedelta64 -> _TD_DTYPE.
                if dtype.name in ('timedelta64', 'timedelta64[ns]'):
                    if dtype.name == 'timedelta64':
                        raise ValueError(msg.format(dtype=dtype.name))
                    dtype = _TD_DTYPE
                else:
                    raise TypeError("cannot convert timedeltalike to "
                                    "dtype [{dtype}]".format(dtype=dtype))

            if is_scalar(value):
                # Scalar NaN-likes collapse to the iNaT sentinel.
                if value == iNaT or isna(value):
                    value = iNaT
            else:
                value = np.array(value, copy=False)

                # have a scalar array-like (e.g. NaT)
                if value.ndim == 0:
                    value = iNaT

                # we have an array of datetime or timedeltas & nulls
                elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
                                                                dtype):
                    # Best-effort conversion: failures fall through and
                    # leave `value` unchanged (see except below).
                    try:
                        if is_datetime64:
                            value = to_datetime(value, errors=errors)
                            # GH 25843: Remove tz information since the dtype
                            # didn't specify one
                            if value.tz is not None:
                                value = value.tz_localize(None)
                            value = value._values
                        elif is_datetime64tz:
                            # The string check can be removed once issue #13712
                            # is solved. String data that is passed with a
                            # datetime64tz is assumed to be naive which should
                            # be localized to the timezone.
                            is_dt_string = is_string_dtype(value)
                            value = to_datetime(value, errors=errors).array
                            if is_dt_string:
                                # Strings here are naive, so directly localize
                                value = value.tz_localize(dtype.tz)
                            else:
                                # Numeric values are UTC at this point,
                                # so localize and convert
                                value = (value.tz_localize('UTC')
                                         .tz_convert(dtype.tz))
                        elif is_timedelta64:
                            value = to_timedelta(value, errors=errors)._values
                    except (AttributeError, ValueError, TypeError):
                        pass

        # coerce datetimelike to object
        elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
            if is_object_dtype(dtype):
                if value.dtype != _NS_DTYPE:
                    value = value.astype(_NS_DTYPE)

                ints = np.asarray(value).view('i8')
                return tslib.ints_to_pydatetime(ints)

            # we have a non-castable dtype that was passed
            raise TypeError('Cannot cast datetime64 to {dtype}'
                            .format(dtype=dtype))

    else:

        is_array = isinstance(value, np.ndarray)

        # catch a datetime/timedelta that is not of ns variety
        # and no coercion specified
        if is_array and value.dtype.kind in ['M', 'm']:
            dtype = value.dtype

            if dtype.kind == 'M' and dtype != _NS_DTYPE:
                value = value.astype(_NS_DTYPE)
            elif dtype.kind == 'm' and dtype != _TD_DTYPE:
                value = to_timedelta(value)

        # only do this if we have an array and the dtype of the array is not
        # setup already we are not an integer/object, so don't bother with this
        # conversion
        elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
                                    value.dtype == np.object_)):
            value = maybe_infer_to_datetimelike(value)

    return value
[ "def", "maybe_cast_to_datetime", "(", "value", ",", "dtype", ",", "errors", "=", "'raise'", ")", ":", "from", "pandas", ".", "core", ".", "tools", ".", "timedeltas", "import", "to_timedelta", "from", "pandas", ".", "core", ".", "tools", ".", "datetimes", "...
44.07377
21.188525
def _get_stringlist_from_commastring(self, field): # type: (str) -> List[str] """Return list of strings from comma separated list Args: field (str): Field containing comma separated list Returns: List[str]: List of strings """ strings = self.data.get(field) if strings: return strings.split(',') else: return list()
[ "def", "_get_stringlist_from_commastring", "(", "self", ",", "field", ")", ":", "# type: (str) -> List[str]", "strings", "=", "self", ".", "data", ".", "get", "(", "field", ")", "if", "strings", ":", "return", "strings", ".", "split", "(", "','", ")", "else"...
27.666667
15.933333
def entities(self):
    """
    Access the entities, creating the list lazily on first access.

    :returns: twilio.rest.authy.v1.service.entity.EntityList
    :rtype: twilio.rest.authy.v1.service.entity.EntityList
    """
    # Cache the EntityList so repeated access reuses one instance.
    if self._entities is None:
        self._entities = EntityList(
            self._version,
            service_sid=self._solution['sid'],
        )
    return self._entities
[ "def", "entities", "(", "self", ")", ":", "if", "self", ".", "_entities", "is", "None", ":", "self", ".", "_entities", "=", "EntityList", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "r...
34.8
18.8
def put(self, coro): """Put a coroutine in the queue to be executed.""" # Avoid logging when a coroutine is queued or executed to avoid log # spam from coroutines that are started on every keypress. assert asyncio.iscoroutine(coro) self._queue.put_nowait(coro)
[ "def", "put", "(", "self", ",", "coro", ")", ":", "# Avoid logging when a coroutine is queued or executed to avoid log", "# spam from coroutines that are started on every keypress.", "assert", "asyncio", ".", "iscoroutine", "(", "coro", ")", "self", ".", "_queue", ".", "put...
49.166667
14.166667
def on_options(self, req, resp, **kwargs):
    """Respond with JSON formatted resource description on OPTIONS request.

    Args:
        req (falcon.Request): Optional request object. Defaults to None.
        resp (falcon.Response): Optional response object. Defaults to None.
        kwargs (dict): Dictionary of values created by falcon from resource
            uri template.

    Returns:
        None

    .. versionchanged:: 0.2.0
       Default ``OPTIONS`` responses include ``Allow`` header with list of
       allowed HTTP methods.
    """
    allowed = ', '.join(self.allowed_methods())
    resp.set_header('Allow', allowed)
    resp.body = json.dumps(self.describe(req, resp))
    resp.content_type = 'application/json'
[ "def", "on_options", "(", "self", ",", "req", ",", "resp", ",", "*", "*", "kwargs", ")", ":", "resp", ".", "set_header", "(", "'Allow'", ",", "', '", ".", "join", "(", "self", ".", "allowed_methods", "(", ")", ")", ")", "resp", ".", "body", "=", ...
37.6
22.3
def growthfromrange(rangegrowth, startdate, enddate):
    """
    Annual growth given growth from start date to end date.
    """
    # Elapsed time expressed in Julian years (365.25 days).
    span_secs = (pd.Timestamp(enddate) - pd.Timestamp(startdate)).total_seconds()
    year_secs = dt.timedelta(365.25).total_seconds()
    return yrlygrowth(rangegrowth, span_secs / year_secs)
[ "def", "growthfromrange", "(", "rangegrowth", ",", "startdate", ",", "enddate", ")", ":", "_yrs", "=", "(", "pd", ".", "Timestamp", "(", "enddate", ")", "-", "pd", ".", "Timestamp", "(", "startdate", ")", ")", ".", "total_seconds", "(", ")", "/", "dt",...
41.857143
11.285714
def symlink_list(self, load):
    '''
    Return a list of symlinked files and dirs
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if 'saltenv' not in load:
        return {}
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    result = {}
    for backend in self.backends(load.pop('fsbackend', None)):
        fn_key = '{0}.symlink_list'.format(backend)
        if fn_key not in self.servers:
            continue
        result = self.servers[fn_key](load)
        # some *fs do not handle prefix. Ensure it is filtered
        prefix = load.get('prefix', '').strip('/')
        if prefix != '':
            result = {lnk: tgt for lnk, tgt in six.iteritems(result)
                      if lnk.startswith(prefix)}
    return result
[ "def", "symlink_list", "(", "self", ",", "load", ")", ":", "if", "'env'", "in", "load", ":", "# \"env\" is not supported; Use \"saltenv\".", "load", ".", "pop", "(", "'env'", ")", "ret", "=", "{", "}", "if", "'saltenv'", "not", "in", "load", ":", "return",...
34.64
19.28
def update_in_hdx(self):
    # type: () -> None
    """Check if user exists in HDX and if so, update user

    Returns:
        None
    """
    # 'capacity' comes from users within an Organization; stash it for
    # the duration of the update call and restore it afterwards.
    stash = self.data.get('capacity')
    if stash is not None:
        del self.data['capacity']
    self._update_in_hdx('user', 'id')
    if stash is not None:
        self.data['capacity'] = stash
[ "def", "update_in_hdx", "(", "self", ")", ":", "# type: () -> None", "capacity", "=", "self", ".", "data", ".", "get", "(", "'capacity'", ")", "if", "capacity", "is", "not", "None", ":", "del", "self", ".", "data", "[", "'capacity'", "]", "# remove capacit...
34.307692
15.538462
def rescale(self, fun):
    """
    perform raster computations with custom functions and assign them
    to the existing raster object in memory

    Parameters
    ----------
    fun: function
        the custom function to compute on the data

    Examples
    --------
    >>> with Raster('filename') as ras:
    >>>     ras.rescale(lambda x: 10 * x)
    """
    if self.bands != 1:
        raise ValueError('only single band images are currently supported')

    # Load the pixel array, apply the user function, and write the
    # result back to band 0 of this raster object.
    transformed = fun(self.matrix())
    self.assign(transformed, band=0)
[ "def", "rescale", "(", "self", ",", "fun", ")", ":", "if", "self", ".", "bands", "!=", "1", ":", "raise", "ValueError", "(", "'only single band images are currently supported'", ")", "# load array", "mat", "=", "self", ".", "matrix", "(", ")", "# scale values"...
27
22.153846
def create_cache_database(self):
    """Create a new SQLite3 database for use with Cache objects

    :raises: IOError if there is a problem creating the database file
    """
    connection = sqlite3.connect(self.database)
    connection.text_factory = str
    cursor = connection.cursor()
    # One table each for item metadata, downloaded documents, and
    # primary text, all keyed by URL with a timestamp column.
    for ddl in (
        """CREATE TABLE items (url text, metadata text, datetime text)""",
        """CREATE TABLE documents (url text, path text, datetime text)""",
        """CREATE TABLE primary_texts (item_url text, primary_text text, datetime text)""",
    ):
        cursor.execute(ddl)
    connection.commit()
    connection.close()
[ "def", "create_cache_database", "(", "self", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "database", ")", "conn", ".", "text_factory", "=", "str", "c", "=", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "\"\"\"CREAT...
32.75
15.45
def convert_coordinates(coords, origin, wgs84, wrapped):
    """ Convert coordinates from one crs to another """
    # Anything that is not a (possibly nested) sequence yields None.
    if not isinstance(coords, (list, tuple)):
        return None
    try:
        head = coords[0]
        if isinstance(head, (list, tuple)):
            # Nested sequence: convert each member recursively.
            return [convert_coordinates(list(item), origin, wgs84, wrapped)
                    for item in coords]
        if isinstance(head, float):
            point = list(transform(origin, wgs84, *coords))
            # Optionally unwrap longitudes past the antimeridian.
            if wrapped and point[0] < -170:
                point[0] = point[0] + 360
            return point
    except IndexError:
        pass
    return None
[ "def", "convert_coordinates", "(", "coords", ",", "origin", ",", "wgs84", ",", "wrapped", ")", ":", "if", "isinstance", "(", "coords", ",", "list", ")", "or", "isinstance", "(", "coords", ",", "tuple", ")", ":", "try", ":", "if", "isinstance", "(", "co...
38.625
21.4375
def AgregarTributo(self, cod_tributo, descripcion=None, base_imponible=None,
                   alicuota=None, importe=None):
    """Add a tax (tributo) entry to the settlement request being built.

    The entry is appended under 'ajusteFinanciero' when that section is
    present in the request, otherwise under the top-level 'tributo' list.
    Always returns True.
    """
    entry = {
        'codTributo': cod_tributo,
        'descripcion': descripcion,
        'baseImponible': base_imponible,
        'alicuota': alicuota,
        'importe': importe,
    }
    target = self.solicitud.get('ajusteFinanciero', self.solicitud)
    target['tributo'].append(entry)
    return True
[ "def", "AgregarTributo", "(", "self", ",", "cod_tributo", ",", "descripcion", "=", "None", ",", "base_imponible", "=", "None", ",", "alicuota", "=", "None", ",", "importe", "=", "None", ")", ":", "trib", "=", "{", "'codTributo'", ":", "cod_tributo", ",", ...
53.090909
22.727273
def cmd(self, args=None, interact=True):
    """Process command-line arguments.

    When `args` is None, `arguments.parse_args()` is called with no
    arguments; otherwise the given sequence is parsed.
    """
    parsed = arguments.parse_args() if args is None else arguments.parse_args(args)
    self.exit_code = 0
    with self.handling_exceptions():
        self.use_args(parsed, interact, original_args=args)
    self.exit_on_error()
[ "def", "cmd", "(", "self", ",", "args", "=", "None", ",", "interact", "=", "True", ")", ":", "if", "args", "is", "None", ":", "parsed_args", "=", "arguments", ".", "parse_args", "(", ")", "else", ":", "parsed_args", "=", "arguments", ".", "parse_args",...
38.4
11.7
def _get_supervisorctl_bin(bin_env): ''' Return supervisorctl command to call, either from a virtualenv, an argument passed in, or from the global modules options ''' cmd = 'supervisorctl' if not bin_env: which_result = __salt__['cmd.which_bin']([cmd]) if which_result is None: raise CommandNotFoundError( 'Could not find a `{0}` binary'.format(cmd) ) return which_result # try to get binary from env if os.path.isdir(bin_env): cmd_bin = os.path.join(bin_env, 'bin', cmd) if os.path.isfile(cmd_bin): return cmd_bin raise CommandNotFoundError('Could not find a `{0}` binary'.format(cmd)) return bin_env
[ "def", "_get_supervisorctl_bin", "(", "bin_env", ")", ":", "cmd", "=", "'supervisorctl'", "if", "not", "bin_env", ":", "which_result", "=", "__salt__", "[", "'cmd.which_bin'", "]", "(", "[", "cmd", "]", ")", "if", "which_result", "is", "None", ":", "raise", ...
32.636364
19.363636
def crypto_sign_seed_keypair(seed):
    """
    Computes and returns the public key and secret key using the seed ``seed``.

    :param seed: bytes
    :rtype: (bytes(public_key), bytes(secret_key))
    """
    if len(seed) != crypto_sign_SEEDBYTES:
        raise exc.ValueError("Invalid seed")

    # Output buffers the C library fills in.
    public = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
    secret = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)

    status = lib.crypto_sign_seed_keypair(public, secret, seed)
    ensure(status == 0, 'Unexpected library error', raising=exc.RuntimeError)

    public_bytes = ffi.buffer(public, crypto_sign_PUBLICKEYBYTES)[:]
    secret_bytes = ffi.buffer(secret, crypto_sign_SECRETKEYBYTES)[:]
    return (public_bytes, secret_bytes)
[ "def", "crypto_sign_seed_keypair", "(", "seed", ")", ":", "if", "len", "(", "seed", ")", "!=", "crypto_sign_SEEDBYTES", ":", "raise", "exc", ".", "ValueError", "(", "\"Invalid seed\"", ")", "pk", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "cryp...
30.954545
18.772727
def _CopyFromDateTimeValues(self, date_time_values):
    """Copies time elements from date and time values.

    Args:
      date_time_values (dict[str, int]): date and time values, such as year,
          month, day of month, hours, minutes, seconds, microseconds.

    Raises:
      ValueError: if no helper can be created for the current precision.
    """
    # Pull the six time elements, defaulting each to 0 when absent.
    elements = tuple(
        date_time_values.get(key, 0)
        for key in ('year', 'month', 'day_of_month', 'hours', 'minutes',
                    'seconds'))
    microseconds = date_time_values.get('microseconds', 0)

    precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(
        self._precision)

    fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(
        microseconds)

    # Invalidate the cached normalized timestamp before updating state.
    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(*elements)
    self._time_elements_tuple = elements

    self.fraction_of_second = fraction_of_second
    self.is_local_time = False
[ "def", "_CopyFromDateTimeValues", "(", "self", ",", "date_time_values", ")", ":", "year", "=", "date_time_values", ".", "get", "(", "'year'", ",", "0", ")", "month", "=", "date_time_values", ".", "get", "(", "'month'", ",", "0", ")", "day_of_month", "=", "...
39.580645
20.516129
def eval_objfn(self):
    """Compute components of objective function as well as total
    contribution to objective function.
    """
    # Data-fidelity term plus the two regularization terms.
    data_term = self.obfn_f(self.obfn_fvar())
    reg0_term = self.obfn_g0(self.obfn_g0var())
    reg1_term = self.obfn_g1(self.obfn_g1var())
    total = data_term + reg0_term + reg1_term
    return (total, data_term, reg0_term, reg1_term)
[ "def", "eval_objfn", "(", "self", ")", ":", "fval", "=", "self", ".", "obfn_f", "(", "self", ".", "obfn_fvar", "(", ")", ")", "g0val", "=", "self", ".", "obfn_g0", "(", "self", ".", "obfn_g0var", "(", ")", ")", "g1val", "=", "self", ".", "obfn_g1",...
35.5
8.6
def find_obfuscatables(tokens, obfunc, ignore_length=False):
    """
    Iterates over *tokens*, which must be an equivalent output to what
    tokenize.generate_tokens() produces, calling *obfunc* on each with the
    following parameters:

        - **tokens:**       The current list of tokens.
        - **index:**        The current position in the list.

    *obfunc* is expected to return the token string if that token can be
    safely obfuscated **or** one of the following optional values which will
    instruct find_obfuscatables() how to proceed:

        - **'__skipline__'**   Keep skipping tokens until a newline is reached.
        - **'__skipnext__'**   Skip the next token in the sequence.

    If *ignore_length* is ``True`` then single-character obfuscatables will
    be obfuscated anyway (even though it wouldn't save any space).
    """
    # NOTE(review): the module-level globals are (re)populated here so that
    # obfunc implementations can consult them; confirm no concurrent callers.
    global keyword_args
    keyword_args = analyze.enumerate_keyword_args(tokens)

    global imported_modules
    imported_modules = analyze.enumerate_imports(tokens)
    #print("imported_modules: %s" % imported_modules)

    skip_line = False
    skip_next = False
    obfuscatables = []
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.NEWLINE:
            # A newline terminates any pending '__skipline__' request.
            skip_line = False
        if skip_line:
            continue
        result = obfunc(tokens, index, ignore_length=ignore_length)
        if result:
            if skip_next:
                # Previous token requested '__skipnext__': drop this result.
                skip_next = False
            elif result == '__skipline__':
                skip_line = True
            elif result == '__skipnext__':
                skip_next = True
            elif result in obfuscatables:
                # Already collected; keep the result list duplicate-free.
                pass
            else:
                obfuscatables.append(result)
        else: # If result is empty we need to reset skip_next so we don't
            skip_next = False # accidentally skip the next identifier
    return obfuscatables
[ "def", "find_obfuscatables", "(", "tokens", ",", "obfunc", ",", "ignore_length", "=", "False", ")", ":", "global", "keyword_args", "keyword_args", "=", "analyze", ".", "enumerate_keyword_args", "(", "tokens", ")", "global", "imported_modules", "imported_modules", "=...
39.104167
18.9375
def _get_baremetal_switches(self, port):
    """Get switch ip addresses from baremetal transaction.

    This method is used to extract switch information
    from the transaction where VNIC_TYPE is baremetal.

    :param port: Received port transaction
    :returns: list of all switches
    :returns: list of only switches which are active
    """
    all_switches = set()
    active_switches = set()
    for link_info in port[bc.portbindings.PROFILE]['local_link_information']:
        switch_info = self._get_baremetal_switch_info(link_info)
        if not switch_info:
            continue
        switch_ip = switch_info['switch_ip']
        # Skip switches that are not configured Nexus devices.
        if not self._switch_defined(switch_ip):
            continue
        all_switches.add(switch_ip)
        if self.is_switch_active(switch_ip):
            active_switches.add(switch_ip)

    return list(all_switches), list(active_switches)
[ "def", "_get_baremetal_switches", "(", "self", ",", "port", ")", ":", "all_switches", "=", "set", "(", ")", "active_switches", "=", "set", "(", ")", "all_link_info", "=", "port", "[", "bc", ".", "portbindings", ".", "PROFILE", "]", "[", "'local_link_informat...
34.827586
16.862069
def lock(remote=None):
    '''
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    '''
    def _do_lock(repo):
        # Create the repo's lockfile unless it already exists; report
        # success/failure messages for this single repo.
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            return success, failed
        try:
            with salt.utils.files.fopen(repo['lockfile'], 'w'):
                pass
        except (IOError, OSError) as exc:
            msg = ('Unable to set update lock for {0} ({1}): {2} '
                   .format(repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            failed.append(msg)
        else:
            msg = 'Set lock for {0}'.format(repo['url'])
            log.debug(msg)
            success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        # A single repo config was passed in directly.
        return _do_lock(remote)

    locked = []
    errors = []
    for repo in init():
        if remote:
            try:
                matched = fnmatch.fnmatch(repo['url'], remote)
            except TypeError:
                # remote was non-string, try again
                matched = fnmatch.fnmatch(repo['url'], six.text_type(remote))
            if not matched:
                continue
        success, failed = _do_lock(repo)
        locked.extend(success)
        errors.extend(failed)
    return locked, errors
[ "def", "lock", "(", "remote", "=", "None", ")", ":", "def", "_do_lock", "(", "repo", ")", ":", "success", "=", "[", "]", "failed", "=", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "repo", "[", "'lockfile'", "]", ")", ":", "try...
31.622222
19.444444
def write_trailer(self, sector, key_a=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF),
                  auth_bits=(0xFF, 0x07, 0x80), user_data=0x69,
                  key_b=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)):
    """
    Writes sector trailer of specified sector. Tag and auth must be set -
    does auth. If value is None, value of byte is kept.

    Returns error state.
    """
    # The trailer is block 3 of the sector: key A (6 bytes), access
    # bits (3), one user byte, then key B (6 bytes).
    block = self.block_addr(sector, 3)
    payload = key_a[:6] + auth_bits[:3] + (user_data, ) + key_b[:6]
    return self.rewrite(block, payload)
[ "def", "write_trailer", "(", "self", ",", "sector", ",", "key_a", "=", "(", "0xFF", ",", "0xFF", ",", "0xFF", ",", "0xFF", ",", "0xFF", ",", "0xFF", ")", ",", "auth_bits", "=", "(", "0xFF", ",", "0x07", ",", "0x80", ")", ",", "user_data", "=", "0...
55.888889
25
def summoner_names_to_id(summoners):
    """
    Gets a list of summoners names and return a dictionary mapping the
    player name to his/her summoner id

    :param summoners: a list of player names
    :return: a dictionary name -> id
    """
    mapping = {}
    # The API accepts at most 40 names per request, so query in batches.
    for lo, hi in _slice(0, len(summoners), 40):
        batch = get_summoners_by_name(summoners[lo:hi])
        for player_name, summoner in batch.items():
            mapping[player_name] = summoner.id
    return mapping
[ "def", "summoner_names_to_id", "(", "summoners", ")", ":", "ids", "=", "{", "}", "for", "start", ",", "end", "in", "_slice", "(", "0", ",", "len", "(", "summoners", ")", ",", "40", ")", ":", "result", "=", "get_summoners_by_name", "(", "summoners", "["...
37.75
14.416667
def fetch(self, category=CATEGORY_QUESTION, offset=DEFAULT_OFFSET):
    """Fetch questions from the Kitsune url.

    :param category: the category of items to fetch
    :offset: obtain questions after offset
    :returns: a generator of questions
    """
    # Falsy offsets (None, 0) fall back to the default starting point.
    effective_offset = offset if offset else DEFAULT_OFFSET
    return super().fetch(category, offset=effective_offset)
[ "def", "fetch", "(", "self", ",", "category", "=", "CATEGORY_QUESTION", ",", "offset", "=", "DEFAULT_OFFSET", ")", ":", "if", "not", "offset", ":", "offset", "=", "DEFAULT_OFFSET", "kwargs", "=", "{", "\"offset\"", ":", "offset", "}", "items", "=", "super"...
30.714286
16.214286
def set(self, time, value, compact=False):
    """Set the value for the time series. If compact is True, only set
    the value if it's different from what it would be anyway.
    """
    if compact and len(self) and self.get(time) == value:
        # The value already in effect at `time` matches; skip the write.
        return
    self._d[time] = value
[ "def", "set", "(", "self", ",", "time", ",", "value", ",", "compact", "=", "False", ")", ":", "if", "(", "len", "(", "self", ")", "==", "0", ")", "or", "(", "not", "compact", ")", "or", "(", "compact", "and", "self", ".", "get", "(", "time", ...
41
11.625
def _to_r(o, as_data=False, level=0):
    """Helper function to convert python data structures to R equivalents

    NOTE(review): this is Python 2-only code (`basestring`, `dict.iteritems`);
    a Python 3 port would need `str` and `items()`.

    TODO: a single model for transforming to r to handle
          * function args
          * lists as function args
    """
    if o is None:
        return "NA"
    if isinstance(o, basestring):
        # Strings pass through verbatim — presumably already valid R
        # syntax supplied by the caller; TODO confirm.
        return o
    if hasattr(o, "r"):
        # bridge to @property r on GGStatement(s)
        return o.r
    elif isinstance(o, bool):
        return "TRUE" if o else "FALSE"
    elif isinstance(o, (list, tuple)):
        inner = ",".join([_to_r(x, True, level+1) for x in o])
        # Only wrap in c(...) when the sequence stands alone as data.
        return "c({})".format(inner) if as_data else inner
    elif isinstance(o, dict):
        # Keys are sorted so output is deterministic.
        inner = ",".join(["{}={}".format(k, _to_r(v, True, level+1))
                          for k, v in sorted(o.iteritems(), key=lambda x: x[0])])
        return "list({})".format(inner) if as_data else inner
    return str(o)
[ "def", "_to_r", "(", "o", ",", "as_data", "=", "False", ",", "level", "=", "0", ")", ":", "if", "o", "is", "None", ":", "return", "\"NA\"", "if", "isinstance", "(", "o", ",", "basestring", ")", ":", "return", "o", "if", "hasattr", "(", "o", ",", ...
36.5
16.333333
def spring_project(
        adata, project_dir, embedding_method, subplot_name=None,
        cell_groupings=None, custom_color_tracks=None,
        total_counts_key='n_counts', overwrite=False):
    """Exports to a SPRING project directory [Weinreb17]_.

    Visualize annotation present in `adata`. By default, export all gene
    expression data from `adata.raw` and categorical and continuous
    annotations present in `adata.obs`.

    See `SPRING <https://github.com/AllonKleinLab/SPRING>`__ or [Weinreb17]_
    for details.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix: `adata.uns['neighbors']` needs to be present.
    project_dir : `str`
        Path to directory for exported SPRING files.
    embedding_method: `str`
        Name of a 2-D embedding in `adata.obsm`
    subplot_name: `str`, optional (default: `None`)
        Name of subplot folder to be created at `project_dir+"/"+subplot_name`
    cell_groupings : `str`, `list` of `str`, optional (default: `None`)
        Instead of importing all categorical annotations when `None`, pass a
        list of keys for `adata.obs`.
    custom_color_tracks : `str`, `list` of `str`, optional (default: `None`)
        Specify specific `adata.obs` keys for continuous coloring.
    total_counts_key: `str`, optional (default: "n_counts")
        Name of key for total transcript counts in `adata.obs`.
    overwrite: `boolean`, optional (default: `False`)
        When `True`, existing counts matrices in `project_dir` are overwritten.

    Examples
    --------
    See this `tutorial
    <https://github.com/theislab/scanpy_usage/tree/master/171111_SPRING_export>`__.
    """
    # need to get nearest neighbors first
    if 'neighbors' not in adata.uns:
        raise ValueError('Run `sc.pp.neighbors` first.')

    # check that requested 2-D embedding has been generated
    if embedding_method not in adata.obsm_keys():
        if 'X_' + embedding_method in adata.obsm_keys():
            embedding_method = 'X_' + embedding_method
        else:
            if embedding_method in adata.uns:
                # e.g. draw_graph stores its layout name in adata.uns.
                embedding_method = 'X_' + embedding_method + '_' + adata.uns[embedding_method]['params']['layout']
            else:
                raise ValueError('Run the specified embedding method `%s` first.' %embedding_method)

    coords = adata.obsm[embedding_method]

    # Make project directory and subplot directory (subplot has same name as project)
    # For now, the subplot is just all cells in adata
    project_dir = project_dir.rstrip('/') + '/'
    if subplot_name is None:
        subplot_dir = project_dir + project_dir.split('/')[-2] + '/'
    else:
        subplot_dir = project_dir + subplot_name + '/'
    if not os.path.exists(subplot_dir):
        os.makedirs(subplot_dir)
    print("Writing subplot to %s" %subplot_dir)

    # Write counts matrices as hdf5 files and npz if they do not already exist
    # or if user requires overwrite.
    # To do: check if Alex's h5sparse format will allow fast loading from just
    # one file.
    write_counts_matrices = True
    base_dir_filelist = ['counts_norm_sparse_genes.hdf5',
                         'counts_norm_sparse_cells.hdf5',
                         'counts_norm.npz',
                         'total_counts.txt',
                         'genes.txt']
    if all([os.path.isfile(project_dir + f) for f in base_dir_filelist]):
        if not overwrite:
            logg.warning('%s is an existing SPRING folder. A new subplot will be created, but '
                         'you must set `overwrite=True` to overwrite counts matrices.'
                         %project_dir)
            write_counts_matrices = False
        else:
            logg.warning('Overwriting the files in %s.' %project_dir)

    # Ideally, all genes will be written from adata.raw
    if adata.raw is not None:
        E = adata.raw.X.tocsc()
        gene_list = list(adata.raw.var_names)
    else:
        E = adata.X.tocsc()
        gene_list = list(adata.var_names)

    # Keep track of total counts per cell if present
    if total_counts_key in adata.obs:
        total_counts = np.array(adata.obs[total_counts_key])
    else:
        total_counts = E.sum(1).A1

    # Write the counts matrices to project directory
    if write_counts_matrices:
        write_hdf5_genes(E, gene_list, project_dir + 'counts_norm_sparse_genes.hdf5')
        write_hdf5_cells(E, project_dir + 'counts_norm_sparse_cells.hdf5')
        write_sparse_npz(E, project_dir + 'counts_norm.npz')
        with open(project_dir + 'genes.txt', 'w') as o:
            for g in gene_list:
                o.write(g + '\n')
        np.savetxt(project_dir + 'total_counts.txt', total_counts)

    # Get categorical and continuous metadata
    categorical_extras = {}
    continuous_extras = {}
    if cell_groupings is None:
        for obs_name in adata.obs:
            if is_categorical(adata.obs[obs_name]):
                categorical_extras[obs_name] = [str(x) for x in adata.obs[obs_name]]
    else:
        if isinstance(cell_groupings, str):
            cell_groupings = [cell_groupings]
        for obs_name in cell_groupings:
            if obs_name not in adata.obs:
                logg.warning('Cell grouping "%s" is not in adata.obs' %obs_name)
            elif is_categorical(adata.obs[obs_name]):
                categorical_extras[obs_name] = [str(x) for x in adata.obs[obs_name]]
            else:
                logg.warning('Cell grouping "%s" is not a categorical variable' %obs_name)
    if custom_color_tracks is None:
        for obs_name in adata.obs:
            if not is_categorical(adata.obs[obs_name]):
                continuous_extras[obs_name] = np.array(adata.obs[obs_name])
    else:
        if isinstance(custom_color_tracks, str):
            custom_color_tracks = [custom_color_tracks]
        for obs_name in custom_color_tracks:
            if obs_name not in adata.obs:
                logg.warning('Custom color track "%s" is not in adata.obs' %obs_name)
            elif not is_categorical(adata.obs[obs_name]):
                continuous_extras[obs_name] = np.array(adata.obs[obs_name])
            else:
                logg.warning('Custom color track "%s" is not a continuous variable' %obs_name)

    # Write continuous colors
    continuous_extras['Uniform'] = np.zeros(E.shape[0])
    write_color_tracks(continuous_extras, subplot_dir+'color_data_gene_sets.csv')

    # Create and write a dictionary of color profiles to be used by the visualizer
    color_stats = {}
    color_stats = get_color_stats_genes(color_stats, E, gene_list)
    color_stats = get_color_stats_custom(color_stats, continuous_extras)
    write_color_stats(subplot_dir + 'color_stats.json', color_stats)

    # Write categorical data
    categorical_coloring_data = {}
    categorical_coloring_data = build_categ_colors(categorical_coloring_data, categorical_extras)
    write_cell_groupings(subplot_dir+'categorical_coloring_data.json', categorical_coloring_data)

    # Write graph in two formats for backwards compatibility
    edges = get_edges(adata)
    write_graph(subplot_dir + 'graph_data.json', E.shape[0], edges)
    write_edges(subplot_dir + 'edges.csv', edges)

    # Write cell filter; for now, subplots must be generated from within SPRING,
    # so cell filter includes all cells.
    np.savetxt(subplot_dir + 'cell_filter.txt', np.arange(E.shape[0]), fmt='%i')
    np.save(subplot_dir + 'cell_filter.npy', np.arange(E.shape[0]))

    # Write 2-D coordinates, after adjusting to roughly match SPRING's default d3js force layout parameters
    coords = coords - coords.min(0)[None,:]
    coords = coords * (np.array([1000, 1000]) / coords.ptp(0))[None,:] + np.array([200,-200])[None,:]
    np.savetxt(subplot_dir + 'coordinates.txt',
               np.hstack((np.arange(E.shape[0])[:,None], coords)),
               fmt='%i,%.6f,%.6f')

    # Write some useful intermediates, if they exist
    if 'X_pca' in adata.obsm_keys():
        np.savez_compressed(subplot_dir + 'intermediates.npz',
                            Epca=adata.obsm['X_pca'],
                            total_counts = total_counts)

    # Write PAGA data, if present
    if 'paga' in adata.uns:
        clusts = np.array(adata.obs[adata.uns['paga']['groups']].cat.codes)
        uniq_clusts = adata.obs[adata.uns['paga']['groups']].cat.categories
        paga_coords = [coords[clusts==i,:].mean(0) for i in range(len(uniq_clusts))]
        export_PAGA_to_SPRING(adata, paga_coords, subplot_dir + 'PAGA_data.json')

    return None
[ "def", "spring_project", "(", "adata", ",", "project_dir", ",", "embedding_method", ",", "subplot_name", "=", "None", ",", "cell_groupings", "=", "None", ",", "custom_color_tracks", "=", "None", ",", "total_counts_key", "=", "'n_counts'", ",", "overwrite", "=", ...
44.972973
24.297297
def censor_entity_types(self, entity_types): # type: (set) -> TermDocMatrixFactory ''' Entity types to exclude from feature construction. Terms matching specificed entities, instead of labeled by their lower case orthographic form or lemma, will be labeled by their entity type. Parameters ---------- entity_types : set of entity types outputted by spaCy 'TIME', 'WORK_OF_ART', 'PERSON', 'MONEY', 'ORG', 'ORDINAL', 'DATE', 'CARDINAL', 'LAW', 'QUANTITY', 'GPE', 'PERCENT' Returns --------- self ''' assert type(entity_types) == set self._entity_types_to_censor = entity_types self._feats_from_spacy_doc = FeatsFromSpacyDoc( use_lemmas=self._use_lemmas, entity_types_to_censor=self._entity_types_to_censor ) return self
[ "def", "censor_entity_types", "(", "self", ",", "entity_types", ")", ":", "# type: (set) -> TermDocMatrixFactory", "assert", "type", "(", "entity_types", ")", "==", "set", "self", ".", "_entity_types_to_censor", "=", "entity_types", "self", ".", "_feats_from_spacy_doc",...
36.5
22.333333
def limit(limit, every=1): """This decorator factory creates a decorator that can be applied to functions in order to limit the rate the function can be invoked. The rate is `limit` over `every`, where limit is the number of invocation allowed every `every` seconds. limit(4, 60) creates a decorator that limit the function calls to 4 per minute. If not specified, every defaults to 1 second.""" def limitdecorator(fn): """This is the actual decorator that performs the rate-limiting.""" semaphore = _threading.Semaphore(limit) @_functools.wraps(fn) def wrapper(*args, **kwargs): semaphore.acquire() try: return fn(*args, **kwargs) finally: # ensure semaphore release timer = _threading.Timer(every, semaphore.release) timer.setDaemon(True) # allows the timer to be canceled on exit timer.start() return wrapper return limitdecorator
[ "def", "limit", "(", "limit", ",", "every", "=", "1", ")", ":", "def", "limitdecorator", "(", "fn", ")", ":", "\"\"\"This is the actual decorator that performs the rate-limiting.\"\"\"", "semaphore", "=", "_threading", ".", "Semaphore", "(", "limit", ")", "@", "_f...
37.740741
20.592593
def zrangebylex(self, name, min, max, start=None, num=None): """ Return the lexicographical range of values from sorted set ``name`` between ``min`` and ``max``. If ``start`` and ``num`` are specified, then return a slice of the range. """ if (start is not None and num is None) or \ (num is not None and start is None): raise DataError("``start`` and ``num`` must both be specified") pieces = ['ZRANGEBYLEX', name, min, max] if start is not None and num is not None: pieces.extend([Token.get_token('LIMIT'), start, num]) return self.execute_command(*pieces)
[ "def", "zrangebylex", "(", "self", ",", "name", ",", "min", ",", "max", ",", "start", "=", "None", ",", "num", "=", "None", ")", ":", "if", "(", "start", "is", "not", "None", "and", "num", "is", "None", ")", "or", "(", "num", "is", "not", "None...
44.333333
17.533333
def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj
[ "def", "__dict_to_service_spec", "(", "spec", ")", ":", "spec_obj", "=", "kubernetes", ".", "client", ".", "V1ServiceSpec", "(", ")", "for", "key", ",", "value", "in", "iteritems", "(", "spec", ")", ":", "# pylint: disable=too-many-nested-blocks", "if", "key", ...
39.238095
17.333333
def write_to_disk(filename, delete=False, content=get_time()): """ Write filename out to disk """ if not os.path.exists(os.path.dirname(filename)): return if delete: if os.path.lexists(filename): os.remove(filename) else: with open(filename, 'wb') as f: f.write(content.encode('utf-8'))
[ "def", "write_to_disk", "(", "filename", ",", "delete", "=", "False", ",", "content", "=", "get_time", "(", ")", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "retur...
28.916667
12.083333
def update_g(self, fs=None, qinv=None, fc=None, kappa_tst_re=1.0, kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0, kappa_c=1.0): """ Calculate the open loop gain g(f,t) given the new parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. Parameters ---------- fc : float Coupled-cavity (CC) pole at time t. kappa_c : float Scalar correction factor for sensing function c at time t. kappa_tst_re : float Real part of scalar correction factor for actuation function a_tst0 at time t. kappa_pu_re : float Real part of scalar correction factor for actuation function a_pu0 at time t. kappa_tst_im : float Imaginary part of scalar correction factor for actuation function a_tst0 at time t. kappa_pu_im : float Imaginary part of scalar correction factor for actuation function a_pu0 at time t. fs : float Spring frequency for signal recycling cavity. qinv : float Inverse quality factor for signal recycling cavity. Returns ------- g : numpy.array The new open loop gain g(f,t). """ c = self.update_c(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c) a_tst = self.a_tst0 * (kappa_tst_re + 1.0j * kappa_tst_im) a_pu = self.a_pu0 * (kappa_pu_re + 1.0j * kappa_pu_im) return c * self.d0 * (a_tst + a_pu)
[ "def", "update_g", "(", "self", ",", "fs", "=", "None", ",", "qinv", "=", "None", ",", "fc", "=", "None", ",", "kappa_tst_re", "=", "1.0", ",", "kappa_tst_im", "=", "0.0", ",", "kappa_pu_re", "=", "1.0", ",", "kappa_pu_im", "=", "0.0", ",", "kappa_c"...
39.631579
19.447368
def default_decoder(self, obj): """Handle a dict that might contain a wrapped state for a custom type.""" typename, marshalled_state = self.unwrap_callback(obj) if typename is None: return obj try: cls, unmarshaller = self.serializer.unmarshallers[typename] except KeyError: raise LookupError('no unmarshaller found for type "{}"'.format(typename)) from None if cls is not None: instance = cls.__new__(cls) unmarshaller(instance, marshalled_state) return instance else: return unmarshaller(marshalled_state)
[ "def", "default_decoder", "(", "self", ",", "obj", ")", ":", "typename", ",", "marshalled_state", "=", "self", ".", "unwrap_callback", "(", "obj", ")", "if", "typename", "is", "None", ":", "return", "obj", "try", ":", "cls", ",", "unmarshaller", "=", "se...
37.235294
20.352941
def write_table(self): """ |write_table| with HTML table format. :Example: :ref:`example-html-table-writer` .. note:: - |None| is not written """ tags = _get_tags_module() with self._logger: self._verify_property() self._preprocess() if typepy.is_not_null_string(self.table_name): self._table_tag = tags.table(id=sanitize_python_var_name(self.table_name)) self._table_tag += tags.caption(MultiByteStrDecoder(self.table_name).unicode_str) else: self._table_tag = tags.table() try: self._write_header() except EmptyHeaderError: pass self._write_body()
[ "def", "write_table", "(", "self", ")", ":", "tags", "=", "_get_tags_module", "(", ")", "with", "self", ".", "_logger", ":", "self", ".", "_verify_property", "(", ")", "self", ".", "_preprocess", "(", ")", "if", "typepy", ".", "is_not_null_string", "(", ...
26.62069
21.034483
def prior_H0(self, H0, H0_min=0, H0_max=200): """ checks whether the parameter vector has left its bound, if so, adds a big number """ if H0 < H0_min or H0 > H0_max: penalty = -10**15 return penalty, False else: return 0, True
[ "def", "prior_H0", "(", "self", ",", "H0", ",", "H0_min", "=", "0", ",", "H0_max", "=", "200", ")", ":", "if", "H0", "<", "H0_min", "or", "H0", ">", "H0_max", ":", "penalty", "=", "-", "10", "**", "15", "return", "penalty", ",", "False", "else", ...
32.666667
12.666667
def get_cookies_from_cache(username): """ Returns a RequestsCookieJar containing the cached cookies for the given user. """ logging.debug('Trying to get cookies from the cache.') path = get_cookies_cache_path(username) cj = requests.cookies.RequestsCookieJar() try: cached_cj = get_cookie_jar(path) for cookie in cached_cj: cj.set_cookie(cookie) logging.debug( 'Loaded cookies from %s', get_cookies_cache_path(username)) except IOError: logging.debug('Could not load cookies from the cache.') return cj
[ "def", "get_cookies_from_cache", "(", "username", ")", ":", "logging", ".", "debug", "(", "'Trying to get cookies from the cache.'", ")", "path", "=", "get_cookies_cache_path", "(", "username", ")", "cj", "=", "requests", ".", "cookies", ".", "RequestsCookieJar", "(...
29.1
19.1
def index_all(self, index_name): """Index all available documents, using streaming_bulk for speed Args: index_name (string): The index """ oks = 0 notoks = 0 for ok, item in streaming_bulk( self.es_client, self._iter_documents(index_name) ): if ok: oks += 1 else: notoks += 1 logging.info( "Import results: %d ok, %d not ok", oks, notoks )
[ "def", "index_all", "(", "self", ",", "index_name", ")", ":", "oks", "=", "0", "notoks", "=", "0", "for", "ok", ",", "item", "in", "streaming_bulk", "(", "self", ".", "es_client", ",", "self", ".", "_iter_documents", "(", "index_name", ")", ")", ":", ...
24.571429
16.619048
def before_reject(analysis): """Function triggered before 'unassign' transition takes place """ worksheet = analysis.getWorksheet() if not worksheet: return # Rejection of a routine analysis causes the removal of their duplicates for dup in worksheet.get_duplicates_for(analysis): doActionFor(dup, "unassign")
[ "def", "before_reject", "(", "analysis", ")", ":", "worksheet", "=", "analysis", ".", "getWorksheet", "(", ")", "if", "not", "worksheet", ":", "return", "# Rejection of a routine analysis causes the removal of their duplicates", "for", "dup", "in", "worksheet", ".", "...
34.1
15.2
def OnUpView(self, event): """Request to move up the hierarchy to highest-weight parent""" node = self.activated_node parents = [] selected_parent = None if node: if hasattr( self.adapter, 'best_parent' ): selected_parent = self.adapter.best_parent( node ) else: parents = self.adapter.parents( node ) if parents: if not selected_parent: parents.sort(key = lambda a: self.adapter.value(node, a)) selected_parent = parents[-1] class event: node = selected_parent self.OnNodeActivated(event) else: self.SetStatusText(_('No parents for the currently selected node: %(node_name)s') % dict(node_name=self.adapter.label(node))) else: self.SetStatusText(_('No currently selected node'))
[ "def", "OnUpView", "(", "self", ",", "event", ")", ":", "node", "=", "self", ".", "activated_node", "parents", "=", "[", "]", "selected_parent", "=", "None", "if", "node", ":", "if", "hasattr", "(", "self", ".", "adapter", ",", "'best_parent'", ")", ":...
41.956522
18.782609
def add(self, name, filt, info='', params=(), setn=None): """ Add filter. Parameters ---------- name : str filter name filt : array_like boolean filter array info : str informative description of the filter params : tuple parameters used to make the filter Returns ------- None """ iname = '{:.0f}_'.format(self.n) + name self.index[self.n] = iname if setn is None: setn = self.maxset + 1 self.maxset = setn if setn not in self.sets.keys(): self.sets[setn] = [iname] else: self.sets[setn].append(iname) # self.keys is not added to? self.components[iname] = filt self.info[iname] = info self.params[iname] = params for a in self.analytes: self.switches[a][iname] = False self.n += 1 return
[ "def", "add", "(", "self", ",", "name", ",", "filt", ",", "info", "=", "''", ",", "params", "=", "(", ")", ",", "setn", "=", "None", ")", ":", "iname", "=", "'{:.0f}_'", ".", "format", "(", "self", ".", "n", ")", "+", "name", "self", ".", "in...
23.775
16.925
def get_composition_repository_session(self, proxy): """Gets the session for retrieving composition to repository mappings. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositorySession) - a CompositionRepositorySession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_repository() is false compliance: optional - This method must be implemented if supports_composition_repository() is true. """ if not self.supports_composition_repository(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionRepositorySession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_composition_repository_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_composition_repository", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportErro...
39
17.192308
def get_strokes(self): """Return a css snippet containing all stroke style options""" def stroke_dict_to_css(stroke, i=None): """Return a css style for the given option""" css = [ '%s.series%s {\n' % (self.id, '.serie-%d' % i if i is not None else '') ] for key in ('width', 'linejoin', 'linecap', 'dasharray', 'dashoffset'): if stroke.get(key): css.append(' stroke-%s: %s;\n' % (key, stroke[key])) css.append('}') return '\n'.join(css) css = [] if self.graph.stroke_style is not None: css.append(stroke_dict_to_css(self.graph.stroke_style)) for serie in self.graph.series: if serie.stroke_style is not None: css.append(stroke_dict_to_css(serie.stroke_style, serie.index)) for secondary_serie in self.graph.secondary_series: if secondary_serie.stroke_style is not None: css.append( stroke_dict_to_css( secondary_serie.stroke_style, secondary_serie.index ) ) return '\n'.join(css)
[ "def", "get_strokes", "(", "self", ")", ":", "def", "stroke_dict_to_css", "(", "stroke", ",", "i", "=", "None", ")", ":", "\"\"\"Return a css style for the given option\"\"\"", "css", "=", "[", "'%s.series%s {\\n'", "%", "(", "self", ".", "id", ",", "'.serie-%d'...
39.387097
17.870968
def format_h4(s, format="text", indents=0): """ Encloses string in format text Args, Returns: see format_h1() """ _CHAR = "^" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["#### {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
[ "def", "format_h4", "(", "s", ",", "format", "=", "\"text\"", ",", "indents", "=", "0", ")", ":", "_CHAR", "=", "\"^\"", "if", "format", ".", "startswith", "(", "\"text\"", ")", ":", "return", "format_underline", "(", "s", ",", "_CHAR", ",", "indents",...
27.857143
10.285714
def propagate(self, **args): """ Propagates activation through the network. Optionally, takes input layer names as keywords, and their associated activations. If input layer(s) are given, then propagate() will return the output layer's activation. If there is more than one output layer, then a dictionary is returned. Examples: >>> net = Network() # doctest: +ELLIPSIS Conx using seed: ... >>> net.addLayers(2, 5, 1) >>> len(net.propagate(input = [0, .5])) 1 """ self.prePropagate(**args) for key in args: layer = self.getLayer(key) if layer.kind == 'Input': if self[key].verify and not self[key].activationSet == 0: raise AttributeError("attempt to set activations on input layer '%s' without reset" % key) self.copyActivations(layer, args[key]) elif layer.kind == 'Context': self.copyActivations(layer, args[key]) elif layer.kind == 'Output' and len(args[key]) == layer.size: # in case you expect propagate to handle the outputs self.copyTargets(layer, args[key]) self.verifyInputs() # better have inputs set if self.verbosity > 2: print("Propagate Network '" + self.name + "':") # initialize netinput: for layer in self.layers: if layer.type != 'Input' and layer.active: layer.netinput = (layer.weight).copy() # for each connection, in order: for layer in self.layers: if layer.active: for connection in self.connections: if (connection.toLayer.name == layer.name and connection.fromLayer.active and connection.active): a = connection.fromLayer.activation w = connection.weight m = Numeric.matrixmultiply(a, w) ni = m + connection.toLayer.netinput connection.toLayer.netinput = ni #connection.toLayer.netinput = \ # (connection.toLayer.netinput + # Numeric.matrixmultiply(connection.fromLayer.activation, # connection.weight)) # propagate! 
if layer.type != 'Input': layer.activation = self.activationFunction(layer.netinput) for layer in self.layers: if layer.log and layer.active: layer.writeLog(self) self.count += 1 # counts number of times propagate() is called if len(args) != 0: dict = {} for layer in self.layers: if layer.type == "Output": dict[layer.name] = layer.activation.copy() if len(dict) == 1: return dict[list(dict.keys())[0]] else: return dict
[ "def", "propagate", "(", "self", ",", "*", "*", "args", ")", ":", "self", ".", "prePropagate", "(", "*", "*", "args", ")", "for", "key", "in", "args", ":", "layer", "=", "self", ".", "getLayer", "(", "key", ")", "if", "layer", ".", "kind", "==", ...
44.447761
19.671642
def hexdump_iter(logger, fd, width=16, skip=True, hexii=False, begin=0, highlight=None): r""" Return a hexdump-dump of a string as a generator of lines. Unless you have massive amounts of data you probably want to use :meth:`hexdump`. Arguments: logger(FastLogger): Logger object fd(file): File object to dump. Use :meth:`StringIO.StringIO` or :meth:`hexdump` to dump a string. width(int): The number of characters per line skip(bool): Set to True, if repeated lines should be replaced by a "*" hexii(bool): Set to True, if a hexii-dump should be returned instead of a hexdump. begin(int): Offset of the first byte to print in the left column highlight(iterable): Byte values to highlight. Returns: A generator producing the hexdump-dump one line at a time. """ style = logger.style.hexdump.copy() highlight = highlight or [] for b in highlight: if isinstance(b, str): b = ord(b) style['%02x' % b] = style['highlight'] _style = style skipping = False lines = [] last_unique = '' byte_width = len('00 ') column_sep = ' ' line_fmt = '%%(offset)08x %%(hexbytes)-%is │%%(printable)s│' % (len(column_sep)+(width*byte_width)) spacer = ' ' marker = (style.get('marker') or (lambda s:s))('│') if hexii: column_sep = '' line_fmt = '%%(offset)08x %%(hexbytes)-%is│' % (len(column_sep)+(width*byte_width)) else: def style_byte(b): hbyte = '%02x' % ord(b) abyte = b if isprint(b) else '·' if hbyte in style: st = style[hbyte] elif isprint(b): st = style.get('printable') else: st = style.get('nonprintable') if st: hbyte = st(hbyte) abyte = st(abyte) return hbyte, abyte cache = [style_byte(chr(b)) for b in range(256)] numb = 0 while True: offset = begin + numb # If a tube is passed in as fd, it will raise EOFError when it runs # out of data, unlike a file or StringIO object, which return an empty # string. 
try: chunk = fd.read(width) except EOFError: chunk = '' # We have run out of data, exit the loop if chunk == '': break # Advance the cursor by the number of bytes we actually read numb += len(chunk) # If this chunk is the same as the last unique chunk, # use a '*' instead. if skip and last_unique: same_as_last_line = (last_unique == chunk) lines_are_sequential = False last_unique = chunk if same_as_last_line or lines_are_sequential: # If we have not already printed a "*", do so if not skipping: yield '*' skipping = True # Move on to the next chunk continue # Chunk is unique, no longer skipping skipping = False last_unique = chunk # Generate contents for line hexbytes = '' printable = '' for i, b in enumerate(chunk): if not hexii: hbyte, abyte = cache[ord(b)] else: hbyte, abyte = _hexiichar(b), '' if i % 4 == 3 and i < width - 1: hbyte += spacer abyte += marker hexbytes += hbyte + ' ' printable += abyte if i + 1 < width: delta = width - i - 1 # How many hex-bytes would we have printed? count = byte_width * delta # How many dividers do we need to fill out the line? dividers_per_line = (width // 4) - (1 if width % 4 == 0 else 0) dividers_printed = (i // 4) + (1 if i % 4 == 3 else 0) count += dividers_per_line - dividers_printed hexbytes += ' ' * count # Python2 -> 3 wew if isinstance(line_fmt, six.binary_type): line_fmt = line_fmt.decode('utf-8') if isinstance(offset, six.binary_type): offset = printable.decode('utf-8') line = line_fmt % {'offset': offset, 'hexbytes': hexbytes, 'printable': printable} yield line line = "%08x" % (begin + numb) yield line
[ "def", "hexdump_iter", "(", "logger", ",", "fd", ",", "width", "=", "16", ",", "skip", "=", "True", ",", "hexii", "=", "False", ",", "begin", "=", "0", ",", "highlight", "=", "None", ")", ":", "style", "=", "logger", ".", "style", ".", "hexdump", ...
32.111111
21.303704
def banlist(self, channel): """ Get the channel banlist. Required arguments: * channel - Channel of which to get the banlist for. """ with self.lock: self.is_in_channel(channel) self.send('MODE %s b' % channel) bans = [] while self.readable(): msg = self._recv(expected_replies=('367', '368')) if msg[0] == '367': banmask, who, timestamp = msg[2].split()[1:] bans.append((self._from_(banmask), who, \ self._m_time.localtime(int(timestamp)))) elif msg[0] == '368': break return bans
[ "def", "banlist", "(", "self", ",", "channel", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "is_in_channel", "(", "channel", ")", "self", ".", "send", "(", "'MODE %s b'", "%", "channel", ")", "bans", "=", "[", "]", "while", "self", ".", ...
33.904762
15.428571