text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def show(only_path=False): """Show the current config.""" logger.setLevel(logging.INFO) infos = ["\n", f'Instance path: "{current_app.instance_path}"'] logger.info("\n ".join(infos)) if not only_path: log_config(current_app.config)
[ "def", "show", "(", "only_path", "=", "False", ")", ":", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "infos", "=", "[", "\"\\n\"", ",", "f'Instance path: \"{current_app.instance_path}\"'", "]", "logger", ".", "info", "(", "\"\\n \"", ".", "...
28.222222
17.111111
def find_numeration(docbody, title): """Find numeration pattern 1st try to find numeration in the title e.g. References [4] Riotto... 2nd find the numeration alone in the line after the title e.g. References 1 Riotto 3rnd find the numeration in the following line e.g. References [1] Riotto """ ref_details, found_title = find_numeration_in_title(docbody, title) if not ref_details: ref_details, found_title = find_numeration_in_body(docbody) return ref_details, found_title
[ "def", "find_numeration", "(", "docbody", ",", "title", ")", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_title", "(", "docbody", ",", "title", ")", "if", "not", "ref_details", ":", "ref_details", ",", "found_title", "=", "find_numeration_in_bo...
23.130435
23
def _find_links(k_vec, sfx, rsfx, k): """Find sfx/rsfx recursively.""" k_vec.sort() if 0 in k_vec: return k_vec else: if sfx[k] not in k_vec: k_vec.append(sfx[k]) for i in range(len(rsfx[k])): if rsfx[k][i] not in k_vec: k_vec.append(rsfx[k][i]) for i in range(len(k_vec)): k_vec = _find_links(k_vec, sfx, rsfx, k_vec[i]) if 0 in k_vec: break return k_vec
[ "def", "_find_links", "(", "k_vec", ",", "sfx", ",", "rsfx", ",", "k", ")", ":", "k_vec", ".", "sort", "(", ")", "if", "0", "in", "k_vec", ":", "return", "k_vec", "else", ":", "if", "sfx", "[", "k", "]", "not", "in", "k_vec", ":", "k_vec", ".",...
29.75
12.375
def check_file_for_tabs(filename, verbose=True): """identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints information about issues :param filename: the filename :rtype: True if there are tabs in the file """ file_contains_tabs = False with open(filename) as f: lines = f.read().split("\n") line_no = 1 for line in lines: if "\t" in line: file_contains_tabs = True location = [ i for i in range(len(line)) if line.startswith('\t', i)] if verbose: Console.error("Tab found in line {} and column(s) {}" .format(line_no, str(location).replace("[", "").replace( "]", "")), traceflag=False) line_no += 1 return file_contains_tabs
[ "def", "check_file_for_tabs", "(", "filename", ",", "verbose", "=", "True", ")", ":", "file_contains_tabs", "=", "False", "with", "open", "(", "filename", ")", "as", "f", ":", "lines", "=", "f", ".", "read", "(", ")", ".", "split", "(", "\"\\n\"", ")",...
38.259259
16.148148
def generateLatticeFile(self, beamline, filename=None, format='elegant'): """ generate simulation files for lattice analysis, e.g. ".lte" for elegant, ".madx" for madx input parameters: :param beamline: keyword for beamline :param filename: name of lte/mad file, if None, output to stdout; if 'sio', output to a string as return value; other cases, output to filename; :param format: madx, elegant, 'elegant' by default, generated lattice is for elegant tracking """ """ if not self.isBeamline(beamline): print("%s is a valid defined beamline, do not process." % (beamline)) return False """ if filename is None: f = sys.stdout elif filename == 'sio': f = StringIO() else: f = open(os.path.expanduser(filename), 'w') # write filehead, mainly resolving prefix string lines cl1 = "This file is automatically generated by 'generateLatticeFile()' method," cl2 = 'could be used as ' + format + ' lattice file.' cl3 = 'Author: Tong Zhang (zhangtong@sinap.ac.cn)' cl4 = 'Generated Date: ' + time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime()) f.write('!{str1:<73s}!\n'.format(str1='-' * 73)) f.write('!{str1:^73s}!\n'.format(str1=cl1)) f.write('!{str1:^73s}!\n'.format(str1=cl2)) f.write('!{str1:^73s}!\n'.format(str1='-' * 24)) f.write('!{str1:^73s}!\n'.format(str1=cl3)) f.write('!{str1:^73s}!\n'.format(str1=cl4)) f.write('!{str1:<73s}!\n'.format(str1='-' * 73)) f.write('\n') """ do not need to dump stoed variables now, 2016-03-21 # write global variables f.write('! {str1:<73s}\n'.format(str1= 'Global variable definitions:')) f.write('\n'.join(self.all_elements['_prefixstr'])) f.write('\n') f.write('\n') """ # write EPICS control configuration part if contains '_epics' key if '_epics' in self.all_elements: f.write('! {str1:<73s}\n'.format(str1='EPICS control definitions:')) for k, v in self.all_elements['_epics'].items(): f.write('!!epics {k:<10s}:{v:>50s}\n'.format(k=k, v=json.dumps(v))) f.write('\n') # write element definitions and lattice f.write('! 
{str1:<72s}\n'.format(str1='Element definitions:')) elelist = self.getFullBeamline(beamline, extend=True) if self.getElementType(elelist[0]) != 'CHARGE': elelist.insert(0, self.getChargeElement()) for ele in sorted(set(elelist)): elestring = self.rinseElement(ele)['name'] f.write(self.formatElement(elestring, format='elegant') + '\n') # write beamline lattice definition f.write('\n') f.write('! {str1:<72s}\n'.format(str1='Beamline definitions:')) f.write('{bl:<10s}: line = ({lattice})'.format(bl=beamline.upper(), lattice=', '.join(elelist))) if filename == 'sio': retval = f.getvalue() else: retval = True f.close() # if everything's OK, return True or string ('sio') mode return retval
[ "def", "generateLatticeFile", "(", "self", ",", "beamline", ",", "filename", "=", "None", ",", "format", "=", "'elegant'", ")", ":", "\"\"\"\n if not self.isBeamline(beamline):\n print(\"%s is a valid defined beamline, do not process.\" % (beamline))\n re...
41.670886
20.43038
def step_it_should_fail_with(context): ''' EXAMPLE: ... when I run "behave ..." then it should fail with: """ TEXT """ ''' assert context.text is not None, "ENSURE: multiline text is provided." step_command_output_should_contain(context) assert_that(context.command_result.returncode, is_not(equal_to(0)))
[ "def", "step_it_should_fail_with", "(", "context", ")", ":", "assert", "context", ".", "text", "is", "not", "None", ",", "\"ENSURE: multiline text is provided.\"", "step_command_output_should_contain", "(", "context", ")", "assert_that", "(", "context", ".", "command_re...
29
18.230769
def teardown(file): # pylint:disable=redefined-builtin """Teardown a polyaxon deployment given a config file.""" config = read_deployment_config(file) manager = DeployManager(config=config, filepath=file) exception = None try: if click.confirm('Would you like to execute pre-delete hooks?', default=True): manager.teardown(hooks=True) else: manager.teardown(hooks=False) except Exception as e: Printer.print_error('Polyaxon could not teardown the deployment.') exception = e if exception: Printer.print_error('Error message `{}`.'.format(exception))
[ "def", "teardown", "(", "file", ")", ":", "# pylint:disable=redefined-builtin", "config", "=", "read_deployment_config", "(", "file", ")", "manager", "=", "DeployManager", "(", "config", "=", "config", ",", "filepath", "=", "file", ")", "exception", "=", "None",...
39.25
19.8125
def _GetUnsortedNotifications(self, queue_shard, notifications_by_session_id=None): """Returns all the available notifications for a queue_shard. Args: queue_shard: urn of queue shard notifications_by_session_id: store notifications in this dict rather than creating a new one Returns: dict of notifications. keys are session ids. """ if notifications_by_session_id is None: notifications_by_session_id = {} end_time = self.frozen_timestamp or rdfvalue.RDFDatetime.Now() for notification in self.data_store.GetNotifications(queue_shard, end_time): existing = notifications_by_session_id.get(notification.session_id) if existing: # If we have a notification for this session_id already, we only store # the one that was scheduled last. if notification.first_queued > existing.first_queued: notifications_by_session_id[notification.session_id] = notification elif notification.first_queued == existing.first_queued and ( notification.last_status > existing.last_status): # Multiple notifications with the same timestamp should not happen. # We can still do the correct thing and use the latest one. logging.warning( "Notifications with equal first_queued fields detected: %s %s", notification, existing) notifications_by_session_id[notification.session_id] = notification else: notifications_by_session_id[notification.session_id] = notification return notifications_by_session_id
[ "def", "_GetUnsortedNotifications", "(", "self", ",", "queue_shard", ",", "notifications_by_session_id", "=", "None", ")", ":", "if", "notifications_by_session_id", "is", "None", ":", "notifications_by_session_id", "=", "{", "}", "end_time", "=", "self", ".", "froze...
45.083333
22.694444
def iterSourceCode(paths): """ Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is. """ for path in paths: if os.path.isdir(path): for dirpath, dirnames, filenames in os.walk(path): for filename in filenames: if filename.endswith('.py'): yield os.path.join(dirpath, filename) else: yield path
[ "def", "iterSourceCode", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "path", ")", ":", ...
35.125
17.25
def get_golden_topics(self, lang): """Return the topics mastered ("golden") by a user in a language.""" return [topic['title'] for topic in self.user_data.language_data[lang]['skills'] if topic['learned'] and topic['strength'] == 1.0]
[ "def", "get_golden_topics", "(", "self", ",", "lang", ")", ":", "return", "[", "topic", "[", "'title'", "]", "for", "topic", "in", "self", ".", "user_data", ".", "language_data", "[", "lang", "]", "[", "'skills'", "]", "if", "topic", "[", "'learned'", ...
55.6
14.8
def _check_pong(self): """Checks if a Pong message was received. :return: """ self.pong_timer.cancel() if self.pong_received: self.log.debug("_check_pong(): Pong received in time.") self.pong_received = False else: # reconnect self.log.debug("_check_pong(): Pong not received in time." "Issuing reconnect..") self.reconnect()
[ "def", "_check_pong", "(", "self", ")", ":", "self", ".", "pong_timer", ".", "cancel", "(", ")", "if", "self", ".", "pong_received", ":", "self", ".", "log", ".", "debug", "(", "\"_check_pong(): Pong received in time.\"", ")", "self", ".", "pong_received", "...
32
16
def ParseFileObject(self, parser_mediator, file_object): """Parses a Java WebStart Cache IDX file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed. """ file_header_map = self._GetDataTypeMap('java_idx_file_header') try: file_header, file_offset = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse file header with error: {0!s}'.format( exception)) if not file_header.format_version in self._SUPPORTED_FORMAT_VERSIONS: raise errors.UnableToParseFile('Unsupported format version.') if file_header.format_version == 602: section1_map = self._GetDataTypeMap('java_idx_602_section1') elif file_header.format_version in (603, 604): section1_map = self._GetDataTypeMap('java_idx_603_section1') elif file_header.format_version == 605: section1_map = self._GetDataTypeMap('java_idx_605_section1') try: section1, data_size = self._ReadStructureFromFileObject( file_object, file_offset, section1_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse section 1 (format version: {0:d}) with error: ' '{1!s}').format(file_header.format_version, exception)) file_offset += data_size if file_header.format_version == 602: section2_map = self._GetDataTypeMap('java_idx_602_section2') elif file_header.format_version in (603, 604, 605): file_offset = 128 section2_map = self._GetDataTypeMap('java_idx_603_section2') try: section2, data_size = self._ReadStructureFromFileObject( file_object, file_offset, section2_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse section 2 (format version: {0:d}) with error: ' '{1!s}').format(file_header.format_version, exception)) file_offset 
+= data_size if not section2.url: raise errors.UnableToParseFile('URL not found in file.') date_http_header = None for _ in range(section2.number_of_http_headers): http_header_map = self._GetDataTypeMap('java_idx_http_header') try: http_header, data_size = self._ReadStructureFromFileObject( file_object, file_offset, http_header_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'Unable to parse HTTP header value at offset: 0x{0:08x}'.format( file_offset)) break file_offset += data_size if http_header.name == 'date': date_http_header = http_header break event_data = JavaIDXEventData() event_data.idx_version = file_header.format_version event_data.ip_address = getattr(section2, 'ip_address', None) event_data.url = section2.url date_time = dfdatetime_java_time.JavaTime( timestamp=section1.modification_time) # TODO: Move the timestamp description into definitions. event = time_events.DateTimeValuesEvent(date_time, 'File Hosted Date') parser_mediator.ProduceEventWithEventData(event, event_data) if section1.expiration_time: date_time = dfdatetime_java_time.JavaTime( timestamp=section1.expiration_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) if date_http_header: # A HTTP header date and string "should" be in UTC or have an associated # time zone information in the string itself. If that is not the case # then there is no reliable method for plaso to determine the proper # time zone, so the assumption is that it is UTC. 
try: download_date = timelib.Timestamp.FromTimeString( date_http_header.value, gmt_as_timezone=False) except errors.TimestampError: parser_mediator.ProduceExtractionWarning( 'Unable to parse date HTTP header value: {0:s}'.format( date_http_header.value)) if download_date: event = time_events.TimestampEvent( download_date, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "file_header_map", "=", "self", ".", "_GetDataTypeMap", "(", "'java_idx_file_header'", ")", "try", ":", "file_header", ",", "file_offset", "=", "self", ".", "_ReadStructur...
40.274336
21.778761
def setCredentialValues(self, username=None, password=None, public_key=None, private_key=None, new=False): """Set the values in disk.0.os.credentials.*.""" credentials_base = "disk.0.os.credentials." if new: credentials_base = "disk.0.os.credentials.new." if username: self.setValue(credentials_base + "username", username) if password: self.setValue(credentials_base + "password", password) if public_key: self.setValue(credentials_base + "public_key", public_key) if private_key: self.setValue(credentials_base + "private_key", private_key)
[ "def", "setCredentialValues", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ",", "public_key", "=", "None", ",", "private_key", "=", "None", ",", "new", "=", "False", ")", ":", "credentials_base", "=", "\"disk.0.os.credentials.\"", ...
43.066667
26
def split_elements(value): """Split a string with comma or space-separated elements into a list.""" l = [v.strip() for v in value.split(',')] if len(l) == 1: l = value.split() return l
[ "def", "split_elements", "(", "value", ")", ":", "l", "=", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", "]", "if", "len", "(", "l", ")", "==", "1", ":", "l", "=", "value", ".", "split", "(", ")...
33.833333
13.833333
def delete(self): """ Deletes the space """ return self._client._delete( self.__class__.base_url( self.sys['id'] ) )
[ "def", "delete", "(", "self", ")", ":", "return", "self", ".", "_client", ".", "_delete", "(", "self", ".", "__class__", ".", "base_url", "(", "self", ".", "sys", "[", "'id'", "]", ")", ")" ]
18.8
15.4
def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' Will check if the configuration was changed. If differences found, will try to commit. In case commit unsuccessful, will try to rollback. :return: A tuple with a boolean that specifies if the config was changed/committed/rollbacked on the device.\ And a string that provides more details of the reason why the configuration was not committed properly. CLI Example: .. code-block:: bash salt '*' net.config_control ''' result = True comment = '' changed, not_changed_rsn = config_changed(inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable if not changed: return (changed, not_changed_rsn) # config changed, thus let's try to commit try_commit = commit() if not try_commit.get('result'): result = False comment = 'Unable to commit the changes: {reason}.\n\ Will try to rollback now!'.format( reason=try_commit.get('comment') ) try_rollback = rollback() if not try_rollback.get('result'): comment += '\nCannot rollback! {reason}'.format( reason=try_rollback.get('comment') ) return result, comment
[ "def", "config_control", "(", "inherit_napalm_device", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "result", "=", "True", "comment", "=", "''", "changed", ",", "not_changed_rsn", "=", "config_changed", "(", "inherit_napalm_de...
32.564103
26.153846
def exit_after(s): """ Use as decorator to exit process if function takes longer than s seconds. Direct call is available via exit_after(TIMEOUT_IN_S)(fce)(args). Inspired by https://stackoverflow.com/a/31667005 """ def outer(fn): def inner(*args, **kwargs): timer = threading.Timer(s, thread.interrupt_main) timer.start() try: result = fn(*args, **kwargs) except KeyboardInterrupt: raise TimeoutError("Function '{}' hit the timeout ({}s).".format(fn.__name__, s)) finally: timer.cancel() return result return inner return outer
[ "def", "exit_after", "(", "s", ")", ":", "def", "outer", "(", "fn", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "timer", "=", "threading", ".", "Timer", "(", "s", ",", "thread", ".", "interrupt_main", ")", "time...
27.08
20.2
def gene_by_protein_id(self, protein_id): """ Get the gene ID associated with the given protein ID, return its Gene object """ gene_id = self.gene_id_of_protein_id(protein_id) return self.gene_by_id(gene_id)
[ "def", "gene_by_protein_id", "(", "self", ",", "protein_id", ")", ":", "gene_id", "=", "self", ".", "gene_id_of_protein_id", "(", "protein_id", ")", "return", "self", ".", "gene_by_id", "(", "gene_id", ")" ]
35.571429
7
def _generate_features(self, feature_extractors): """Run all FeatureExtractors and record results in a key-value format. :param feature_extractors: iterable of `FeatureExtractor` objects. """ results = [pd.DataFrame()] n_ext = len(feature_extractors) for i, extractor in enumerate(feature_extractors): log.info("generating: '%s' (%d/%d)", extractor.name, i + 1, n_ext) cached_extractor = self._cache[extractor.name] if extractor.same(cached_extractor): log.info('pulling from cache') extractor = cached_extractor else: log.info('running...') extractor.extract() results.append(extractor.result) if self.cache_path: self._cache[extractor.name] = extractor if self.cache_path: with open(self.cache_path, 'wb') as f: pickle.dump(self._cache, f) return pd.concat(results, axis=1)
[ "def", "_generate_features", "(", "self", ",", "feature_extractors", ")", ":", "results", "=", "[", "pd", ".", "DataFrame", "(", ")", "]", "n_ext", "=", "len", "(", "feature_extractors", ")", "for", "i", ",", "extractor", "in", "enumerate", "(", "feature_e...
38.384615
14.884615
def duplicate(self): ''' Returns a copy of the current group, including its lines. @returns: Group ''' return self.__class__(amount=self.amount, date=self.date, method=self.method, ref=self.ref)
[ "def", "duplicate", "(", "self", ")", ":", "return", "self", ".", "__class__", "(", "amount", "=", "self", ".", "amount", ",", "date", "=", "self", ".", "date", ",", "method", "=", "self", ".", "method", ",", "ref", "=", "self", ".", "ref", ")" ]
36.857143
24
def _concat_datetime(to_concat, axis=0, typs=None): """ provide concatenation of an datetimelike array of arrays each of which is a single M8[ns], datetimet64[ns, tz] or m8[ns] dtype Parameters ---------- to_concat : array of arrays axis : axis to provide concatenation typs : set of to_concat dtypes Returns ------- a single array, preserving the combined dtypes """ if typs is None: typs = get_dtype_kinds(to_concat) # multiple types, need to coerce to object if len(typs) != 1: return _concatenate_2d([_convert_datetimelike_to_object(x) for x in to_concat], axis=axis) # must be single dtype if any(typ.startswith('datetime') for typ in typs): if 'datetime' in typs: to_concat = [x.astype(np.int64, copy=False) for x in to_concat] return _concatenate_2d(to_concat, axis=axis).view(_NS_DTYPE) else: # when to_concat has different tz, len(typs) > 1. # thus no need to care return _concat_datetimetz(to_concat) elif 'timedelta' in typs: return _concatenate_2d([x.view(np.int64) for x in to_concat], axis=axis).view(_TD_DTYPE) elif any(typ.startswith('period') for typ in typs): assert len(typs) == 1 cls = to_concat[0] new_values = cls._concat_same_type(to_concat) return new_values
[ "def", "_concat_datetime", "(", "to_concat", ",", "axis", "=", "0", ",", "typs", "=", "None", ")", ":", "if", "typs", "is", "None", ":", "typs", "=", "get_dtype_kinds", "(", "to_concat", ")", "# multiple types, need to coerce to object", "if", "len", "(", "t...
32.177778
19.911111
def task_loop(tasks, execute, wait=None, store=TaskStore()): """ The inner task loop for a task runner. execute: A function that runs a task. It should take a task as its sole argument, and may optionally return a TaskResult. wait: (optional, None) A function to run whenever there aren't any runnable tasks (but there are still tasks listed as running). If given, this function should take no arguments, and should return an iterable of TaskResults. """ completed = set() failed = set() exceptions = [] def collect(task): args = [] kwargs = {} for arg in task.args: if isinstance(arg, Task): args.append(store.get(arg.name)) else: args.append(arg) for key in task.kwargs: if isinstance(task.kwargs[key], Task): kwargs[key] = store.get(task.kwargs[key].name) else: kwargs[key] = task.kwargs[key] return args, kwargs def complete(scheduler, result): store.put(result.name, result.data) scheduler.end_task(result.name, result.successful) if result.exception: exceptions.append(result.exception) with Scheduler(tasks, completed=completed, failed=failed) as scheduler: while not scheduler.is_finished(): task = scheduler.start_task() while task is not None: # Collect any dependent results args, kwargs = collect(task) func = partial(task.function, *args, **kwargs) if task.handler: func = partial(task.handler, func) result = execute(func, task.name) # result exists iff execute is synchroous if result: complete(scheduler, result) task = scheduler.start_task() if wait: for result in wait(): complete(scheduler, result) # TODO: if in debug mode print out all failed tasks? return Results(completed, failed, exceptions)
[ "def", "task_loop", "(", "tasks", ",", "execute", ",", "wait", "=", "None", ",", "store", "=", "TaskStore", "(", ")", ")", ":", "completed", "=", "set", "(", ")", "failed", "=", "set", "(", ")", "exceptions", "=", "[", "]", "def", "collect", "(", ...
32.59375
18.625
def get(self, q, limit=None): """ Performs a search against the predict endpoint :param q: query to be searched for [STRING] :return: { score: [0|1] } """ uri = '{}/predict?q={}'.format(self.client.remote, q) self.logger.debug(uri) body = self.client.get(uri) return body['score']
[ "def", "get", "(", "self", ",", "q", ",", "limit", "=", "None", ")", ":", "uri", "=", "'{}/predict?q={}'", ".", "format", "(", "self", ".", "client", ".", "remote", ",", "q", ")", "self", ".", "logger", ".", "debug", "(", "uri", ")", "body", "=",...
28.583333
14.25
def pipeline(self): """Returns :class:`Pipeline` object to execute bulk of commands. It is provided for convenience. Commands can be pipelined without it. Example: >>> pipe = redis.pipeline() >>> fut1 = pipe.incr('foo') # NO `await` as it will block forever! >>> fut2 = pipe.incr('bar') >>> result = await pipe.execute() >>> result [1, 1] >>> await asyncio.gather(fut1, fut2) [1, 1] >>> # >>> # The same can be done without pipeline: >>> # >>> fut1 = redis.incr('foo') # the 'INCRY foo' command already sent >>> fut2 = redis.incr('bar') >>> await asyncio.gather(fut1, fut2) [2, 2] """ return Pipeline(self._pool_or_conn, self.__class__, loop=self._pool_or_conn._loop)
[ "def", "pipeline", "(", "self", ")", ":", "return", "Pipeline", "(", "self", ".", "_pool_or_conn", ",", "self", ".", "__class__", ",", "loop", "=", "self", ".", "_pool_or_conn", ".", "_loop", ")" ]
32.307692
17.884615
def get_service( service_name, inactive=False, completed=False ): """ Get a dictionary describing a service :param service_name: the service name :type service_name: str :param inactive: whether to include inactive services :type inactive: bool :param completed: whether to include completed services :type completed: bool :return: a dict describing a service :rtype: dict, or None """ services = mesos.get_master().frameworks(inactive=inactive, completed=completed) for service in services: if service['name'] == service_name: return service return None
[ "def", "get_service", "(", "service_name", ",", "inactive", "=", "False", ",", "completed", "=", "False", ")", ":", "services", "=", "mesos", ".", "get_master", "(", ")", ".", "frameworks", "(", "inactive", "=", "inactive", ",", "completed", "=", "complete...
27.625
19.541667
def _normalize_key(value): """Return a key from an entity, model instance, key, or key string.""" if ndb is not None and isinstance(value, (ndb.Model, ndb.Key)): return None if getattr(value, "key", None): return value.key() elif isinstance(value, basestring): return datastore.Key(value) else: return value
[ "def", "_normalize_key", "(", "value", ")", ":", "if", "ndb", "is", "not", "None", "and", "isinstance", "(", "value", ",", "(", "ndb", ".", "Model", ",", "ndb", ".", "Key", ")", ")", ":", "return", "None", "if", "getattr", "(", "value", ",", "\"key...
32.4
15.8
def _parse_interval(value): ''' Convert an interval string like 1w3d6h into the number of seconds, time resolution (1 unit of the smallest specified time unit) and the modifier( '+', '-', or ''). w = week d = day h = hour m = minute s = second ''' match = _INTERVAL_REGEX.match(six.text_type(value)) if match is None: raise ValueError('invalid time interval: \'{0}\''.format(value)) result = 0 resolution = None for name, multiplier in [('second', 1), ('minute', 60), ('hour', 60 * 60), ('day', 60 * 60 * 24), ('week', 60 * 60 * 24 * 7)]: if match.group(name) is not None: result += float(match.group(name)) * multiplier if resolution is None: resolution = multiplier return result, resolution, match.group('modifier')
[ "def", "_parse_interval", "(", "value", ")", ":", "match", "=", "_INTERVAL_REGEX", ".", "match", "(", "six", ".", "text_type", "(", "value", ")", ")", "if", "match", "is", "None", ":", "raise", "ValueError", "(", "'invalid time interval: \\'{0}\\''", ".", "f...
33.892857
20.035714
def resettable_cached_property(func): """Decorator to add cached computed properties to an object. Similar to Django's `cached_property` decorator, except stores all the data under a single well-known key so that it can easily be blown away. """ def wrapper(self): if not hasattr(self, '_resettable_cached_properties'): self._resettable_cached_properties = {} if func.__name__ not in self._resettable_cached_properties: self._resettable_cached_properties[func.__name__] = func(self) return self._resettable_cached_properties[func.__name__] # Returns a property whose getter is the 'wrapper' function return property(wrapper)
[ "def", "resettable_cached_property", "(", "func", ")", ":", "def", "wrapper", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_resettable_cached_properties'", ")", ":", "self", ".", "_resettable_cached_properties", "=", "{", "}", "if", "func...
43.1875
20.625
def complete(self): """ When *local_workflow_require_branches* of the task was set to *True*, returns whether the :py:meth:`run` method has been called before. Otherwise, the call is forwarded to the super class. """ if self.task.local_workflow_require_branches: return self._has_run else: return super(LocalWorkflowProxy, self).complete()
[ "def", "complete", "(", "self", ")", ":", "if", "self", ".", "task", ".", "local_workflow_require_branches", ":", "return", "self", ".", "_has_run", "else", ":", "return", "super", "(", "LocalWorkflowProxy", ",", "self", ")", ".", "complete", "(", ")" ]
41
23.2
def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain): """Opens the BDE volume using the path specification. Args: bde_volume (pybde.volume): BDE volume. path_spec (PathSpec): path specification. file_object (FileIO): file-like object. key_chain (KeyChain): key chain. """ password = key_chain.GetCredential(path_spec, 'password') if password: bde_volume.set_password(password) recovery_password = key_chain.GetCredential(path_spec, 'recovery_password') if recovery_password: bde_volume.set_recovery_password(recovery_password) startup_key = key_chain.GetCredential(path_spec, 'startup_key') if startup_key: bde_volume.read_startup_key(startup_key) bde_volume.open_file_object(file_object)
[ "def", "BDEVolumeOpen", "(", "bde_volume", ",", "path_spec", ",", "file_object", ",", "key_chain", ")", ":", "password", "=", "key_chain", ".", "GetCredential", "(", "path_spec", ",", "'password'", ")", "if", "password", ":", "bde_volume", ".", "set_password", ...
33.227273
18.318182
def TR(self,**kwargs): #pragma: no cover """ NAME: TR PURPOSE: Calculate the radial period for a power-law rotation curve INPUT: scipy.integrate.quadrature keywords OUTPUT: T_R(R,vT,vT)*vc/ro + estimate of the error HISTORY: 2010-12-01 - Written - Bovy (NYU) """ if hasattr(self,'_TR'): return self._TR (rperi,rap)= self.calcRapRperi(**kwargs) if nu.fabs(rap-rperi)/rap < 10.**-4.: #Rough limit self._TR= 2.*m.pi/epifreq(self._pot,self._R,use_physical=False) return self._TR Rmean= m.exp((m.log(rperi)+m.log(rap))/2.) EL= self.calcEL(**kwargs) E, L= EL TR= 0. if Rmean > rperi: TR+= integrate.quadrature(_TRAxiIntegrandSmall, 0.,m.sqrt(Rmean-rperi), args=(E,L,self._pot,rperi), **kwargs)[0] if Rmean < rap: TR+= integrate.quadrature(_TRAxiIntegrandLarge, 0.,m.sqrt(rap-Rmean), args=(E,L,self._pot,rap), **kwargs)[0] self._TR= 2.*TR return self._TR
[ "def", "TR", "(", "self", ",", "*", "*", "kwargs", ")", ":", "#pragma: no cover", "if", "hasattr", "(", "self", ",", "'_TR'", ")", ":", "return", "self", ".", "_TR", "(", "rperi", ",", "rap", ")", "=", "self", ".", "calcRapRperi", "(", "*", "*", ...
37.142857
16.571429
def _combine_attr_fast_update(self, attr, typ): '''Avoids having to call _update for each intermediate base. Only works for class attr of type UpdateDict. ''' values = dict(getattr(self, attr, {})) for base in self._class_data.bases: vals = dict(getattr(base, attr, {})) preserve_attr_data(vals, values) values = combine(vals, values) setattr(self, attr, typ(values))
[ "def", "_combine_attr_fast_update", "(", "self", ",", "attr", ",", "typ", ")", ":", "values", "=", "dict", "(", "getattr", "(", "self", ",", "attr", ",", "{", "}", ")", ")", "for", "base", "in", "self", ".", "_class_data", ".", "bases", ":", "vals", ...
35.615385
15.461538
def push_notification_devices_destroy_many(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/push_notification_devices#bulk-unregister-push-notification-devices" api_path = "/api/v2/push_notification_devices/destroy_many.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "push_notification_devices_destroy_many", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/push_notification_devices/destroy_many.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ...
83.25
43.25
def compact_hdf5_file(filename, name=None, index=None, keep_backup=True): """Can compress an HDF5 to reduce file size. The properties on how to compress the new file are taken from a given trajectory in the file. Simply calls ``ptrepack`` from the command line. (Se also https://pytables.github.io/usersguide/utilities.html#ptrepackdescr) Currently only supported under Linux, no guarantee for Windows usage. :param filename: Name of the file to compact :param name: The name of the trajectory from which the compression properties are taken :param index: Instead of a name you could also specify an index, i.e -1 for the last trajectory in the file. :param keep_backup: If a back up version of the original file should be kept. The backup file is named as the original but `_backup` is appended to the end. :return: The return/error code of ptrepack """ if name is None and index is None: index = -1 tmp_traj = load_trajectory(name, index, as_new=False, load_all=pypetconstants.LOAD_NOTHING, force=True, filename=filename) service = tmp_traj.v_storage_service complevel = service.complevel complib = service.complib shuffle = service.shuffle fletcher32 = service.fletcher32 name_wo_ext, ext = os.path.splitext(filename) tmp_filename = name_wo_ext + '_tmp' + ext abs_filename = os.path.abspath(filename) abs_tmp_filename = os.path.abspath(tmp_filename) command = ['ptrepack', '-v', '--complib', complib, '--complevel', str(complevel), '--shuffle', str(int(shuffle)), '--fletcher32', str(int(fletcher32)), abs_filename, abs_tmp_filename] str_command = ' '.join(command) print('Executing command `%s`' % str_command) retcode = subprocess.call(command) if retcode != 0: print('#### ERROR: Compacting `%s` failed with errorcode %s! 
####' % (filename, str(retcode))) else: print('#### Compacting successful ####') print('Renaming files') if keep_backup: backup_file_name = name_wo_ext + '_backup' + ext os.rename(filename, backup_file_name) else: os.remove(filename) os.rename(tmp_filename, filename) print('### Compacting and Renaming finished ####') return retcode
[ "def", "compact_hdf5_file", "(", "filename", ",", "name", "=", "None", ",", "index", "=", "None", ",", "keep_backup", "=", "True", ")", ":", "if", "name", "is", "None", "and", "index", "is", "None", ":", "index", "=", "-", "1", "tmp_traj", "=", "load...
32.026667
22.306667
def _recv_byte(self, byte): """ Non-printable filtering currently disabled because it did not play well with extended character sets. """ ## Filter out non-printing characters #if (byte >= ' ' and byte <= '~') or byte == '\n': if self.telnet_echo: self._echo_byte(byte) self.recv_buffer += byte
[ "def", "_recv_byte", "(", "self", ",", "byte", ")", ":", "## Filter out non-printing characters", "#if (byte >= ' ' and byte <= '~') or byte == '\\n':", "if", "self", ".", "telnet_echo", ":", "self", ".", "_echo_byte", "(", "byte", ")", "self", ".", "recv_buffer", "+=...
36.1
9.9
def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor: """ Hilbert-Schmidt inner product between qubit vectors The tensor rank and qubits must match. """ if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb: raise ValueError('Incompatibly vectors. Qubits and rank must match') vec1 = vec1.permute(vec0.qubits) # Make sure qubits in same order return bk.inner(vec0.tensor, vec1.tensor)
[ "def", "inner_product", "(", "vec0", ":", "QubitVector", ",", "vec1", ":", "QubitVector", ")", "->", "bk", ".", "BKTensor", ":", "if", "vec0", ".", "rank", "!=", "vec1", ".", "rank", "or", "vec0", ".", "qubit_nb", "!=", "vec1", ".", "qubit_nb", ":", ...
43.4
20.8
def main(self): """ Generates an output string by replacing the keywords in the format string with the corresponding values from a submission dictionary. """ self.manage_submissions() out_string = self.options['format'] # Pop until we get something which len(title) <= max-chars length = float('inf') while length > self.options['max_chars']: self.selected_submission = self.submissions.pop() length = len(self.selected_submission['title']) for k, v in self.selected_submission.items(): out_string = out_string.replace(k, self.h.unescape(str(v))) return self.output(out_string, out_string)
[ "def", "main", "(", "self", ")", ":", "self", ".", "manage_submissions", "(", ")", "out_string", "=", "self", ".", "options", "[", "'format'", "]", "# Pop until we get something which len(title) <= max-chars", "length", "=", "float", "(", "'inf'", ")", "while", ...
41.117647
19
def metadata_path(self, m_path): """Provide pointers to the paths of the metadata file Args: m_path: Path to metadata file """ if not m_path: self.metadata_dir = None self.metadata_file = None else: if not op.exists(m_path): raise OSError('{}: file does not exist!'.format(m_path)) if not op.dirname(m_path): self.metadata_dir = '.' else: self.metadata_dir = op.dirname(m_path) self.metadata_file = op.basename(m_path)
[ "def", "metadata_path", "(", "self", ",", "m_path", ")", ":", "if", "not", "m_path", ":", "self", ".", "metadata_dir", "=", "None", "self", ".", "metadata_file", "=", "None", "else", ":", "if", "not", "op", ".", "exists", "(", "m_path", ")", ":", "ra...
28.75
16.75
def parse_python(classifiers): """Parse out the versions of python supported a/c classifiers.""" prefix = 'Programming Language :: Python ::' python_classifiers = [c.split('::')[2].strip() for c in classifiers if c.startswith(prefix)] return ', '.join([c for c in python_classifiers if parse_version(c)])
[ "def", "parse_python", "(", "classifiers", ")", ":", "prefix", "=", "'Programming Language :: Python ::'", "python_classifiers", "=", "[", "c", ".", "split", "(", "'::'", ")", "[", "2", "]", ".", "strip", "(", ")", "for", "c", "in", "classifiers", "if", "c...
63.2
21.4
def ask(question, default_answer=False, default_answer_str="no"): """ Ask for user input. This asks a yes/no question with a preset default. You can bypass the user-input and fetch the default answer, if you set Args: question: The question to ask on stdout. default_answer: The default value to return. default_answer_str: The default answer string that we present to the user. Tests: >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True) True >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False) False """ response = default_answer def should_ignore_tty(): """ Check, if we want to ignore an opened tty result. """ ret_to_bool = {"yes": True, "no": False, "true": True, "false": False} envs = [os.getenv("CI", default="no"), os.getenv("TEST", default="no")] vals = [ret_to_bool[val] for val in envs if val in ret_to_bool] return any(vals) ignore_stdin_istty = should_ignore_tty() has_tty = sys.stdin.isatty() and not ignore_stdin_istty if has_tty: response = query_yes_no(question, default_answer_str) else: LOG.debug("NoTTY: %s -> %s", question, response) return response
[ "def", "ask", "(", "question", ",", "default_answer", "=", "False", ",", "default_answer_str", "=", "\"no\"", ")", ":", "response", "=", "default_answer", "def", "should_ignore_tty", "(", ")", ":", "\"\"\"\n Check, if we want to ignore an opened tty result.\n ...
32.410256
22.820513
def ask(question, default=None): """ @question: str @default: Any value which can be converted to string. Asks a user for a input. If default parameter is passed it will be appended to the end of the message in square brackets. """ question = str(question) if default: question += ' [' + str(default) + ']' question += ': ' reply = raw_input(question) return reply if reply else default
[ "def", "ask", "(", "question", ",", "default", "=", "None", ")", ":", "question", "=", "str", "(", "question", ")", "if", "default", ":", "question", "+=", "' ['", "+", "str", "(", "default", ")", "+", "']'", "question", "+=", "': '", "reply", "=", ...
25.058824
20.705882
def favorites_getList(user_id='', per_page='', page=''): """Returns list of Photo objects.""" method = 'flickr.favorites.getList' data = _doget(method, auth=True, user_id=user_id, per_page=per_page,\ page=page) photos = [] if isinstance(data.rsp.photos.photo, list): for photo in data.rsp.photos.photo: photos.append(_parse_photo(photo)) else: photos = [_parse_photo(data.rsp.photos.photo)] return photos
[ "def", "favorites_getList", "(", "user_id", "=", "''", ",", "per_page", "=", "''", ",", "page", "=", "''", ")", ":", "method", "=", "'flickr.favorites.getList'", "data", "=", "_doget", "(", "method", ",", "auth", "=", "True", ",", "user_id", "=", "user_i...
38.916667
14.25
def find_clusters(network, mask=[], t_labels=False): r""" Identify connected clusters of pores in the network. This method can also return a list of throat cluster numbers, which correspond to the cluster numbers of the pores to which the throat is connected. Either site and bond percolation can be considered, see description of input arguments for details. Parameters ---------- network : OpenPNM Network Object The network mask : array_like, boolean A list of active bonds or sites (throats or pores). If the mask is Np long, then the method will perform a site percolation, and if the mask is Nt long bond percolation will be performed. Returns ------- A tuple containing an Np long list of pore cluster labels, and an Nt-long list of throat cluster labels. The label numbers correspond such that pores and throats with the same label are part of the same cluster. Examples -------- >>> import openpnm as op >>> from scipy import rand >>> pn = op.network.Cubic(shape=[25, 25, 1]) >>> pn['pore.seed'] = rand(pn.Np) >>> pn['throat.seed'] = rand(pn.Nt) """ # Parse the input arguments mask = sp.array(mask, ndmin=1) if mask.dtype != bool: raise Exception('Mask must be a boolean array of Np or Nt length') # If pore mask was given perform site percolation if sp.size(mask) == network.Np: (p_clusters, t_clusters) = _site_percolation(network, mask) # If pore mask was given perform bond percolation elif sp.size(mask) == network.Nt: (p_clusters, t_clusters) = _bond_percolation(network, mask) else: raise Exception('Mask received was neither Nt nor Np long') return (p_clusters, t_clusters)
[ "def", "find_clusters", "(", "network", ",", "mask", "=", "[", "]", ",", "t_labels", "=", "False", ")", ":", "# Parse the input arguments", "mask", "=", "sp", ".", "array", "(", "mask", ",", "ndmin", "=", "1", ")", "if", "mask", ".", "dtype", "!=", "...
35.653061
22.857143
def plot_full(candsfile, cands, mode='im'): """ Plot 'full' features, such as cutout image and spectrum. """ loc, prop, d = read_candidates(candsfile, returnstate=True) npixx, npixy = prop[0][4].shape nints, nchan, npol = prop[0][5].shape bin = 10 plt.figure(1) for i in cands: if mode == 'spec': rr = np.array([np.abs(prop[i][5][:,i0:i0+bin,0].mean(axis=1)) for i0 in range(0,nchan,bin)]) ll = np.array([np.abs(prop[i][5][:,i0:i0+bin,1].mean(axis=1)) for i0 in range(0,nchan,bin)]) sh = ll.shape data = np.concatenate( (rr, np.zeros(shape=(sh[0], sh[1]/2)), ll), axis=1) elif mode == 'im': data = prop[i][4] plt.subplot(np.sqrt(len(cands)), np.sqrt(len(cands)), cands.index(i)) plt.imshow(data, interpolation='nearest') plt.show()
[ "def", "plot_full", "(", "candsfile", ",", "cands", ",", "mode", "=", "'im'", ")", ":", "loc", ",", "prop", ",", "d", "=", "read_candidates", "(", "candsfile", ",", "returnstate", "=", "True", ")", "npixx", ",", "npixy", "=", "prop", "[", "0", "]", ...
40.047619
23.047619
def _update_dirs_on_base(self): '''Fill up the names of dirs based on the contents of 'base'.''' if self._dirs['base'] != None: for d in self._predefined_dir_names: dstr = d #if d == "s2": # dstr = '.'+d self._dirs[d] = os.path.join(self._dirs['base'], dstr)
[ "def", "_update_dirs_on_base", "(", "self", ")", ":", "if", "self", ".", "_dirs", "[", "'base'", "]", "!=", "None", ":", "for", "d", "in", "self", ".", "_predefined_dir_names", ":", "dstr", "=", "d", "#if d == \"s2\":", "# dstr = '.'+d", "self", ".", "_...
43.25
14.25
def renderIndex(self, relpath="", refresh=0, refresh_index=0): """Returns HTML index code for this entry. If 'relpath' is empty, renders complete index.html file. If 'relpath' is not empty, then index is being included into a top-level log, and relpath should be passed to all sub-renderers. In this case the entry may make use of its cached_include file, if that is valid. If 'refresh' is set to a timestamp, then any subproducts (thumbnails, HTML caches, etc.) older than the timestamp will need to be regenerated. If 'refresh_index' is set to a timestamp, then any index files older than the timestamp will need to be regenerated. If 'relpath' is empty and 'prev', 'next' and/or 'up' is set, then Prev/Next/Up links will be inserted """ # check if cache can be used refresh_index = max(refresh, refresh_index) dprintf(2, "%s: rendering HTML index with relpath='%s', refresh=%s refresh_index=%s\n", self.pathname, relpath, time.strftime("%x %X", time.localtime(refresh)), time.strftime("%x %X", time.localtime(refresh_index))) if relpath and self.cached_include_valid: try: if os.path.getmtime(self.cached_include) >= refresh_index: dprintf(2, "using include cache %s\n", self.cached_include) return open(self.cached_include).read() else: dprintf(2, "include cache %s out of date, will regenerate\n", self.cached_include) self.cached_include_valid = False except: print("Error reading cached include code from %s, will regenerate" % self.cached_include) if verbosity.get_verbose() > 0: dprint(1, "Error traceback follows:") traceback.print_exc() self.cached_include_valid = False # form up attributes for % operator attrs = dict(self.__dict__) attrs['timestr'] = time.strftime("%x %X", time.localtime(self.timestamp)) attrs['relpath'] = relpath html = "" # replace title and comments for ignored entries if self.ignore: attrs['title'] = "This is not a real log entry" attrs['comment'] = """This entry was saved by PURR because the user chose to ignore and/or banish some data products. 
PURR has stored this information here for its opwn internal and highly nefarious purposes. This entry is will not appear in the log.""" # replace < and > in title and comments attrs['title'] = attrs['title'].replace("<", "&lt;").replace(">", "&gt;") # write header if asked if not relpath: icon = Purr.RenderIndex.renderIcon(24, "..") html += """<HTML><BODY> <TITLE>%(title)s</TITLE>""" % attrs if self._prev_link or self._next_link or self._up_link: html += """<DIV ALIGN=right><P>%s %s %s</P></DIV>""" % ( (self._prev_link and "<A HREF=\"%s\">&lt;&lt;Previous</A>" % self._prev_link) or "", (self._up_link and "<A HREF=\"%s\">Up</A>" % self._up_link) or "", (self._next_link and "<A HREF=\"%s\">Next&gt;&gt;</A>" % self._next_link) or "" ) html += ("<H2>" + icon + """ <A CLASS="TITLE" TIMESTAMP=%(timestamp)d>%(title)s</A></H2>""") % attrs else: icon = Purr.RenderIndex.renderIcon(24) html += """ <HR WIDTH=100%%> <H2>""" + icon + """ %(title)s</H2>""" % attrs # write comments html += """ <DIV ALIGN=right><P><SMALL>Logged on %(timestr)s</SMALL></P></DIV>\n <A CLASS="COMMENTS">\n""" % attrs # add comments logmode = False for cmt in self.comment.split("\n"): cmt = cmt.replace("<", "&lt;").replace(">", "&gt;").replace("&lt;BR&gt;", "<BR>") html += """ <P>%s</P>\n""" % cmt html += """ </A>\n""" # add data products if self.dps: have_real_dps = bool([dp for dp in self.dps if not dp.ignored]) if have_real_dps: html += """ <H3>Data products</H3> <TABLE BORDER=1 FRAME=box RULES=all CELLPADDING=5>\n""" for dp in self.dps: dpattrs = dict(dp.__dict__) dpattrs['comment'] = dpattrs['comment'].replace("<", "&lt;"). 
\ replace(">", "&gt;").replace('"', "''") # if generating complete index, write empty anchor for each DP if not relpath: if dp.ignored: html += """ <A CLASS="DP" SRC="%(sourcepath)s" POLICY="%(policy)s" COMMENT="%(comment)s"></A>\n""" % dpattrs # write normal anchor for normal products else: dpattrs['relpath'] = relpath dpattrs['basename'] = os.path.basename(dp.filename) html += """ <A CLASS="DP" FILENAME="%(filename)s" SRC="%(sourcepath)s" POLICY="%(policy)s" QUIET=%(quiet)d TIMESTAMP=%(timestamp).6f RENDER="%(render)s" COMMENT="%(comment)s"></A>\n""" % dpattrs # render a table row if not dp.ignored: renderer = Purr.Render.makeRenderer(dp.render, dp, refresh=refresh) html += Purr.Render.renderInTable(renderer, relpath) if have_real_dps: html += """ </TABLE>""" # write footer if not relpath: html += "</BODY></HTML>\n" else: # now, write to include cache, if being included open(self.cached_include, 'w').write(html) self.cached_include_valid = True return html
[ "def", "renderIndex", "(", "self", ",", "relpath", "=", "\"\"", ",", "refresh", "=", "0", ",", "refresh_index", "=", "0", ")", ":", "# check if cache can be used", "refresh_index", "=", "max", "(", "refresh", ",", "refresh_index", ")", "dprintf", "(", "2", ...
53.583333
20.12037
def get_neighborhood_overlap(self, node1, node2, connection_type=None): """Get the intersection of two nodes's neighborhoods. Neighborhood is defined by parameter connection_type. :param Vertex node1: First node. :param Vertex node2: Second node. :param Optional[str] connection_type: One of direct or second-degree. Defaults to direct. :return: Overlap of the nodes' neighborhoods. """ if connection_type is None or connection_type == "direct": order = 1 elif connection_type == "second-degree": order = 2 else: raise Exception( "Invalid option: {}. Valid options are direct and second-degree".format( connection_type) ) neighbors1 = self.graph.neighborhood(node1, order=order) neighbors2 = self.graph.neighborhood(node2, order=order) return set(neighbors1).intersection(neighbors2)
[ "def", "get_neighborhood_overlap", "(", "self", ",", "node1", ",", "node2", ",", "connection_type", "=", "None", ")", ":", "if", "connection_type", "is", "None", "or", "connection_type", "==", "\"direct\"", ":", "order", "=", "1", "elif", "connection_type", "=...
43.272727
20.727273
def toMBI(self, getMemoryDump = False): """ Returns a L{win32.MemoryBasicInformation} object using the data retrieved from the database. @type getMemoryDump: bool @param getMemoryDump: (Optional) If C{True} retrieve the memory dump. Defaults to C{False} since this may be a costly operation. @rtype: L{win32.MemoryBasicInformation} @return: Memory block information. """ mbi = win32.MemoryBasicInformation() mbi.BaseAddress = self.address mbi.RegionSize = self.size mbi.State = self._parse_state(self.state) mbi.Protect = self._parse_access(self.access) mbi.Type = self._parse_type(self.type) if self.alloc_base is not None: mbi.AllocationBase = self.alloc_base else: mbi.AllocationBase = mbi.BaseAddress if self.alloc_access is not None: mbi.AllocationProtect = self._parse_access(self.alloc_access) else: mbi.AllocationProtect = mbi.Protect if self.filename is not None: mbi.filename = self.filename if getMemoryDump and self.content is not None: mbi.content = self.content return mbi
[ "def", "toMBI", "(", "self", ",", "getMemoryDump", "=", "False", ")", ":", "mbi", "=", "win32", ".", "MemoryBasicInformation", "(", ")", "mbi", ".", "BaseAddress", "=", "self", ".", "address", "mbi", ".", "RegionSize", "=", "self", ".", "size", "mbi", ...
39.709677
13.129032
def print_tokens(output, tokens, style): """ Print a list of (Token, text) tuples in the given style to the output. """ assert isinstance(output, Output) assert isinstance(style, Style) # Reset first. output.reset_attributes() output.enable_autowrap() # Print all (token, text) tuples. attrs_for_token = _TokenToAttrsCache(style.get_attrs_for_token) for token, text in tokens: attrs = attrs_for_token[token] if attrs: output.set_attributes(attrs) else: output.reset_attributes() output.write(text) # Reset again. output.reset_attributes() output.flush()
[ "def", "print_tokens", "(", "output", ",", "tokens", ",", "style", ")", ":", "assert", "isinstance", "(", "output", ",", "Output", ")", "assert", "isinstance", "(", "style", ",", "Style", ")", "# Reset first.", "output", ".", "reset_attributes", "(", ")", ...
23.888889
18.185185
def add_filename_pattern(self, dir_name, pattern): """ Adds a Unix shell-style wildcard pattern underneath the specified directory :param dir_name: str: directory that contains the pattern :param pattern: str: Unix shell-style wildcard pattern """ full_pattern = '{}{}{}'.format(dir_name, os.sep, pattern) filename_regex = fnmatch.translate(full_pattern) self.regex_list.append(re.compile(filename_regex))
[ "def", "add_filename_pattern", "(", "self", ",", "dir_name", ",", "pattern", ")", ":", "full_pattern", "=", "'{}{}{}'", ".", "format", "(", "dir_name", ",", "os", ".", "sep", ",", "pattern", ")", "filename_regex", "=", "fnmatch", ".", "translate", "(", "fu...
51.222222
17.666667
def messaging(_context, repository, reset_on_start=False): """ Directive for setting up the user message resource in the appropriate repository. :param str repository: The repository to create the user messages resource in. """ discriminator = ('messaging', repository) reg = get_current_registry() config = Configurator(reg, package=_context.package) _context.action(discriminator=discriminator, # pylint: disable=E1101 callable=config.setup_system_repository, args=(repository,), kw=dict(reset_on_start=reset_on_start))
[ "def", "messaging", "(", "_context", ",", "repository", ",", "reset_on_start", "=", "False", ")", ":", "discriminator", "=", "(", "'messaging'", ",", "repository", ")", "reg", "=", "get_current_registry", "(", ")", "config", "=", "Configurator", "(", "reg", ...
40.666667
19.066667
def update_account(self, email=None, company_name=None, first_name=None, last_name=None, address=None, postal_code=None, city=None, state=None, country=None, phone=None): """ :: POST /:login :param email: Email address :type email: :py:class:`basestring` :param company_name: Company name :type company_name: :py:class:`basestring` :param first_name: First name :type first_name: :py:class:`basestring` :param last_name: Last name :type last_name: :py:class:`basestring` :param address: Address :type address: :py:class:`basestring` :param postal_code: Postal code :type postal_code: :py:class:`basestring` :param city: City :type city: :py:class:`basestring` :param state: State :type state: :py:class:`basestring` :param country: Country :type country: :py:class:`basestring` :param phone: Phone :type phone: :py:class:`basestring` :Returns: a dictionary with updated account info :rtype: :py:class:`dict` """ params = {} if email: params['email'] = email if company_name: params['companyName'] = company_name if first_name: params['firstName'] = first_name if last_name: params['lastName'] = last_name if address: params['address'] = address if postal_code: params['postalCode'] = postal_code if city: params['city'] = city if state: params['state'] = state if country: params['country'] = country if phone: params['phone'] = phone j, _ = self.request('POST', '', params=params) return j
[ "def", "update_account", "(", "self", ",", "email", "=", "None", ",", "company_name", "=", "None", ",", "first_name", "=", "None", ",", "last_name", "=", "None", ",", "address", "=", "None", ",", "postal_code", "=", "None", ",", "city", "=", "None", ",...
29.625
15.0625
def init_ui(self): """Setup control widget UI.""" self.control_layout = QHBoxLayout() self.setLayout(self.control_layout) self.reset_button = QPushButton() self.reset_button.setFixedSize(40, 40) self.reset_button.setIcon(QtGui.QIcon(WIN_PATH)) self.game_timer = QLCDNumber() self.game_timer.setStyleSheet("QLCDNumber {color: red;}") self.game_timer.setFixedWidth(100) self.move_counter = QLCDNumber() self.move_counter.setStyleSheet("QLCDNumber {color: red;}") self.move_counter.setFixedWidth(100) self.control_layout.addWidget(self.game_timer) self.control_layout.addWidget(self.reset_button) self.control_layout.addWidget(self.move_counter)
[ "def", "init_ui", "(", "self", ")", ":", "self", ".", "control_layout", "=", "QHBoxLayout", "(", ")", "self", ".", "setLayout", "(", "self", ".", "control_layout", ")", "self", ".", "reset_button", "=", "QPushButton", "(", ")", "self", ".", "reset_button",...
43.941176
11.588235
def updateBoostStrength(self): """ Update boost strength using given strength factor during training """ if self.training: self.boostStrength = self.boostStrength * self.boostStrengthFactor
[ "def", "updateBoostStrength", "(", "self", ")", ":", "if", "self", ".", "training", ":", "self", ".", "boostStrength", "=", "self", ".", "boostStrength", "*", "self", ".", "boostStrengthFactor" ]
34.333333
15
def is_series(data): """ Checks whether the supplied data is of Series type. """ dd = None if 'dask' in sys.modules: import dask.dataframe as dd return((pd is not None and isinstance(data, pd.Series)) or (dd is not None and isinstance(data, dd.Series)))
[ "def", "is_series", "(", "data", ")", ":", "dd", "=", "None", "if", "'dask'", "in", "sys", ".", "modules", ":", "import", "dask", ".", "dataframe", "as", "dd", "return", "(", "(", "pd", "is", "not", "None", "and", "isinstance", "(", "data", ",", "p...
31.888889
13.222222
def frombed(args): """ %prog frombed bed_file [--options] > gff_file Convert bed to gff file. In bed, the accn will convert to key='ID' Default type will be `match` and default source will be `source` """ p = OptionParser(frombed.__doc__) p.add_option("--type", default="match", help="GFF feature type [default: %default]") p.add_option("--source", default="default", help="GFF source qualifier [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args bed = Bed(bedfile) for b in bed: print(b.gffline(type=opts.type, source=opts.source))
[ "def", "frombed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "frombed", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--type\"", ",", "default", "=", "\"match\"", ",", "help", "=", "\"GFF feature type [default: %default]\"", ")", "p", ".", ...
31
18.909091
def _GetDirectory(self): """Retrieves a directory. Returns: TSKPartitionDirectory: a directory or None if not available. """ if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY: return None return TSKPartitionDirectory(self._file_system, self.path_spec)
[ "def", "_GetDirectory", "(", "self", ")", ":", "if", "self", ".", "entry_type", "!=", "definitions", ".", "FILE_ENTRY_TYPE_DIRECTORY", ":", "return", "None", "return", "TSKPartitionDirectory", "(", "self", ".", "_file_system", ",", "self", ".", "path_spec", ")" ...
31.777778
20.444444
def stop_process(self): """Request the module process to stop and release it :return: None """ if not self.process: return logger.info("I'm stopping module %r (pid=%d)", self.name, self.process.pid) self.kill() # Clean inner process reference self.process = None
[ "def", "stop_process", "(", "self", ")", ":", "if", "not", "self", ".", "process", ":", "return", "logger", ".", "info", "(", "\"I'm stopping module %r (pid=%d)\"", ",", "self", ".", "name", ",", "self", ".", "process", ".", "pid", ")", "self", ".", "kil...
27.416667
19
def can_create_activities(self): """Tests if this user can create Activities. A return of true does not guarantee successful authorization. A return of false indicates that it is known creating an Activity will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer create operations to an unauthorized user. return: (boolean) - false if Activity creation is not authorized, true otherwise compliance: mandatory - This method must be implemented. """ url_path = construct_url('authorization', bank_id=self._catalog_idstr) return self._get_request(url_path)['activityHints']['canCreate']
[ "def", "can_create_activities", "(", "self", ")", ":", "url_path", "=", "construct_url", "(", "'authorization'", ",", "bank_id", "=", "self", ".", "_catalog_idstr", ")", "return", "self", ".", "_get_request", "(", "url_path", ")", "[", "'activityHints'", "]", ...
50.4
19.4
def authenticate_credentials(self, token: bytes, request=None): """ Authenticate the token with optional request for context. """ user = AuthToken.get_user_for_token(token) if user is None: raise AuthenticationFailed(_('Invalid auth token.')) if not user.is_active: raise AuthenticationFailed(_('User inactive or deleted.')) return user, token
[ "def", "authenticate_credentials", "(", "self", ",", "token", ":", "bytes", ",", "request", "=", "None", ")", ":", "user", "=", "AuthToken", ".", "get_user_for_token", "(", "token", ")", "if", "user", "is", "None", ":", "raise", "AuthenticationFailed", "(", ...
31.846154
20.923077
def release(self, device_info): """This function is called by the segmentation state machine when it has finished with the device information.""" if _debug: DeviceInfoCache._debug("release %r", device_info) # this information record might be used by more than one SSM if device_info._ref_count == 0: raise RuntimeError("reference count") # decrement the reference count device_info._ref_count -= 1
[ "def", "release", "(", "self", ",", "device_info", ")", ":", "if", "_debug", ":", "DeviceInfoCache", ".", "_debug", "(", "\"release %r\"", ",", "device_info", ")", "# this information record might be used by more than one SSM", "if", "device_info", ".", "_ref_count", ...
41.545455
14.636364
def skip_regex(lines, options): """ Optionally exclude lines that match '--skip-requirements-regex' """ skip_regex = options.skip_requirements_regex if options else None if skip_regex: lines = filterfalse(re.compile(skip_regex).search, lines) return lines
[ "def", "skip_regex", "(", "lines", ",", "options", ")", ":", "skip_regex", "=", "options", ".", "skip_requirements_regex", "if", "options", "else", "None", "if", "skip_regex", ":", "lines", "=", "filterfalse", "(", "re", ".", "compile", "(", "skip_regex", ")...
35
17
def _infer_type(value, element_kind, element_name): """ Infer the CIM type name of the value, based upon its Python type. """ if value is None: raise ValueError( _format("Cannot infer CIM type of {0} {1!A} from its value when " "the value is None", element_kind, element_name)) try: return cimtype(value) except TypeError as exc: raise ValueError( _format("Cannot infer CIM type of {0} {1!A} from its value: {2!A}", element_kind, element_name, exc))
[ "def", "_infer_type", "(", "value", ",", "element_kind", ",", "element_name", ")", ":", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "_format", "(", "\"Cannot infer CIM type of {0} {1!A} from its value when \"", "\"the value is None\"", ",", "element_ki...
34.25
21.375
def prepare_sequencemanager(self) -> None: """Configure the |SequenceManager| object available in module |pub| following the definitions of the actual XML `reader` or `writer` element when available; if not use those of the XML `series_io` element. Compare the following results with `single_run.xml` to see that the first `writer` element defines the input file type specifically, that the second `writer` element defines a general file type, and that the third `writer` element does not define any file type (the principle mechanism is the same for other options, e.g. the aggregation mode): >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> series_io = interface.series_io >>> with TestIO(): ... series_io.writers[0].prepare_sequencemanager() >>> pub.sequencemanager.inputfiletype 'asc' >>> pub.sequencemanager.fluxfiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'none' >>> with TestIO(): ... series_io.writers[1].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'nc' >>> pub.sequencemanager.stateoverwrite False >>> with TestIO(): ... 
series_io.writers[2].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'mean' >>> pub.sequencemanager.inputoverwrite True >>> pub.sequencemanager.inputdirpath 'LahnH/series/input' """ for config, convert in ( ('filetype', lambda x: x), ('aggregation', lambda x: x), ('overwrite', lambda x: x.lower() == 'true'), ('dirpath', lambda x: x)): xml_special = self.find(config) xml_general = self.master.find(config) for name_manager, name_xml in zip( ('input', 'flux', 'state', 'node'), ('inputs', 'fluxes', 'states', 'nodes')): value = None for xml, attr_xml in zip( (xml_special, xml_special, xml_general, xml_general), (name_xml, 'general', name_xml, 'general')): try: value = find(xml, attr_xml).text except AttributeError: continue break setattr(hydpy.pub.sequencemanager, f'{name_manager}{config}', convert(value))
[ "def", "prepare_sequencemanager", "(", "self", ")", "->", "None", ":", "for", "config", ",", "convert", "in", "(", "(", "'filetype'", ",", "lambda", "x", ":", "x", ")", ",", "(", "'aggregation'", ",", "lambda", "x", ":", "x", ")", ",", "(", "'overwri...
41.246377
15.942029
def __get_substitution_paths(g): """ get atoms paths from detached atom to attached :param g: CGRContainer :return: tuple of atoms numbers """ for n, nbrdict in g.adjacency(): for m, l in combinations(nbrdict, 2): nms = nbrdict[m]['sp_bond'] nls = nbrdict[l]['sp_bond'] if nms == (1, None) and nls == (None, 1): yield m, n, l elif nms == (None, 1) and nls == (1, None): yield l, n, m
[ "def", "__get_substitution_paths", "(", "g", ")", ":", "for", "n", ",", "nbrdict", "in", "g", ".", "adjacency", "(", ")", ":", "for", "m", ",", "l", "in", "combinations", "(", "nbrdict", ",", "2", ")", ":", "nms", "=", "nbrdict", "[", "m", "]", "...
35.6
9.2
def launch_tor(config, reactor, tor_binary=None, progress_updates=None, connection_creator=None, timeout=None, kill_on_stderr=True, stdout=None, stderr=None): """ Deprecated; use launch() instead. See also controller.py """ from .controller import launch # XXX FIXME are we dealing with options in the config "properly" # as far as translating semantics from the old launch_tor to # launch()? DataDirectory, User, ControlPort, ...? tor = yield launch( reactor, stdout=stdout, stderr=stderr, progress_updates=progress_updates, tor_binary=tor_binary, connection_creator=connection_creator, timeout=timeout, kill_on_stderr=kill_on_stderr, _tor_config=config, ) defer.returnValue(tor.process)
[ "def", "launch_tor", "(", "config", ",", "reactor", ",", "tor_binary", "=", "None", ",", "progress_updates", "=", "None", ",", "connection_creator", "=", "None", ",", "timeout", "=", "None", ",", "kill_on_stderr", "=", "True", ",", "stdout", "=", "None", "...
31
12
def isomap(geom, n_components=8, eigen_solver='auto', random_state=None, path_method='auto', distance_matrix=None, graph_distance_matrix = None, centered_matrix=None, solver_kwds=None): """ Parameters ---------- geom : a Geometry object from megaman.geometry.geometry n_components : integer, optional The dimension of the projection subspace. eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'} 'auto' : algorithm will attempt to choose the best method for input data 'dense' : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 'arpack' : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. 'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method. A preconditioned eigensolver for large symmetric positive definite (SPD) generalized eigenproblems. 'amg' : AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'. By default, arpack is used. path_method : string, method for computing graph shortest path. One of : 'auto', 'D', 'FW', 'BF', 'J'. See scipy.sparse.csgraph.shortest_path for more information. distance_matrix : sparse Ndarray (n_obs, n_obs), optional. Pairwise distance matrix sparse zeros considered 'infinite'. graph_distance_matrix : Ndarray (n_obs, n_obs), optional. Pairwise graph distance matrix. Output of graph_shortest_path. centered_matrix : Ndarray (n_obs, n_obs), optional. 
Centered version of graph_distance_matrix solver_kwds : any additional keyword arguments to pass to the selected eigen_solver Returns ------- embedding : array, shape=(n_samples, n_components) The reduced samples. Notes ----- """ # Step 1: use geometry to calculate the distance matrix if ((distance_matrix is None) and (centered_matrix is None)): if geom.adjacency_matrix is None: distance_matrix = geom.compute_adjacency_matrix() else: distance_matrix = geom.adjacency_matrix # Step 2: use graph_shortest_path to construct D_G ## WARNING: D_G is an (NxN) DENSE matrix!! if ((graph_distance_matrix is None) and (centered_matrix is None)): graph_distance_matrix = graph_shortest_path(distance_matrix, method=path_method, directed=False) # Step 3: center graph distance matrix if centered_matrix is None: centered_matrix = center_matrix(graph_distance_matrix) # Step 4: compute d largest eigenvectors/values of centered_matrix lambdas, diffusion_map = eigen_decomposition(centered_matrix, n_components, largest=True, eigen_solver=eigen_solver, random_state=random_state, solver_kwds=solver_kwds) # Step 5: # return Y = [sqrt(lambda_1)*V_1, ..., sqrt(lambda_d)*V_d] ind = np.argsort(lambdas); ind = ind[::-1] # sort largest lambdas = lambdas[ind]; diffusion_map = diffusion_map[:, ind] embedding = diffusion_map[:, 0:n_components] * np.sqrt(lambdas[0:n_components]) return embedding
[ "def", "isomap", "(", "geom", ",", "n_components", "=", "8", ",", "eigen_solver", "=", "'auto'", ",", "random_state", "=", "None", ",", "path_method", "=", "'auto'", ",", "distance_matrix", "=", "None", ",", "graph_distance_matrix", "=", "None", ",", "center...
48.180723
25.048193
def _create_parser() -> ArgumentParser: """ Creates argument parser for the CLI. :return: the argument parser """ parser = ArgumentParser(prog=EXECUTABLE_NAME, description=f"{DESCRIPTION} (v{VERSION})") parser.add_argument( f"-{VERBOSE_SHORT_PARAMETER}", action="count", default=0, help="increase the level of log verbosity (add multiple increase further)") subparsers = parser.add_subparsers(dest=ACTION_CLI_PARAMETER_ACCESS, help="action") unlock_subparser = subparsers.add_parser(Action.UNLOCK.value, help="release a lock") unlock_subparser.add_argument( f"-{REGEX_KEY_ENABLED_SHORT_PARAMETER}", action="store_true", default=DEFAULT_REGEX_KEY_ENABLED, help="whether the key should be treated as a regular expression and to release all matching locks") lock_subparser = subparsers.add_parser(Action.LOCK.value, help="acquire a lock") lock_and_execute_subparser = subparsers.add_parser(Action.EXECUTE.value, help="call executable whilst holding lock") for subparser in (lock_subparser, lock_and_execute_subparser): subparser.add_argument( f"--{SESSION_TTL_LONG_PARAMETER}", type=float, default=DEFAULT_SESSION_TTL, help=f"time to live (ttl) in seconds of the session that will be created to hold the lock. Must be between " f"{MIN_LOCK_TIMEOUT_IN_SECONDS}s and {MAX_LOCK_TIMEOUT_IN_SECONDS}s (inclusive). 
If set to " f"{NO_EXPIRY_SESSION_TTL_CLI_PARAMETER_VALUE}, the session will not expire") subparser.add_argument( f"--{NON_BLOCKING_LONG_PARAMETER}", action="store_true", default=DEFAULT_NON_BLOCKING, help="do not block if cannot lock straight away") subparser.add_argument( f"--{TIMEOUT_LONG_PARAMETER}", default=DEFAULT_TIMEOUT, type=float, help="give up trying to acquire the key after this many seconds (where 0 is never)") subparser.add_argument( f"--{METADATA_LONG_PARAMETER}", default=DEFAULT_METADATA, type=str, action=_ParseJsonAction, help="additional metadata to add to the lock information (will be converted to JSON)") subparser.add_argument( f"--{ON_BEFORE_LOCK_LONG_PARAMETER}", default=[], type=str, nargs="+", action="append", help="path to executable that is to be called before an attempt is made to acquire a lock, where the lock " "key is passed as the first argument. Any failures of this executable are ignored") subparser.add_argument( f"--{ON_LOCK_ALREADY_LOCKED_LONG_PARAMETER}", default=[], type=str, nargs="+", action="append", help="path to executable that is to be called after an attempt has been made to acquire a lock but failed " "due to the lock already been taken, where the lock key is passed as the first argument. Any failures " "of this executable are ignored") subparser.add_argument( f"-{LOCK_POLL_INTERVAL_SHORT_PARAMETER}", default=DEFAULT_LOCK_POLL_INTERVAL_GENERATOR(1), type=float, help="number of seconds between polls to acquire a locked lock") # XXX: probably a better way of iterating subparsers on `subparsers` for subparser in [unlock_subparser, lock_subparser, lock_and_execute_subparser]: subparser.add_argument( KEY_PARAMETER, type=str, help="the lock identifier") lock_and_execute_subparser.add_argument( EXECUTABLE_PARAMETER, type=str, help="to execute in shell") return parser
[ "def", "_create_parser", "(", ")", "->", "ArgumentParser", ":", "parser", "=", "ArgumentParser", "(", "prog", "=", "EXECUTABLE_NAME", ",", "description", "=", "f\"{DESCRIPTION} (v{VERSION})\"", ")", "parser", ".", "add_argument", "(", "f\"-{VERBOSE_SHORT_PARAMETER}\"", ...
62.928571
37.178571
def _first_glimpse_sensor(self, x_t): """ Compute first glimpse position using down-sampled image. """ downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4)) downsampled_img = downsampled_img.flatten() first_l = T.dot(downsampled_img, self.W_f) if self.disable_reinforce: wf_grad = self.W_f if self.random_glimpse: first_l = self.srng.uniform((2,), low=-1.7, high=1.7) else: sampled_l_t = self._sample_gaussian(first_l, self.cov) sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), first_l) wf_grad = T.grad(T.log(sampled_pdf), self.W_f) first_l = sampled_l_t return first_l, wf_grad
[ "def", "_first_glimpse_sensor", "(", "self", ",", "x_t", ")", ":", "downsampled_img", "=", "theano", ".", "tensor", ".", "signal", ".", "downsample", ".", "max_pool_2d", "(", "x_t", ",", "(", "4", ",", "4", ")", ")", "downsampled_img", "=", "downsampled_im...
45
16.294118
def multipart_listuploads(self, bucket): """List objects in a bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response. """ return self.make_response( data=MultipartObject.query_by_bucket(bucket).limit(1000).all(), context={ 'class': MultipartObject, 'bucket': bucket, 'many': True, } )
[ "def", "multipart_listuploads", "(", "self", ",", "bucket", ")", ":", "return", "self", ".", "make_response", "(", "data", "=", "MultipartObject", ".", "query_by_bucket", "(", "bucket", ")", ".", "limit", "(", "1000", ")", ".", "all", "(", ")", ",", "con...
32.5
15.428571
def text(self, encoding=None, errors='strict'): r""" Open this file, read it in, return the content as a string. This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r' are automatically translated to '\n'. Optional arguments: encoding - The Unicode encoding (or character set) of the file. If present, the content of the file is decoded and returned as a unicode object; otherwise it is returned as an 8-bit str. errors - How to handle Unicode errors; see help(str.decode) for the options. Default is 'strict'. """ if encoding is None: # 8-bit f = self.open(_textmode) try: return f.read() finally: f.close() else: # Unicode f = codecs.open(self, 'r', encoding, errors) # (Note - Can't use 'U' mode here, since codecs.open # doesn't support 'U' mode, even in Python 2.3.) try: t = f.read() finally: f.close() return (t.replace(u'\r\n', u'\n') .replace(u'\r\x85', u'\n') .replace(u'\r', u'\n') .replace(u'\x85', u'\n') .replace(u'\u2028', u'\n'))
[ "def", "text", "(", "self", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "if", "encoding", "is", "None", ":", "# 8-bit", "f", "=", "self", ".", "open", "(", "_textmode", ")", "try", ":", "return", "f", ".", "read", "(", ...
36.75
16.388889
def popdict(src, keys): """ Extract all keys (with values) from `src` dictionary as new dictionary values are removed from source dictionary. """ new = {} for key in keys: if key in src: new[key] = src.pop(key) return new
[ "def", "popdict", "(", "src", ",", "keys", ")", ":", "new", "=", "{", "}", "for", "key", "in", "keys", ":", "if", "key", "in", "src", ":", "new", "[", "key", "]", "=", "src", ".", "pop", "(", "key", ")", "return", "new" ]
23.636364
17.636364
def string_to_list(string, sep=",", filter_empty=False): """Transforma una string con elementos separados por `sep` en una lista.""" return [value.strip() for value in string.split(sep) if (not filter_empty or value)]
[ "def", "string_to_list", "(", "string", ",", "sep", "=", "\",\"", ",", "filter_empty", "=", "False", ")", ":", "return", "[", "value", ".", "strip", "(", ")", "for", "value", "in", "string", ".", "split", "(", "sep", ")", "if", "(", "not", "filter_em...
58.5
8.75
def list_services(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 List hosted services associated with the account CLI Example: .. code-block:: bash salt-cloud -f list_services my-azure ''' if call != 'function': raise SaltCloudSystemExit( 'The list_services function must be called with -f or --function.' ) if not conn: conn = get_conn() data = conn.list_hosted_services() ret = {} for item in data.hosted_services: ret[item.service_name] = object_to_dict(item) ret[item.service_name]['name'] = item.service_name return ret
[ "def", "list_services", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_services function must be called with -f or --function.'", ")", ...
24.346154
23.192308
def render_toolbar(context, config): """Render the toolbar for the given config.""" quill_config = getattr(quill_app, config) t = template.loader.get_template(quill_config['toolbar_template']) return t.render(context)
[ "def", "render_toolbar", "(", "context", ",", "config", ")", ":", "quill_config", "=", "getattr", "(", "quill_app", ",", "config", ")", "t", "=", "template", ".", "loader", ".", "get_template", "(", "quill_config", "[", "'toolbar_template'", "]", ")", "retur...
45.8
10.2
def gaussian(x, y, xsigma, ysigma): """ Two-dimensional oriented Gaussian pattern (i.e., 2D version of a bell curve, like a normal distribution but not necessarily summing to 1.0). """ if xsigma==0.0 or ysigma==0.0: return x*0.0 with float_error_ignore(): x_w = np.divide(x,xsigma) y_h = np.divide(y,ysigma) return np.exp(-0.5*x_w*x_w + -0.5*y_h*y_h)
[ "def", "gaussian", "(", "x", ",", "y", ",", "xsigma", ",", "ysigma", ")", ":", "if", "xsigma", "==", "0.0", "or", "ysigma", "==", "0.0", ":", "return", "x", "*", "0.0", "with", "float_error_ignore", "(", ")", ":", "x_w", "=", "np", ".", "divide", ...
30.692308
14.692308
def check_api_error(api_response): print(api_response) """Check if returned API response contains an error.""" if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200: print("Server response code: %s" % api_response['code']) print("Server response: %s" % api_response) raise exceptions.HTTPError('Unexpected response', response=api_response) if type(api_response) == dict and (api_response.get('status') == 'failed'): if 'ProgrammingError' in api_response.get('exception_cls'): raise DatabaseError(message='PyBossa database error.', error=api_response) if ('DBIntegrityError' in api_response.get('exception_cls') and 'project' in api_response.get('target')): msg = 'PyBossa project already exists.' raise ProjectAlreadyExists(message=msg, error=api_response) if 'project' in api_response.get('target'): raise ProjectNotFound(message='PyBossa Project not found', error=api_response) if 'task' in api_response.get('target'): raise TaskNotFound(message='PyBossa Task not found', error=api_response) else: print("Server response: %s" % api_response) raise exceptions.HTTPError('Unexpected response', response=api_response)
[ "def", "check_api_error", "(", "api_response", ")", ":", "print", "(", "api_response", ")", "if", "type", "(", "api_response", ")", "==", "dict", "and", "'code'", "in", "api_response", "and", "api_response", "[", "'code'", "]", "<>", "200", ":", "print", "...
58.875
22.25
def mass_2d(self, r, rho0, Rs): """ mass enclosed projected 2d sphere of radius r :param r: :param rho0: :param a: :param s: :return: """ sigma0 = self.rho2sigma(rho0, Rs) return self.mass_2d_lens(r, sigma0, Rs)
[ "def", "mass_2d", "(", "self", ",", "r", ",", "rho0", ",", "Rs", ")", ":", "sigma0", "=", "self", ".", "rho2sigma", "(", "rho0", ",", "Rs", ")", "return", "self", ".", "mass_2d_lens", "(", "r", ",", "sigma0", ",", "Rs", ")" ]
23.416667
15.25
def _make_expand_x_fn_for_non_batch_interpolation(y_ref, axis): """Make func to expand left/right (of axis) dims of tensors shaped like x.""" # This expansion is to help x broadcast with `y`, the output. # In the non-batch case, the output shape is going to be # y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:] # Recall we made axis non-negative y_ref_shape = tf.shape(input=y_ref) y_ref_shape_left = y_ref_shape[:axis] y_ref_shape_right = y_ref_shape[axis + 1:] def expand_ends(x, broadcast=False): """Expand x so it can bcast w/ tensors of output shape.""" # Assume out_shape = A + x.shape + B, and rank(A) = axis. # Expand with singletons with same rank as A, B. expanded_shape = tf.pad( tensor=tf.shape(input=x), paddings=[[axis, tf.size(input=y_ref_shape_right)]], constant_values=1) x_expanded = tf.reshape(x, expanded_shape) if broadcast: out_shape = tf.concat(( y_ref_shape_left, tf.shape(input=x), y_ref_shape_right, ), axis=0) if x.dtype.is_bool: x_expanded = x_expanded | tf.cast(tf.zeros(out_shape), tf.bool) else: x_expanded += tf.zeros(out_shape, dtype=x.dtype) return x_expanded return expand_ends
[ "def", "_make_expand_x_fn_for_non_batch_interpolation", "(", "y_ref", ",", "axis", ")", ":", "# This expansion is to help x broadcast with `y`, the output.", "# In the non-batch case, the output shape is going to be", "# y_ref.shape[:axis] + x.shape + y_ref.shape[axis+1:]", "# Recall we made...
35.942857
16.714286
def find_executable(executable): ''' Finds executable in PATH Returns: string or None ''' logger = logging.getLogger(__name__) logger.debug("Checking executable '%s'...", executable) executable_path = _find_executable(executable) found = executable_path is not None if found: logger.debug("Executable '%s' found: '%s'", executable, executable_path) else: logger.debug("Executable '%s' not found", executable) return executable_path
[ "def", "find_executable", "(", "executable", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"Checking executable '%s'...\"", ",", "executable", ")", "executable_path", "=", "_find_executable", "(", "execu...
30.3125
20.9375
def _get_render_prepared_object(cls, context, **option_values): """ Returns a fully prepared, request-aware menu object that can be used for rendering. ``context`` could be a ``django.template.Context`` object passed to ``render_from_tag()`` by a menu tag. """ ctx_vals = cls._create_contextualvals_obj_from_context(context) opt_vals = cls._create_optionvals_obj_from_values(**option_values) if issubclass(cls, models.Model): instance = cls.get_from_collected_values(ctx_vals, opt_vals) else: instance = cls.create_from_collected_values(ctx_vals, opt_vals) if not instance: return None instance.prepare_to_render(context['request'], ctx_vals, opt_vals) return instance
[ "def", "_get_render_prepared_object", "(", "cls", ",", "context", ",", "*", "*", "option_values", ")", ":", "ctx_vals", "=", "cls", ".", "_create_contextualvals_obj_from_context", "(", "context", ")", "opt_vals", "=", "cls", ".", "_create_optionvals_obj_from_values", ...
43.611111
24.277778
def sort(self): """Sort by detection time. .. rubric:: Example >>> family = Family( ... template=Template(name='a'), detections=[ ... Detection(template_name='a', detect_time=UTCDateTime(0) + 200, ... no_chans=8, detect_val=4.2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0), ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0)]) >>> family[0].detect_time UTCDateTime(1970, 1, 1, 0, 3, 20) >>> family.sort()[0].detect_time UTCDateTime(1970, 1, 1, 0, 0) """ self.detections = sorted(self.detections, key=lambda d: d.detect_time) return self
[ "def", "sort", "(", "self", ")", ":", "self", ".", "detections", "=", "sorted", "(", "self", ".", "detections", ",", "key", "=", "lambda", "d", ":", "d", ".", "detect_time", ")", "return", "self" ]
45.692308
19.423077
def validate_ok_for_update(update): """Validate an update document.""" validate_is_mapping("update", update) # Update can not be {} if not update: raise ValueError('update only works with $ operators') first = next(iter(update)) if not first.startswith('$'): raise ValueError('update only works with $ operators')
[ "def", "validate_ok_for_update", "(", "update", ")", ":", "validate_is_mapping", "(", "\"update\"", ",", "update", ")", "# Update can not be {}", "if", "not", "update", ":", "raise", "ValueError", "(", "'update only works with $ operators'", ")", "first", "=", "next",...
38.333333
11.444444
def register_user_type(self, keyspace, user_type, klass): """ Registers a class to use to represent a particular user-defined type. Query parameters for this user-defined type will be assumed to be instances of `klass`. Result sets for this user-defined type will be instances of `klass`. If no class is registered for a user-defined type, a namedtuple will be used for result sets, and non-prepared statements may not encode parameters for this type correctly. `keyspace` is the name of the keyspace that the UDT is defined in. `user_type` is the string name of the UDT to register the mapping for. `klass` should be a class with attributes whose names match the fields of the user-defined type. The constructor must accepts kwargs for each of the fields in the UDT. This method should only be called after the type has been created within Cassandra. Example:: cluster = Cluster(protocol_version=3) session = cluster.connect() session.set_keyspace('mykeyspace') session.execute("CREATE TYPE address (street text, zipcode int)") session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)") # create a class to map to the "address" UDT class Address(object): def __init__(self, street, zipcode): self.street = street self.zipcode = zipcode cluster.register_user_type('mykeyspace', 'address', Address) # insert a row using an instance of Address session.execute("INSERT INTO users (id, location) VALUES (%s, %s)", (0, Address("123 Main St.", 78723))) # results will include Address instances results = session.execute("SELECT * FROM users") row = results[0] print row.id, row.location.street, row.location.zipcode """ if self.protocol_version < 3: log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). 
" "CQL encoding for simple statements will still work, but named tuples will " "be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type) self._user_types[keyspace][user_type] = klass for session in tuple(self.sessions): session.user_type_registered(keyspace, user_type, klass) UserType.evict_udt_class(keyspace, user_type)
[ "def", "register_user_type", "(", "self", ",", "keyspace", ",", "user_type", ",", "klass", ")", ":", "if", "self", ".", "protocol_version", "<", "3", ":", "log", ".", "warning", "(", "\"User Type serialization is only supported in native protocol version 3+ (%d in use)....
44.912281
27.263158
def Tags(self): """Return all tags found in the value stream. Returns: A `{tagType: ['list', 'of', 'tags']}` dictionary. """ return { IMAGES: self.images.Keys(), AUDIO: self.audios.Keys(), HISTOGRAMS: self.histograms.Keys(), SCALARS: self.scalars.Keys(), COMPRESSED_HISTOGRAMS: self.compressed_histograms.Keys(), TENSORS: self.tensors.Keys(), # Use a heuristic: if the metagraph is available, but # graph is not, then we assume the metagraph contains the graph. GRAPH: self._graph is not None, META_GRAPH: self._meta_graph is not None, RUN_METADATA: list(self._tagged_metadata.keys()) }
[ "def", "Tags", "(", "self", ")", ":", "return", "{", "IMAGES", ":", "self", ".", "images", ".", "Keys", "(", ")", ",", "AUDIO", ":", "self", ".", "audios", ".", "Keys", "(", ")", ",", "HISTOGRAMS", ":", "self", ".", "histograms", ".", "Keys", "("...
35.947368
15.526316
def daytime(date: datetime.date, daybreak: datetime.time = datetime.time(NORMAL_DAY_START_H), nightfall: datetime.time = datetime.time(NORMAL_DAY_END_H)) \ -> "Interval": """ Returns an :class:`Interval` representing daytime on the date given. """ return Interval( datetime.datetime.combine(date, daybreak), datetime.datetime.combine(date, nightfall), )
[ "def", "daytime", "(", "date", ":", "datetime", ".", "date", ",", "daybreak", ":", "datetime", ".", "time", "=", "datetime", ".", "time", "(", "NORMAL_DAY_START_H", ")", ",", "nightfall", ":", "datetime", ".", "time", "=", "datetime", ".", "time", "(", ...
41
18.818182
def load_data(self): """ Loads data files and stores the output in the data attribute. """ data = [] valid_dates = [] mrms_files = np.array(sorted(os.listdir(self.path + self.variable + "/"))) mrms_file_dates = np.array([m_file.split("_")[-2].split("-")[0] for m_file in mrms_files]) old_mrms_file = None file_obj = None for t in range(self.all_dates.shape[0]): file_index = np.where(mrms_file_dates == self.all_dates[t].strftime("%Y%m%d"))[0] if len(file_index) > 0: mrms_file = mrms_files[file_index][0] if mrms_file is not None: if file_obj is not None: file_obj.close() file_obj = Dataset(self.path + self.variable + "/" + mrms_file) #old_mrms_file = mrms_file if "time" in file_obj.variables.keys(): time_var = "time" else: time_var = "date" file_valid_dates = pd.DatetimeIndex(num2date(file_obj.variables[time_var][:], file_obj.variables[time_var].units)) else: file_valid_dates = pd.DatetimeIndex([]) time_index = np.where(file_valid_dates.values == self.all_dates.values[t])[0] if len(time_index) > 0: data.append(file_obj.variables[self.variable][time_index[0]]) valid_dates.append(self.all_dates[t]) if file_obj is not None: file_obj.close() self.data = np.array(data) self.data[self.data < 0] = 0 self.data[self.data > 150] = 150 self.valid_dates = pd.DatetimeIndex(valid_dates)
[ "def", "load_data", "(", "self", ")", ":", "data", "=", "[", "]", "valid_dates", "=", "[", "]", "mrms_files", "=", "np", ".", "array", "(", "sorted", "(", "os", ".", "listdir", "(", "self", ".", "path", "+", "self", ".", "variable", "+", "\"/\"", ...
47.102564
17.820513
def once(ctx, name): """Run kibitzr checks once and exit""" from kibitzr.app import Application app = Application() sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
[ "def", "once", "(", "ctx", ",", "name", ")", ":", "from", "kibitzr", ".", "app", "import", "Application", "app", "=", "Application", "(", ")", "sys", ".", "exit", "(", "app", ".", "run", "(", "once", "=", "True", ",", "log_level", "=", "ctx", ".", ...
40
14.8
def stop_notifying(cls, user_or_email, instance): """Delete the watch created by notify.""" super(InstanceEvent, cls).stop_notifying(user_or_email, object_id=instance.pk)
[ "def", "stop_notifying", "(", "cls", ",", "user_or_email", ",", "instance", ")", ":", "super", "(", "InstanceEvent", ",", "cls", ")", ".", "stop_notifying", "(", "user_or_email", ",", "object_id", "=", "instance", ".", "pk", ")" ]
58
15.75
def restore_from_checkpoint(sess, input_checkpoint): """Return a TensorFlow saver from a checkpoint containing the metagraph.""" saver = tf.train.import_meta_graph('{}.meta'.format(input_checkpoint)) saver.restore(sess, input_checkpoint) return saver
[ "def", "restore_from_checkpoint", "(", "sess", ",", "input_checkpoint", ")", ":", "saver", "=", "tf", ".", "train", ".", "import_meta_graph", "(", "'{}.meta'", ".", "format", "(", "input_checkpoint", ")", ")", "saver", ".", "restore", "(", "sess", ",", "inpu...
52.4
14.2
def remnant_mass_ulim(eta, ns_g_mass, bh_spin_z, ns_sequence,
                      max_ns_g_mass, shift):
    """
    Function that determines the maximum remnant disk mass for an NS-BH
    system with given symmetric mass ratio, NS mass, and BH spin parameter
    component along the orbital angular momentum. This is a wrapper to the
    function remnant_mass. Maximization is achieved by setting the BH
    dimensionless spin magnitude to unity. An unreasonably large remnant
    disk mass (100 solar masses) is returned if the maximum possible NS
    mass is exceeded in applying the model of Foucart PRD 86, 124007 (2012).

    Parameters
    -----------
    eta: float
        the symmetric mass ratio of the binary
    ns_g_mass: float
        NS gravitational mass (in solar masses)
    bh_spin_z: float
        the BH dimensionless spin parameter for the spin projection along
        the orbital angular momentum
    ns_sequence: 3D-array
        contains the sequence data in the form NS gravitational
        mass (in solar masses), NS baryonic mass (in solar
        masses), NS compactness (dimensionless)
    max_ns_g_mass: float
        the maximum NS gravitational mass covered by ns_sequence
        (in solar masses)
    shift: float
        an amount to be subtracted to the remnant mass upper limit
        predicted by the model (in solar masses)

    Returns
    ----------
    remnant_mass_upper_limit: float
        The remnant mass upper limit in solar masses

    Raises
    ------
    ValueError
        if eta is outside (0, 0.25] or |bh_spin_z| > 1
    """
    # Sanity checks.  Raise ValueError (a subclass of Exception, so any
    # existing `except Exception` handlers still work) for unphysical inputs.
    if not (eta > 0. and eta <= 0.25 and abs(bh_spin_z) <= 1):
        raise ValueError("""The absolute value of the BH spin z-component must be <=1.
            Eta must be between 0 and 0.25.
            The function remnant_mass_ulim was launched with eta={0} and chi_z={1}.
            Unphysical parameters!""".format(eta, bh_spin_z))
    # Unreasonably large remnant disk mass: returned when the NS mass lies
    # outside the range covered by the equilibrium sequence, so such systems
    # are never rejected by a disk-mass cut.
    default_remnant_mass = 100.
    if ns_g_mass > max_ns_g_mass:
        return default_remnant_mass
    # To maximise the remnant mass, allow the BH spin magnitude to be maximum.
    bh_spin_magnitude = 1.
    # Inclination of the (unit-magnitude) spin that reproduces the requested
    # z-component.
    bh_spin_inclination = np.arccos(bh_spin_z / bh_spin_magnitude)
    return pycbc.tmpltbank.em_progenitors.remnant_mass(
        eta, ns_g_mass, ns_sequence, bh_spin_magnitude,
        bh_spin_inclination, shift)
[ "def", "remnant_mass_ulim", "(", "eta", ",", "ns_g_mass", ",", "bh_spin_z", ",", "ns_sequence", ",", "max_ns_g_mass", ",", "shift", ")", ":", "# Sanity checks", "if", "not", "(", "eta", ">", "0.", "and", "eta", "<=", "0.25", "and", "abs", "(", "bh_spin_z",...
43.54902
20
def XYZ_to_galcencyl(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True):
    """
    NAME:

       XYZ_to_galcencyl

    PURPOSE:

       transform XYZ coordinates (wrt Sun) to cylindrical Galactocentric coordinates

    INPUT:

       X - X

       Y - Y

       Z - Z

       Xsun - cylindrical distance to the GC

       Zsun - Sun's height above the midplane

       _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition

    OUTPUT:

       R,phi,z

    HISTORY:

       2010-09-24 - Written - Bovy (NYU)

    """
    # First go to rectangular Galactocentric coordinates, forcing 2D so the
    # column indexing below works for scalar input too.
    rect = XYZ_to_galcenrect(X, Y, Z, Xsun=Xsun, Zsun=Zsun,
                             _extra_rot=_extra_rot)
    rect = nu.atleast_2d(rect)
    # Then convert each rectangular point to cylindrical (R, phi, z).
    cyl = rect_to_cyl(rect[:, 0], rect[:, 1], rect[:, 2])
    return nu.array(cyl).T
[ "def", "XYZ_to_galcencyl", "(", "X", ",", "Y", ",", "Z", ",", "Xsun", "=", "1.", ",", "Zsun", "=", "0.", ",", "_extra_rot", "=", "True", ")", ":", "XYZ", "=", "nu", ".", "atleast_2d", "(", "XYZ_to_galcenrect", "(", "X", ",", "Y", ",", "Z", ",", ...
20.833333
30.777778
def parse_input(s):
    """Parse the given input and intelligently transform it into an
    absolute, non-naive, timezone-aware datetime object for the UTC
    timezone.

    The input can be specified as a millisecond-precision UTC timestamp
    (or delta against Epoch), with or without a terminating 'L'.
    Alternatively, the input can be specified as a human-readable delta
    string with unit-separated segments, like '24d6h4m500' (24 days, 6
    hours, 4 minutes and 500ms), as long as the segments are in
    descending unit span order."""
    # Accept ints by stringifying them; reject any other non-string type.
    if isinstance(s, six.integer_types):
        s = str(s)
    elif not isinstance(s, six.string_types):
        raise ValueError(s)

    # Keep the unmodified input for error messages.
    original = s

    # Tolerate a legacy long-integer 'L' suffix on timestamps.
    if s[-1:] == 'L':
        s = s[:-1]

    # A leading '-', '=' or '+' marks the value as a delta relative to
    # now (past, zero offset, future); None means absolute timestamp
    # unless a unit letter is found below.
    sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None)
    if sign is not None:
        s = s[1:]

    # Accumulated value in milliseconds.
    ts = 0
    for unit in _SORTED_UNITS:
        # unit is a (letter, timedelta) pair; consume one "<digits><letter>"
        # segment per unit, in descending unit-span order.
        pos = s.find(unit[0])
        if pos == 0:
            # A unit letter with no leading digits is malformed.
            raise ValueError(original)
        elif pos > 0:
            # If we find a unit letter, we're dealing with an offset. Default
            # to positive offset if a sign wasn't specified.
            if sign is None:
                sign = 1
            ts += int(s[:pos]) * __timedelta_millis(unit[1])
            s = s[min(len(s), pos + 1):]

    # Any trailing bare digits are taken as milliseconds.
    if s:
        ts += int(s)

    # sign None or 0 ('=') yields an absolute timestamp; otherwise apply
    # the signed delta to the current UTC time.
    return date_from_utc_ts(ts) if not sign else \
        utc() + sign * delta(milliseconds=ts)
[ "def", "parse_input", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "six", ".", "integer_types", ")", ":", "s", "=", "str", "(", "s", ")", "elif", "not", "isinstance", "(", "s", ",", "six", ".", "string_types", ")", ":", "raise", "ValueErr...
33.463415
21.829268
def exec_commands(commands: str, **parameters: Any) -> None:
    """Execute the given Python commands.

    Function |exec_commands| is thought for testing purposes only (see
    the main documentation on module |hyd|).  The *commands* string
    holds individual commands separated by semicolons, with whitespace
    encoded as single underscores and literal underscores encoded as
    double underscores.  Any additional keyword arguments are assigned
    (via exec) before the commands run, so the commands can refer to
    them by name.

    >>> from hydpy.exe.commandtools import exec_commands
    >>> exec_commands("print(1+1)")
    Start to execute the commands ['print(1+1)'] for testing purposes.
    2
    """
    subcommands = commands.split(';')
    print(f'Start to execute the commands {subcommands} for testing purposes.')
    # Bind the given parameters in this frame so the commands can use them.
    for name, value in parameters.items():
        exec(f'{name} = {value}')
    for subcommand in subcommands:
        # Decode: '__' stands for a literal underscore, '_' for a space.
        # The sentinel protects literal underscores during the swap.
        decoded = (subcommand.replace('__', 'temptemptemp')
                   .replace('_', ' ')
                   .replace('temptemptemp', '_'))
        exec(decoded)
[ "def", "exec_commands", "(", "commands", ":", "str", ",", "*", "*", "parameters", ":", "Any", ")", "->", "None", ":", "cmdlist", "=", "commands", ".", "split", "(", "';'", ")", "print", "(", "f'Start to execute the commands {cmdlist} for testing purposes.'", ")"...
37.916667
22.416667
def enable_enhanced_monitoring(stream_name, metrics, region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(conn,
                                     "enable_enhanced_monitoring",
                                     StreamName=stream_name,
                                     ShardLevelMetrics=metrics)
    # Flag success unless the retry helper reported an error.
    if 'error' not in response:
        response['result'] = True
    return response
[ "def", "enable_enhanced_monitoring", "(", "stream_name", ",", "metrics", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", ...
39.333333
29.555556
def SetGaugeCallback(self, metric_name, callback, fields=None):
    """See base class."""
    gauge = self._gauge_metrics[metric_name]
    gauge.SetCallback(callback, fields)
[ "def", "SetGaugeCallback", "(", "self", ",", "metric_name", ",", "callback", ",", "fields", "=", "None", ")", ":", "self", ".", "_gauge_metrics", "[", "metric_name", "]", ".", "SetCallback", "(", "callback", ",", "fields", ")" ]
51.333333
16.333333
def to_delete(datetimes, years=0, months=0, weeks=0, days=0, hours=0,
              minutes=0, seconds=0, firstweekday=SATURDAY, now=None):
    """
    Return a set of datetimes that should be deleted, out of ``datetimes``.

    This is simply the complement of ``to_keep``; see that function for a
    description of the arguments.
    """
    candidates = set(datetimes)
    kept = to_keep(candidates, years=years, months=months, weeks=weeks,
                   days=days, hours=hours, minutes=minutes, seconds=seconds,
                   firstweekday=firstweekday, now=now)
    return candidates - kept
[ "def", "to_delete", "(", "datetimes", ",", "years", "=", "0", ",", "months", "=", "0", ",", "weeks", "=", "0", ",", "days", "=", "0", ",", "hours", "=", "0", ",", "minutes", "=", "0", ",", "seconds", "=", "0", ",", "firstweekday", "=", "SATURDAY"...
42
15.2
def update(self, friendly_name=values.unset, attributes=values.unset,
           date_created=values.unset, date_updated=values.unset,
           created_by=values.unset):
    """
    Update the SessionInstance

    :param unicode friendly_name: The human-readable name of this session.
    :param unicode attributes: An optional string metadata field you can use to store any data you wish.
    :param datetime date_created: The date that this resource was created.
    :param datetime date_updated: The date that this resource was last updated.
    :param unicode created_by: Identity of the session's creator.

    :returns: Updated SessionInstance
    :rtype: twilio.rest.messaging.v1.session.SessionInstance
    """
    # Delegate to the proxy context, forwarding the arguments unchanged.
    options = {
        'friendly_name': friendly_name,
        'attributes': attributes,
        'date_created': date_created,
        'date_updated': date_updated,
        'created_by': created_by,
    }
    return self._proxy.update(**options)
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "attributes", "=", "values", ".", "unset", ",", "date_created", "=", "values", ".", "unset", ",", "date_updated", "=", "values", ".", "unset", ",", "created_by", "=", "...
44.590909
19.863636
def get_default_config(self):
    """ Returns the default collector settings
    """
    # Start from the base collector defaults, then layer on the
    # IPVS-specific settings.
    defaults = super(IPVSCollector, self).get_default_config()
    overrides = {
        'bin': '/usr/sbin/ipvsadm',
        'use_sudo': True,
        'sudo_cmd': '/usr/bin/sudo',
        'path': 'ipvs',
    }
    defaults.update(overrides)
    return defaults
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "IPVSCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'bin'", ":", "'/usr/sbin/ipvsadm'", ",", "'use_sudo'", ":", "True", ...
32.5
11
def description_of(file, name='stdin'):
    """Return a string describing the probable encoding of a file."""
    detector = UniversalDetector()
    # Feed the detector line by line, then finalize the detection.
    for chunk in file:
        detector.feed(chunk)
    detector.close()
    outcome = detector.result
    if not outcome['encoding']:
        return '%s: no result' % name
    return '%s: %s with confidence %s' % (name, outcome['encoding'],
                                          outcome['confidence'])
[ "def", "description_of", "(", "file", ",", "name", "=", "'stdin'", ")", ":", "u", "=", "UniversalDetector", "(", ")", "for", "line", "in", "file", ":", "u", ".", "feed", "(", "line", ")", "u", ".", "close", "(", ")", "result", "=", "u", ".", "res...
35.769231
16.153846