text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def reset(cls):
    """Restore the registry to the stock multihash function set.

    Clears every lookup table, then re-registers each entry from the
    class-level standard function data.
    """
    # Lookup tables: by function name (hyphens or underscores), by
    # hashlib name, and hashlib compatibility data keyed by function.
    cls._func_from_name = {}
    cls._func_from_hash = {}
    cls._func_hash = {}
    for entry in cls._std_func_data:
        fn, hashlib_name, hashlib_new = entry
        cls._do_register(fn, fn.name, hashlib_name, hashlib_new)
    # Every standard function must now be registered.
    assert set(cls._func_hash) == set(Func)
[ "def", "reset", "(", "cls", ")", ":", "# Maps function names (hyphens or underscores) to registered functions.", "cls", ".", "_func_from_name", "=", "{", "}", "# Maps hashlib names to registered functions.", "cls", ".", "_func_from_hash", "=", "{", "}", "# Hashlib compatibili...
37.2
19.133333
def calcELAxi(R,vR,vT,pot,vc=1.,ro=1.):
    """
    NAME:
       calcELAxi
    PURPOSE:
       calculate the energy and angular momentum
    INPUT:
       R - Galactocentric radius (/ro)
       vR - radial part of the velocity (/vc)
       vT - azimuthal part of the velocity (/vc)
       vc - circular velocity
       ro - reference radius
    OUTPUT:
       (E,L)
    HISTORY:
       2010-11-30 - Written - Bovy (NYU)
    """
    # Specific energy: potential plus the two kinetic terms.
    energy = potentialAxi(R, pot) + vR**2./2. + vT**2./2.
    # Specific angular momentum about the symmetry axis.
    angular_momentum = R * vT
    return (energy, angular_momentum)
[ "def", "calcELAxi", "(", "R", ",", "vR", ",", "vT", ",", "pot", ",", "vc", "=", "1.", ",", "ro", "=", "1.", ")", ":", "return", "(", "potentialAxi", "(", "R", ",", "pot", ")", "+", "vR", "**", "2.", "/", "2.", "+", "vT", "**", "2.", "/", ...
27.5
14.555556
def parse_c_serialized(f):
    """
    Reads in a binary file created by a C++ serializer (prob. MFC?)
    and returns tuples of (header name, data following the header).
    These are used by Thermo for *.CF and *.DXF files
    and by Agilent for new-style *.REG files.
    """
    # TODO: rewrite to use re library
    f.seek(0)
    try:
        p_rec_type = None
        while True:
            rec_off = f.tell()
            # Scan forward one byte at a time until a b'\xff\xff' marker
            # followed by a plausible header (short name starting with
            # 'C') is found.
            while True:
                if f.read(2) == b'\xff\xff':
                    h = struct.unpack('<HH', f.read(4))
                    if h[1] < 64 and h[1] != 0:
                        rec_type = f.read(h[1])
                        if rec_type[0] == 67:  # starts with 'C'
                            break
                if f.read(1) == b'':
                    raise EOFError
                # Step back so the scan advances by exactly one byte.
                f.seek(f.tell() - 2)
            if p_rec_type is not None:
                # Emit the previous record: everything between its header
                # and the header we just found.
                rec_len = f.tell() - 6 - len(rec_type) - rec_off
                f.seek(rec_off)
                yield p_rec_type, f.read(rec_len)
                f.seek(f.tell() + 6 + len(rec_type))
            # p_type = h[0]
            p_rec_type = rec_type
    except EOFError:
        # End of file: flush the final record.
        rec_len = f.tell() - 6 - len(rec_type) - rec_off
        f.seek(rec_off)
        yield p_rec_type, f.read(rec_len)
[ "def", "parse_c_serialized", "(", "f", ")", ":", "# TODO: rewrite to use re library", "f", ".", "seek", "(", "0", ")", "try", ":", "p_rec_type", "=", "None", "while", "True", ":", "rec_off", "=", "f", ".", "tell", "(", ")", "while", "True", ":", "if", ...
36.970588
12.911765
def job_to_dict(job):
    """Converts a job to an OrderedDict."""
    data = OrderedDict()
    # Base fields, in a fixed presentation order.
    for key, value in (('id', job.id), ('name', job.name),
                       ('func', job.func_ref), ('args', job.args),
                       ('kwargs', job.kwargs)):
        data[key] = value
    # Trigger fields are flattened into the same mapping.
    data.update(trigger_to_dict(job.trigger))
    # Scheduling details only exist once the job has been added.
    if not job.pending:
        data['misfire_grace_time'] = job.misfire_grace_time
        data['max_instances'] = job.max_instances
        data['next_run_time'] = job.next_run_time
    return data
[ "def", "job_to_dict", "(", "job", ")", ":", "data", "=", "OrderedDict", "(", ")", "data", "[", "'id'", "]", "=", "job", ".", "id", "data", "[", "'name'", "]", "=", "job", ".", "name", "data", "[", "'func'", "]", "=", "job", ".", "func_ref", "data...
28.111111
21.055556
def get_all_tgt(self):
    """
    Returns a list of AS_REP tickets in native format (dict).
    To determine which ticket are AP_REP we check for the server principal
    to be the kerberos service
    """
    # A credential is a TGT when its server principal names the
    # 'krbtgt' kerberos service.
    return [
        cred.to_tgt() for cred in self.credentials
        if 'krbtgt' in cred.server.to_string().lower()
    ]
[ "def", "get_all_tgt", "(", "self", ")", ":", "tgts", "=", "[", "]", "for", "cred", "in", "self", ".", "credentials", ":", "if", "cred", ".", "server", ".", "to_string", "(", ")", ".", "lower", "(", ")", ".", "find", "(", "'krbtgt'", ")", "!=", "-...
30.454545
21
def process_entry(self, entry):
    'Construct a Post from a feedparser entry and save/update it in db'
    from feedjack.models import Post, Tag

    ## Construct a Post object from feedparser entry (FeedParserDict)
    post = Post(feed=self.feed)
    post.link = entry.get('link', self.feed.link)
    post.title = entry.get('title', post.link)
    post.guid = self._get_guid(entry)

    # Author: prefer the structured author_detail, fall back to flat fields.
    if 'author_detail' in entry:
        post.author = entry.author_detail.get('name', '')
        post.author_email = entry.author_detail.get('email', '')
    if not post.author:
        post.author = entry.get('author', entry.get('creator', ''))
    if not post.author_email:
        post.author_email = 'nospam@nospam.com'

    # Content: full content block if present, otherwise summary/description.
    try:
        post.content = entry.content[0].value
    except:
        post.content = entry.get('summary', entry.get('description', ''))

    # Try to get the post date from "updated" then "published" then "created"
    ts_parsed = ts_raw = None
    for k in self.post_timestamp_keys:
        try:
            post.date_modified = get_modified_date(
                entry.get('{0}_parsed'.format(k)), entry.get(k))
        except ValueError as err:
            log.warn(
                'Failed to process post timestamp:'
                ' {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid))
        if post.date_modified:
            break

    post.comments = entry.get('comments', '')

    # Enclosures: start from feedparser enclosures, then fold in
    # media_content items tagged with a special content type.
    enclosures = entry.get('enclosures', list())
    if 'media_content' in entry:
        for mc in entry.media_content:
            if 'url' in mc:
                e = dict(href=mc['url'], medium=mc.get('medium', 'image'))
            else:
                # NOTE(review): falls back to the whole media_content list
                # when an item has no 'url' — looks suspicious, confirm.
                e = entry.media_content
            e['type'] = 'application/x-media-content'  # special ct for these things
            enclosures.append(e)
        assert enclosures, enclosures
        post.enclosures = enclosures

    ## Get a list of tag objects from an entry
    # Note that these objects can't go into m2m field until properly saved
    fcat = list()
    if entry.has_key('tags'):
        for tcat in entry.tags:
            qcat = tcat.label if tcat.label is not None else tcat.term
            if not qcat:
                continue
            qcat = qcat.strip()
            # Comma/slash separated tag strings are split into several tags.
            if ',' in qcat or '/' in qcat:
                qcat = qcat.replace(',', '/').split('/')
            else:
                qcat = [qcat]
            for zcat in qcat:
                # Normalize whitespace, lowercase, clip to the db field size.
                tagname = ' '.join(zcat.lower().split()).strip()[:255]
                if not tagname:
                    continue
                if not Tag.objects.filter(name=tagname):
                    cobj = Tag(name=tagname)
                    cobj.save()
                fcat.append(Tag.objects.get(name=tagname))

    ## Some feedback
    post_base_fields = 'title link guid author author_email'.split()
    log.debug('[{0}] Entry\n{1}'.format(self.feed.id, '\n'.join(
        [' {0}: {1}'.format(key, getattr(post, key)) for key in post_base_fields]
        + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))])))

    ## Store / update a post
    if post.guid in self.postdict:
        # post exists, update if it was modified (and feed is mutable)
        post_old = self.postdict[post.guid]
        changed = post_old.content != post.content or (
            post.date_modified and post_old.date_modified != post.date_modified)
        if not self.feed.immutable and changed:
            retval = ENTRY_UPDATED
            log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link))
            # Update fields
            for field in post_base_fields + ['content', 'comments']:
                setattr(post_old, field, getattr(post, field))
            post_old.date_modified = post.date_modified or post_old.date_modified
            # Update tags
            post_old.tags.clear()
            for tcat in fcat:
                post_old.tags.add(tcat)
            post_old.save()
        else:
            retval = ENTRY_SAME
            log.extra((
                '[{0}] Post has not changed: {1}' if not changed else
                '[{0}] Post changed, but feed is marked as immutable: {1}')\
                .format(self.feed.id, post.link))
    else:
        # new post, store it into database
        retval = ENTRY_NEW
        log.extra(
            '[{0}] Saving new post: {1} (timestamp: {2})'\
            .format(self.feed.id, post.guid, post.date_modified))
        # Try hard to set date_modified: feed.modified, http.modified and now() as a last resort
        if not post.date_modified and self.fpf:
            try:
                post.date_modified = get_modified_date(
                    self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed'),
                    self.fpf.feed.get('modified') or self.fpf.get('modified'))
            except ValueError as err:
                log.warn((
                    'Failed to process feed/http timestamp: {0} (feed_id: {1},'
                    ' post_guid: {2}), falling back to "now"'
                ).format(err, self.feed.id, post.guid))
        if not post.date_modified:
            post.date_modified = timezone.now()
            log.debug((
                '[{0}] Using current time for post'
                ' ({1}) timestamp').format(self.feed.id, post.guid))
        else:
            log.debug(
                '[{0}] Using timestamp from feed/http for post ({1}): {2}'\
                .format(self.feed.id, post.guid, post.date_modified))
        if self.options.hidden:
            post.hidden = True
        try:
            post.save()
        except IntegrityError:
            log.error(
                'IntegrityError while saving (supposedly) new'\
                ' post with guid: {0.guid}, link: {0.link}, title: {0.title}'.format(post))
            raise
        for tcat in fcat:
            post.tags.add(tcat)
        self.postdict[post.guid] = post

    return retval
[ "def", "process_entry", "(", "self", ",", "entry", ")", ":", "from", "feedjack", ".", "models", "import", "Post", ",", "Tag", "## Construct a Post object from feedparser entry (FeedParserDict)", "post", "=", "Post", "(", "feed", "=", "self", ".", "feed", ")", "p...
38.666667
21.777778
def remove_callback(instance, prop, callback):
    """
    Remove a callback function from a property in an instance

    Parameters
    ----------
    instance
        The instance to detach the callback from
    prop : str
        Name of callback property in `instance`
    callback : func
        The callback function to remove

    Raises
    ------
    TypeError
        If `prop` does not name a CallbackProperty on the instance's class.
    """
    # The callback machinery lives on the class-level descriptor.
    descriptor = getattr(type(instance), prop)
    if isinstance(descriptor, CallbackProperty):
        descriptor.remove_callback(instance, callback)
    else:
        raise TypeError("%s is not a CallbackProperty" % prop)
[ "def", "remove_callback", "(", "instance", ",", "prop", ",", "callback", ")", ":", "p", "=", "getattr", "(", "type", "(", "instance", ")", ",", "prop", ")", "if", "not", "isinstance", "(", "p", ",", "CallbackProperty", ")", ":", "raise", "TypeError", "...
30.058824
14.058824
def _expect(self, expected, times=50): """Find the `expected` line within `times` trials. Args: expected str: the expected string times int: number of trials """ print '[%s] Expecting [%s]' % (self.port, expected) retry_times = 10 while times > 0 and retry_times > 0: line = self._readline() print '[%s] Got line [%s]' % (self.port, line) if line == expected: print '[%s] Expected [%s]' % (self.port, expected) return if not line: retry_times -= 1 time.sleep(0.1) times -= 1 raise Exception('failed to find expected string[%s]' % expected)
[ "def", "_expect", "(", "self", ",", "expected", ",", "times", "=", "50", ")", ":", "print", "'[%s] Expecting [%s]'", "%", "(", "self", ".", "port", ",", "expected", ")", "retry_times", "=", "10", "while", "times", ">", "0", "and", "retry_times", ">", "...
29.36
19.16
def description(self):
    """
    A list of the metrics this query will ask for.
    """
    # No metrics requested at all.
    if 'metrics' not in self.raw:
        return 'n/a'
    metrics = self.raw['metrics']
    # Everything but the last metric, comma-joined; a single metric
    # stands alone.
    leading = metrics[:-1] or metrics[:1]
    text = ", ".join(leading)
    if len(metrics) > 1:
        text = text + " and " + metrics[-1]
    return text
[ "def", "description", "(", "self", ")", ":", "if", "'metrics'", "in", "self", ".", "raw", ":", "metrics", "=", "self", ".", "raw", "[", "'metrics'", "]", "head", "=", "metrics", "[", "0", ":", "-", "1", "]", "or", "metrics", "[", "0", ":", "1", ...
26.25
13.5
def dereference_package_descriptor(descriptor, base_path):
    """Dereference data package descriptor (IN-PLACE FOR NOW).
    """
    # Each resource is dereferenced individually against the same base path.
    resources = descriptor.get('resources', [])
    for resource in resources:
        dereference_resource_descriptor(resource, base_path, descriptor)
    return descriptor
[ "def", "dereference_package_descriptor", "(", "descriptor", ",", "base_path", ")", ":", "for", "resource", "in", "descriptor", ".", "get", "(", "'resources'", ",", "[", "]", ")", ":", "dereference_resource_descriptor", "(", "resource", ",", "base_path", ",", "de...
45.333333
13.5
def pulls(self, from_date=None):
    """Fetch the pull requests from the repository.

    The method retrieves, from a GitHub repository, the pull requests
    updated since the given date.

    :param from_date: obtain pull requests updated since this date

    :returns: a generator of pull requests
    """
    for raw_issues in self.issues(from_date=from_date):
        for issue in json.loads(raw_issues):
            # Only issues carrying a "pull_request" key are PRs.
            if "pull_request" not in issue:
                continue
            pull_number = issue["number"]
            path = urijoin(self.base_url, 'repos', self.owner,
                           self.repository, "pulls", pull_number)
            yield self.fetch(path).text
[ "def", "pulls", "(", "self", ",", "from_date", "=", "None", ")", ":", "issues_groups", "=", "self", ".", "issues", "(", "from_date", "=", "from_date", ")", "for", "raw_issues", "in", "issues_groups", ":", "issues", "=", "json", ".", "loads", "(", "raw_is...
31.192308
20.961538
def _produce_return(self, cursor): """ Return the one result. """ results = cursor.fetchmany(2) if len(results) != 1: return None # Return the one row, or the one column. row = results[0] if self._row_formatter is not None: row = self._row_formatter(row, cursor) elif len(row) == 1: row = row[0] return row
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "results", "=", "cursor", ".", "fetchmany", "(", "2", ")", "if", "len", "(", "results", ")", "!=", "1", ":", "return", "None", "# Return the one row, or the one column.", "row", "=", "results", ...
26.8
13.666667
def _compile_column_metadata(row, keys, number):
    """
    Compile column metadata from one excel row ("9 part data")

    :param list row: Row of cells
    :param list keys: Variable header keys
    :return dict: Column metadata
    """
    # Store the variable keys by index in a dictionary
    _column = {}
    _interpretation = {}
    _calibration = {}
    _physical = {}
    # Use the header keys to place the column data in the dictionary
    if keys:
        for idx, key in enumerate(keys):
            _key_low = key.lower()
            # Special case: Calibration data
            if re.match(re_calibration, _key_low):
                m = re.match(re_calibration, _key_low)
                if m:
                    _key = m.group(1)
                    _calibration[_key] = row[idx].value
            # Special case: PhysicalSample data
            elif re.match(re_physical, _key_low):
                m = re.match(re_physical, _key_low)
                if m:
                    _key = m.group(1)
                    _physical[_key] = row[idx].value
            # Special case: Interpretation data
            elif re.match(re_interpretation, _key_low):
                # Put interpretation data in a tmp dictionary that we'll sort later.
                _interpretation[_key_low] = row[idx].value
            else:
                try:
                    val = row[idx].value
                except Exception:
                    logger_excel.info("compile_column_metadata: Couldn't get value from row cell")
                    val = "n/a"
                try:
                    if key == "variableName":
                        val = _rm_units_from_var_name_single(row[idx].value)
                except Exception:
                    # when a variableName fails to split, keep the name as-is and move on.
                    pass
                _column[key] = val
        _column["number"] = number
        if _calibration:
            _column["calibration"] = _calibration
        # Only allow physicalSample on measured variableTypes. duh.
        # NOTE(review): assumes "variableType" was populated by the header
        # loop above — a row without that header would raise KeyError; confirm.
        if _physical and _column["variableType"] == "measured":
            _column["physicalSample"] = _physical
        if _interpretation:
            _interpretation_data = _compile_interpretation(_interpretation)
            _column["interpretation"] = _interpretation_data
    # If there are not keys, that means it's a header-less metadata section.
    else:
        # Assume we only have one cell, because we have no keys to know what data is here.
        try:
            val = row[0].value.lower()
        except AttributeError:
            val = row[0].value
        except Exception:
            logger_excel.info("compile_column_metadata: Couldn't get value from row cell")
            val = "n/a"
        val = _rm_units_from_var_name_single(val)
        _column["variableName"] = val
        _column["number"] = number
    # Add this column to the overall metadata, but skip if there's no data present
    _column = {k: v for k, v in _column.items() if v}
    return _column
[ "def", "_compile_column_metadata", "(", "row", ",", "keys", ",", "number", ")", ":", "# Store the variable keys by index in a dictionary", "_column", "=", "{", "}", "_interpretation", "=", "{", "}", "_calibration", "=", "{", "}", "_physical", "=", "{", "}", "# U...
37.797468
19.392405
def download(gfile, wks_name=None, col_names=False, row_names=False,
             credentials=None, start_cell = 'A1'):
    """
    Download Google Spreadsheet and convert it to Pandas DataFrame

    :param gfile: path to Google Spreadsheet or gspread ID
    :param wks_name: worksheet name
    :param col_names: assing top row to column names for Pandas DataFrame
    :param row_names: assing left column to row names for Pandas DataFrame
    :param credentials: provide own credentials
    :param start_cell: specify where to start capturing of the DataFrame; default is A1
    :type gfile: str
    :type wks_name: str
    :type col_names: bool
    :type row_names: bool
    :type credentials: class 'oauth2client.client.OAuth2Credentials'
    :type start_cell: str
    :returns: Pandas DataFrame
    :rtype: class 'pandas.core.frame.DataFrame'

    :Example:

        >>> from df2gspread import gspread2df as g2d
        >>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
        >>> df
                col1  col2
        field1     1     2
        field2     3     4
    """
    # access credentials
    credentials = get_credentials(credentials)
    # auth for gspread
    gc = gspread.authorize(credentials)
    try:
        # if gfile is file_id
        gc.open_by_key(gfile).__repr__()
        gfile_id = gfile
    except:
        # else look for file_id in drive
        gfile_id = get_file_id(credentials, gfile)
    if gfile_id is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible spreadsheet")
    wks = get_worksheet(gc, gfile_id, wks_name)
    if wks is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible worksheet")
    raw_data = wks.get_all_values()
    if not raw_data:
        raise ValueError(
            'Worksheet is empty or invalid.')
    # Translate the A1-style start cell into 1-based row/column indices.
    start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
    rows, cols = np.shape(raw_data)
    # Validate the requested window against the sheet's extent.
    if start_col_int > cols or (row_names and start_col_int + 1 > cols):
        raise RuntimeError(
            "Start col (%s) out of the table columns(%s)" % (start_col_int + row_names, cols))
    if start_row_int > rows or (col_names and start_row_int + 1 > rows):
        raise RuntimeError(
            "Start row (%s) out of the table rows(%s)" % (start_row_int + col_names, rows))
    # Crop everything before the start cell.
    raw_data = [row[start_col_int-1:] for row in raw_data[start_row_int-1:]]
    # Peel off header row and/or index column as requested.
    if row_names and col_names:
        row_names = [row[0] for row in raw_data[1:]]
        col_names = raw_data[0][1:]
        raw_data = [row[1:] for row in raw_data[1:]]
    elif row_names:
        row_names = [row[0] for row in raw_data]
        col_names = np.arange(len(raw_data[0]) - 1)
        raw_data = [row[1:] for row in raw_data]
    elif col_names:
        row_names = np.arange(len(raw_data) - 1)
        col_names = raw_data[0]
        raw_data = raw_data[1:]
    else:
        row_names = np.arange(len(raw_data))
        col_names = np.arange(len(raw_data[0]))
    df = pd.DataFrame([pd.Series(row) for row in raw_data], index=row_names)
    df.columns = col_names
    return df
[ "def", "download", "(", "gfile", ",", "wks_name", "=", "None", ",", "col_names", "=", "False", ",", "row_names", "=", "False", ",", "credentials", "=", "None", ",", "start_cell", "=", "'A1'", ")", ":", "# access credentials", "credentials", "=", "get_credent...
34.473684
21.673684
def memoize(method):
    """A new method which acts like the given method but memoizes arguments

    See https://en.wikipedia.org/wiki/Memoization for the general idea

    >>> @memoize
    ... def test(arg):
    ...     print('called')
    ...     return arg + 1
    >>> test(1)
    called
    2
    >>> test(2)
    called
    3
    >>> test(1)
    2

    The returned method also has an attached method "invalidate"
        which removes given values from the cache
        Or empties the cache if no values are given

    >>> test.invalidate(2)
    >>> test(1)
    2
    >>> test(2)
    called
    3
    """
    method.cache = {}

    def invalidate(*args, **kwargs):
        # No key at all => flush the entire cache.
        key = _represent_arguments(*args, **kwargs)
        if not key:
            method.cache = {}
        elif key in method.cache:
            del method.cache[key]
        else:
            raise KeyError(
                'Not prevously cached: %s(%s)' % (method.__name__, key))

    def new_method(*args, **kwargs):
        """Cache the arguments and return values of the call

        The key cached is the repr() of arguments
        This allows more types of values to be used as keys to the cache
        Such as lists and tuples
        """
        key = _represent_arguments(*args, **kwargs)
        try:
            return method.cache[key]
        except KeyError:
            result = method(*args, **kwargs)
            method.cache[key] = result
            return result

    new_method.invalidate = invalidate
    new_method.__doc__ = method.__doc__
    new_method.__name__ = 'memoize(%s)' % method.__name__
    return new_method
[ "def", "memoize", "(", "method", ")", ":", "method", ".", "cache", "=", "{", "}", "def", "invalidate", "(", "*", "arguments", ",", "*", "*", "keyword_arguments", ")", ":", "key", "=", "_represent_arguments", "(", "*", "arguments", ",", "*", "*", "keywo...
29.5
20.62963
def get_trunk_interfaces(auth, url, devid=None, devip=None):
    """Function takes devId as input to RESTFULL call to HP IMC platform

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :param devid: str requires devid of the target device

    :param devip: str of ipv4 address of the target device

    :return: list of dictionaries where each element of the list represents an interface which
    has been configured as a VLAN trunk port

    :rtype: list

    >>> from pyhpeimc.auth import *

    >>> from pyhpeimc.plat.vlanm import *

    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")

    >>> trunk_interfaces = get_trunk_interfaces('10', auth.creds, auth.url)

    >>> assert type(trunk_interfaces) is list

    >>> assert len(trunk_interfaces[0]) == 3

    >>> assert 'allowedVlans' in trunk_interfaces[0]

    >>> assert 'ifIndex' in trunk_interfaces[0]

    >>> assert 'pvid' in trunk_interfaces[0]

    >>> get_trunk_interfaces('350', auth.creds, auth.url)
    ['No trunk inteface']
    """
    # Resolve the device id from its IP when only the IP was supplied.
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devid) + \
                               "&start=1&size=5000&total=false"
    f_url = url + get_trunk_interfaces_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_trunk_interfaces = (json.loads(response.text))
            # NOTE(review): a two-key payload appears to signal that trunk
            # data is present; a single dict is wrapped in a list so the
            # return type is uniform — confirm against the IMC API docs.
            if len(dev_trunk_interfaces) == 2:
                if isinstance(dev_trunk_interfaces['trunkIf'], list):
                    return dev_trunk_interfaces['trunkIf']
                elif isinstance(dev_trunk_interfaces['trunkIf'], dict):
                    return [dev_trunk_interfaces['trunkIf']]
            else:
                dev_trunk_interfaces['trunkIf'] = ["No trunk inteface"]
                return dev_trunk_interfaces['trunkIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_trunk_interfaces: An Error has occured'
[ "def", "get_trunk_interfaces", "(", "auth", ",", "url", ",", "devid", "=", "None", ",", "devip", "=", "None", ")", ":", "if", "devip", "is", "not", "None", ":", "devid", "=", "get_dev_details", "(", "devip", ",", "auth", ",", "url", ")", "[", "'id'",...
37.684211
25.350877
def insert_loudest_triggers_option_group(parser, coinc_options=True):
    """ Add options to the optparser object for selecting templates in bins.

    Parameters
    -----------
    parser : object
        OptionParser instance.
    """
    opt_group = insert_bank_bins_option_group(parser)
    opt_group.title = "Options for finding loudest triggers."
    # NOTE(review): grouping of which options are coincidence-specific was
    # reconstructed from flattened text — confirm against upstream.
    if coinc_options:
        opt_group.add_argument("--statmap-file", default=None,
                               help="HDF format clustered coincident trigger "
                                    "result file.")
        opt_group.add_argument("--statmap-group", default="foreground",
                               help="Name of group in statmap file to "
                                    "get triggers.")
    opt_group.add_argument("--sngl-trigger-files", nargs="+", default=None,
                           action=types.MultiDetOptionAction,
                           help="HDF format merged single detector "
                                "trigger files.")
    opt_group.add_argument("--veto-file", default=None,
                           help="XML file with segment_definer and "
                                "segment table.")
    opt_group.add_argument("--veto-segment-name", default=None,
                           help="Name of segment to use as veto in "
                                "XML file's segment_definer table.")
    opt_group.add_argument("--search-n-loudest", type=int, default=None,
                           help="Number of triggers to search over.")
    opt_group.add_argument("--n-loudest", type=int, default=None,
                           help="Number of triggers to return in results.")
    return opt_group
[ "def", "insert_loudest_triggers_option_group", "(", "parser", ",", "coinc_options", "=", "True", ")", ":", "opt_group", "=", "insert_bank_bins_option_group", "(", "parser", ")", "opt_group", ".", "title", "=", "\"Options for finding loudest triggers.\"", "if", "coinc_opti...
52.375
22.34375
def setup_logging(config_path=None, log_level=logging.INFO, formatter='standard'):
    """Setup logging configuration
    """
    # Built-in default configuration: a single stdout handler wired to the
    # root logger, plus a quieter 'yapsy' logger.
    default_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            },
            'ui': {
                'format': '[%(levelname)s] %(message)s'
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': log_level,
                'formatter': formatter,
                'stream': 'ext://sys.stdout',
            },
        },
        'loggers': {
            '': {
                'handlers': ['console'],
                'level': log_level,
                'propagate': True,
            },
            'yapsy': {
                'handlers': ['console'],
                'level': logging.INFO,
            },
        },
    }
    config = default_config
    if config_path:
        # A YAML file, when present, replaces the default config entirely.
        if os.path.exists(config_path):
            with open(config_path, 'rt') as f:
                config = yaml.safe_load(f.read())
        else:
            print('Specified path does not exist: {}, '
                  'using default config'.format(config_path))
    logging.config.dictConfig(config)
[ "def", "setup_logging", "(", "config_path", "=", "None", ",", "log_level", "=", "logging", ".", "INFO", ",", "formatter", "=", "'standard'", ")", ":", "config", "=", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "False", ",", "'formatte...
27.791667
16.729167
def export_csv(self, spec, asset_refs, curves_dict):
    """
    :param asset_ref: name of the asset
    :param curves_dict: a dictionary tag -> loss curves
    """
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    # Presence of a 'builder' attribute distinguishes event_based_risk
    # output from classical_risk output.
    ebr = hasattr(self, 'builder')
    for key in sorted(curves_dict):
        recs = curves_dict[key]
        data = [['asset', 'loss_type', 'loss', 'period' if ebr else 'poe']]
        for li, loss_type in enumerate(self.loss_types):
            if ebr:  # event_based_risk
                array = recs[:, :, li]  # shape (A, P, LI)
                periods = self.builder.return_periods
                for aref, losses in zip(asset_refs, array):
                    for period, loss in zip(periods, losses):
                        data.append((aref, loss_type, loss, period))
            else:  # classical_risk
                array = recs[loss_type]  # shape (A,) loss_curve_dt
                for aref, losses, poes in zip(
                        asset_refs, array['losses'], array['poes']):
                    for loss, poe in zip(losses, poes):
                        data.append((aref, loss_type, loss, poe))
        # One CSV per tag; the spec (when given) prefixes the tag.
        dest = self.dstore.build_fname(
            'loss_curves', '%s-%s' % (spec, key) if spec else key, 'csv')
        writer.save(data, dest)
    return writer.getsaved()
[ "def", "export_csv", "(", "self", ",", "spec", ",", "asset_refs", ",", "curves_dict", ")", ":", "writer", "=", "writers", ".", "CsvWriter", "(", "fmt", "=", "writers", ".", "FIVEDIGITS", ")", "ebr", "=", "hasattr", "(", "self", ",", "'builder'", ")", "...
51.62963
15.407407
def _ProcessMessages(self, notification, queue_manager):
    """Does the real work with a single flow."""
    flow_obj = None
    session_id = notification.session_id
    try:
        # Take a lease on the flow:
        flow_name = session_id.FlowName()
        if flow_name in self.well_known_flows:
            # Well known flows are not necessarily present in the data store so
            # we need to create them instead of opening.
            expected_flow = self.well_known_flows[flow_name].__class__
            flow_obj = aff4.FACTORY.CreateWithLock(
                session_id,
                expected_flow,
                lease_time=self.well_known_flow_lease_time,
                blocking=False,
                token=self.token)
        else:
            flow_obj = aff4.FACTORY.OpenWithLock(
                session_id,
                lease_time=self.flow_lease_time,
                blocking=False,
                token=self.token)

        now = time.time()
        logging.debug("Got lock on %s", session_id)

        # If we get here, we now own the flow. We can delete the notifications
        # we just retrieved but we need to make sure we don't delete any that
        # came in later.
        queue_manager.DeleteNotification(session_id, end=notification.timestamp)

        if flow_name in self.well_known_flows:
            stats_collector_instance.Get().IncrementCounter(
                "well_known_flow_requests", fields=[str(session_id)])

            # We remove requests first and then process them in the thread pool.
            # On one hand this approach increases the risk of losing requests in
            # case the worker process dies. On the other hand, it doesn't hold
            # the lock while requests are processed, so other workers can
            # process well known flows requests as well.
            with flow_obj:
                responses = flow_obj.FetchAndRemoveRequestsAndResponses(session_id)

            flow_obj.ProcessResponses(responses, self.thread_pool)
        else:
            with flow_obj:
                self._ProcessRegularFlowMessages(flow_obj, notification)

        elapsed = time.time() - now
        logging.debug("Done processing %s: %s sec", session_id, elapsed)
        stats_collector_instance.Get().RecordEvent(
            "worker_flow_processing_time", elapsed, fields=[flow_obj.Name()])

        # Everything went well -> session can be run again.
        self.queued_flows.ExpireObject(session_id)
    except aff4.LockError:
        # Another worker is dealing with this flow right now, we just skip it.
        # We expect lots of these when there are few messages (the system isn't
        # highly loaded) but it is interesting when the system is under load to
        # know if we are pulling the optimal number of messages off the queue.
        # A high number of lock fails when there is plenty of work to do would
        # indicate we are wasting time trying to process work that has already
        # been completed by other workers.
        stats_collector_instance.Get().IncrementCounter("worker_flow_lock_error")
    except FlowProcessingError:
        # Do nothing as we expect the error to be correctly logged and accounted
        # already.
        pass
    except Exception as e:  # pylint: disable=broad-except
        # Something went wrong when processing this session. In order not to spin
        # here, we just remove the notification.
        logging.exception("Error processing session %s: %s", session_id, e)
        stats_collector_instance.Get().IncrementCounter(
            "worker_session_errors", fields=[str(type(e))])
        queue_manager.DeleteNotification(session_id)
[ "def", "_ProcessMessages", "(", "self", ",", "notification", ",", "queue_manager", ")", ":", "flow_obj", "=", "None", "session_id", "=", "notification", ".", "session_id", "try", ":", "# Take a lease on the flow:", "flow_name", "=", "session_id", ".", "FlowName", ...
42.567901
23.45679
async def create_signing_key(self, seed: str = None, metadata: dict = None) -> KeyInfo:
    """
    Create a new signing key pair.

    Raise WalletState if wallet is closed, ExtantRecord if verification key already exists.

    :param seed: optional seed allowing deterministic key creation
    :param metadata: optional metadata to store with key pair
    :return: KeyInfo for new key pair
    """

    LOGGER.debug('Wallet.create_signing_key >>> seed: [SEED], metadata: %s', metadata)

    # A falsy handle means the wallet has not been opened (or was closed).
    if not self.handle:
        LOGGER.debug('Wallet.create_signing_key <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        verkey = await crypto.create_key(self.handle, json.dumps({'seed': seed} if seed else {}))
    except IndyError as x_indy:
        # Map the indy-sdk duplicate-key error onto the library's own type;
        # re-raise anything else untouched.
        if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
            LOGGER.debug('Wallet.create_signing_key <!< Verification key already present in wallet %s', self.name)
            raise ExtantRecord('Verification key already present in wallet {}'.format(self.name))
        LOGGER.debug('Wallet.create_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
        raise

    await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {}))  # coerce None to empty

    rv = KeyInfo(verkey, metadata or {})
    LOGGER.debug('Wallet.create_signing_key <<< %s', rv)
    return rv
[ "async", "def", "create_signing_key", "(", "self", ",", "seed", ":", "str", "=", "None", ",", "metadata", ":", "dict", "=", "None", ")", "->", "KeyInfo", ":", "LOGGER", ".", "debug", "(", "'Wallet.create_signing_key >>> seed: [SEED], metadata: %s'", ",", "metada...
47.709677
33.709677
def disconnect(self):
    """
    Disconnect from the Kafka broker.

    This is used to implement disconnection on timeout as a workaround for
    Kafka connections occasionally getting stuck on the server side under
    load. Requests are not cancelled, so they will be retried.
    """
    # Nothing to do when no protocol instance is attached.
    if not self.proto:
        return
    log.debug('%r Disconnecting from %r', self,
              self.proto.transport.getPeer())
    self.proto.transport.loseConnection()
[ "def", "disconnect", "(", "self", ")", ":", "if", "self", ".", "proto", ":", "log", ".", "debug", "(", "'%r Disconnecting from %r'", ",", "self", ",", "self", ".", "proto", ".", "transport", ".", "getPeer", "(", ")", ")", "self", ".", "proto", ".", "...
42.090909
21.363636
def get_max_instability(self, min_voltage=None, max_voltage=None):
    """
    The maximum instability along a path for a specific voltage range.

    Args:
        min_voltage: The minimum allowable voltage.
        max_voltage: The maximum allowable voltage.

    Returns:
        Maximum decomposition energy over all compounds along the
        insertion path (restricted to the optional voltage window),
        or None when no decomposition energies are recorded.
    """
    energies = [
        e
        for pair in self._select_in_voltage_range(min_voltage, max_voltage)
        for e in (pair.decomp_e_charge, pair.decomp_e_discharge)
        if e is not None
    ]
    return max(energies) if energies else None
[ "def", "get_max_instability", "(", "self", ",", "min_voltage", "=", "None", ",", "max_voltage", "=", "None", ")", ":", "data", "=", "[", "]", "for", "pair", "in", "self", ".", "_select_in_voltage_range", "(", "min_voltage", ",", "max_voltage", ")", ":", "i...
42.157895
21.421053
def _static(self, target, value): """PHP's "static" """ return 'static ' + self.__p(ast.Assign(targets=[target],value=value))
[ "def", "_static", "(", "self", ",", "target", ",", "value", ")", ":", "return", "'static '", "+", "self", ".", "__p", "(", "ast", ".", "Assign", "(", "targets", "=", "[", "target", "]", ",", "value", "=", "value", ")", ")" ]
29.2
16.8
def _write_file(iface, data, folder, pattern):
    '''
    Writes a file to disk
    '''
    # Build the destination path from the interface name and filename pattern.
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        # Refuse to create missing directories; report the problem loudly.
        msg = '{0} cannot be written. {1} does not exist'.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.files.flopen(filename, 'w') as fh:
        fh.write(salt.utils.stringutils.to_str(data))
    return filename
[ "def", "_write_file", "(", "iface", ",", "data", ",", "folder", ",", "pattern", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "pattern", ".", "format", "(", "iface", ")", ")", "if", "not", "os", ".", "path", ".", ...
35.461538
15.923077
def get_contents(self, element):
    """
    Retrieve the contents of an element

    :param element: The XML Element object
    :type element: etree._Element

    :return: A list of responses
    :rtype : list of Response
    """
    responses = []
    for child in element:
        # Only 'response' and 'template' children carry content.
        if child.tag in ['response', 'template']:
            responses.append(Response(self.trigger, child, self.file_path))
    return responses
[ "def", "get_contents", "(", "self", ",", "element", ")", ":", "return", "[", "Response", "(", "self", ".", "trigger", ",", "child", ",", "self", ".", "file_path", ")", "for", "child", "in", "element", "if", "child", ".", "tag", "in", "[", "'response'",...
35.363636
11.727273
def drop_trailing_zeros(num):
    """
    Drops the trailing zeros in a float that is printed.
    """
    # '%f' renders with six decimal places; strip the zeros, then any bare dot.
    text = ('%f' % num).rstrip('0')
    return text[:-1] if text.endswith('.') else text
[ "def", "drop_trailing_zeros", "(", "num", ")", ":", "txt", "=", "'%f'", "%", "(", "num", ")", "txt", "=", "txt", ".", "rstrip", "(", "'0'", ")", "if", "txt", ".", "endswith", "(", "'.'", ")", ":", "txt", "=", "txt", "[", ":", "-", "1", "]", "...
22.888889
13.333333
def _parse_cluster_manage_command(cls, args, action): """ Parse command line arguments for cluster manage commands. """ argparser = ArgumentParser(prog="cluster_manage_command") group = argparser.add_mutually_exclusive_group(required=True) group.add_argument("--id", dest="cluster_id", help="execute on cluster with this id") group.add_argument("--label", dest="label", help="execute on cluster with this label") if action == "remove" or action == "update": argparser.add_argument("--private_dns", help="the private_dns of the machine to be updated/removed", required=True) if action == "update": argparser.add_argument("--command", help="the update command to be executed", required=True, choices=["replace"]) arguments = argparser.parse_args(args) return arguments
[ "def", "_parse_cluster_manage_command", "(", "cls", ",", "args", ",", "action", ")", ":", "argparser", "=", "ArgumentParser", "(", "prog", "=", "\"cluster_manage_command\"", ")", "group", "=", "argparser", ".", "add_mutually_exclusive_group", "(", "required", "=", ...
39.125
24.208333
def get_extended_summaryf(self, *args, **kwargs):
    """Extract the extended summary from a function docstring

    Decorator form of :meth:`get_extended_summary` (similar to
    :meth:`get_sectionsf`): the wrapped function's docstring becomes the
    first argument of that method.

    Parameters
    ----------
    ``*args`` and ``**kwargs``
        Forwarded to :meth:`get_extended_summary` after the docstring

    Returns
    -------
    function
        Decorator that registers the function's docstring and returns the
        function unchanged"""
    def decorator(func):
        # An undocumented function contributes an empty docstring.
        self.get_extended_summary(func.__doc__ or '', *args, **kwargs)
        return func
    return decorator
[ "def", "get_extended_summaryf", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "func", "(", "f", ")", ":", "doc", "=", "f", ".", "__doc__", "self", ".", "get_extended_summary", "(", "doc", "or", "''", ",", "*", "args", ","...
36.681818
23.636364
def __finish_initializing(self):
    """ Handle any initialization after arguments & config has been parsed. """
    if self.args.debug or self.args.trace:
        # Set the console (StreamHandler) to allow debug statements.
        if self.args.debug:
            self.console.setLevel(logging.DEBUG)
        self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
        # Set the global level to debug.
        if self.args.debug:
            self.log.setLevel(logging.DEBUG)
    if self.args.log or self.log_file:
        # Allow the user to override the default log file handler.
        try:
            self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
        except Exception:
            # __main__ provides no factory (or it failed); fall back to a plain FileHandler.
            self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
        self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
        # The file handler always records at DEBUG regardless of console level.
        self.log_file_handler.setLevel(logging.DEBUG)
        self.log.addHandler(self.log_file_handler)
    # Allow cli.log, args & self to be accessed from __main__
    # (only when the script has not already defined these names itself).
    if not hasattr(sys.modules['__main__'], 'log'):
        sys.modules['__main__'].log = self.log
    if not hasattr(sys.modules['__main__'], 'cli'):
        sys.modules['__main__'].cli = self
    if not hasattr(sys.modules['__main__'], 'args'):
        sys.modules['__main__'].args = self.args
[ "def", "__finish_initializing", "(", "self", ")", ":", "if", "self", ".", "args", ".", "debug", "or", "self", ".", "args", ".", "trace", ":", "# Set the console (StreamHandler) to allow debug statements.", "if", "self", ".", "args", ".", "debug", ":", "self", ...
41.027027
26.945946
def run_samtools_faidx(job, ref_id):
    """
    Use SAMtools to create reference index file

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str ref_id: FileStoreID for the reference genome
    :return: FileStoreID for reference index
    :rtype: str
    """
    # NOTE(review): this logs before the index is actually created.
    job.fileStore.logToMaster('Created reference index')
    work_dir = job.fileStore.getLocalTempDir()
    # Stage the reference into the job's temp dir so the container can see it.
    job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
    # Call: Samtools faidx, via a digest-pinned docker image for reproducibility.
    command = ['faidx', 'ref.fasta']
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai'))
[ "def", "run_samtools_faidx", "(", "job", ",", "ref_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Created reference index'", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "job", ".", "fileStore", ".", "r...
46
19.25
def send_message(self, to=None, msg=None):
    """
    method to send a message to a user

    Parameters:
        to -> recipient
        msg -> message to send
    """
    # Only include parameters that were actually supplied.
    values = {key: val
              for key, val in (("to", to), ("msg", msg))
              if val is not None}
    return self._query(self.root_url + "send_message", values)
[ "def", "send_message", "(", "self", ",", "to", "=", "None", ",", "msg", "=", "None", ")", ":", "url", "=", "self", ".", "root_url", "+", "\"send_message\"", "values", "=", "{", "}", "if", "to", "is", "not", "None", ":", "values", "[", "\"to\"", "]"...
29
10.214286
def get_all_role_config_groups(self):
    """
    Get a list of role configuration groups in the service.

    @return: A list of ApiRoleConfigGroup objects.
    @since: API v3
    """
    resource_root = self._get_resource_root()
    cluster_name = self._get_cluster_name()
    return role_config_groups.get_all_role_config_groups(
        resource_root, self.name, cluster_name)
[ "def", "get_all_role_config_groups", "(", "self", ")", ":", "return", "role_config_groups", ".", "get_all_role_config_groups", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "name", ",", "self", ".", "_get_cluster_name", "(", ")", ")" ]
34
15.777778
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    function_name = None
    inside_function = False
    for index, tok in enumerate(tokens):
        tok_type, tok_string = tok[0], tok[1]
        # A NEWLINE token ends the current signature/body line of interest.
        if tok_type == tokenize.NEWLINE:
            inside_function = False
        if tok_type == tokenize.NAME:
            if tok_string == "def":
                # The NAME following 'def' is the function's name.
                function_name = tokens[index + 1][1]
                inside_function = True
                keyword_args[function_name] = []
            elif inside_function and tokens[index + 1][1] == '=':
                # keyword argument: a NAME immediately followed by '='
                keyword_args[function_name].append(tok_string)
    return keyword_args
[ "def", "enumerate_keyword_args", "(", "tokens", ")", ":", "keyword_args", "=", "{", "}", "inside_function", "=", "False", "for", "index", ",", "tok", "in", "enumerate", "(", "tokens", ")", ":", "token_type", "=", "tok", "[", "0", "]", "token_string", "=", ...
39
11.095238
def link(self, *args):
    """
    Start assembling a Map/Reduce operation.
    A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.link`.

    :rtype: :class:`~riak.mapreduce.RiakMapReduce`
    """
    # Seed a fresh Map/Reduce job with this object as its sole input.
    mapred = RiakMapReduce(self.client)
    mapred.add(self.bucket.name, self.key)
    return mapred.link(*args)
[ "def", "link", "(", "self", ",", "*", "args", ")", ":", "mr", "=", "RiakMapReduce", "(", "self", ".", "client", ")", "mr", ".", "add", "(", "self", ".", "bucket", ".", "name", ",", "self", ".", "key", ")", "return", "mr", ".", "link", "(", "*",...
32.2
12
def put_all(self, map):
    """
    Copies all of the mappings from the specified map to this map. No atomicity guarantees are given. In the case of a failure,
    some of the key-value tuples may get written, while others are not.

    :param map: (dict), map which includes mappings to be stored in this map.
    """
    # NOTE: parameter name ``map`` shadows the builtin; kept for API compatibility.
    entries = {}
    for key, value in six.iteritems(map):
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        # Serialize both key and value before handing them to the codec.
        entries[self._to_data(key)] = self._to_data(value)
    self._encode_invoke(replicated_map_put_all_codec, entries=entries)
[ "def", "put_all", "(", "self", ",", "map", ")", ":", "entries", "=", "{", "}", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "map", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "check_not_none", "(", "val...
49.307692
25.615385
def cancel_milestone_payment(session, milestone_id):
    """
    Cancel a milestone payment
    """
    params_data = {
        'action': 'cancel',
    }
    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=cancel
    endpoint = 'milestones/{}'.format(milestone_id)
    response = make_put_request(session, endpoint, params_data=params_data)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise MilestoneNotCancelledException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id'])
[ "def", "cancel_milestone_payment", "(", "session", ",", "milestone_id", ")", ":", "params_data", "=", "{", "'action'", ":", "'cancel'", ",", "}", "# PUT /api/projects/0.1/milestones/{milestone_id}/?action=release", "endpoint", "=", "'milestones/{}'", ".", "format", "(", ...
35.111111
13.111111
def issue_reactions(self, issue_number):
    """Get reactions of an issue"""
    path = urijoin("issues", str(issue_number), "reactions")
    # Page through reactions oldest-first, ordered by update time.
    payload = {
        'per_page': PER_PAGE,
        'direction': 'asc',
        'sort': 'updated'
    }
    return self.fetch_items(path, payload)
[ "def", "issue_reactions", "(", "self", ",", "issue_number", ")", ":", "payload", "=", "{", "'per_page'", ":", "PER_PAGE", ",", "'direction'", ":", "'asc'", ",", "'sort'", ":", "'updated'", "}", "path", "=", "urijoin", "(", "\"issues\"", ",", "str", "(", ...
28.181818
17.181818
def fill(self, term_dict, terms):
    # type: (Dict[int, Set[Type[Rule]]], Any) -> None
    """
    Fill the first row of the structure with the nonterminals directly
    rewritable to each input terminal.

    :param term_dict: Dictionary of rules directly rewritable to terminal.
    Key is hash of terminal, value is set of rules with key terminal at the right side.
    :param terms: Input sequence of terminal.
    """
    for position, terminal in enumerate(terms):
        self._field[0][position] += term_dict[hash(terminal)]
[ "def", "fill", "(", "self", ",", "term_dict", ",", "terms", ")", ":", "# type: (Dict[int, Set[Type[Rule]]], Any) -> None", "for", "i", "in", "range", "(", "len", "(", "terms", ")", ")", ":", "t", "=", "terms", "[", "i", "]", "self", ".", "_field", "[", ...
48.181818
18.545455
def ValidateIapJwtFromComputeEngine(iap_jwt, cloud_project_number, backend_service_id):
    """Validates an IAP JWT for your (Compute|Container) Engine service.

    Args:
      iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
      cloud_project_number: The project *number* for your Google Cloud project.
          This is returned by 'gcloud projects describe $PROJECT_ID', or
          in the Project Info card in Cloud Console.
      backend_service_id: The ID of the backend service used to access the
          application. See
          https://cloud.google.com/iap/docs/signed-headers-howto
          for details on how to get this value.

    Returns:
      A tuple of (user_id, user_email).

    Raises:
      IAPValidationFailedError: if the validation has failed.
    """
    # The expected audience of a GCE/GKE-backed IAP JWT is keyed on the
    # project number and the backend service id.
    expected_audience = "/projects/{}/global/backendServices/{}".format(
        cloud_project_number, backend_service_id)
    return ValidateIapJwt(iap_jwt, expected_audience)
[ "def", "ValidateIapJwtFromComputeEngine", "(", "iap_jwt", ",", "cloud_project_number", ",", "backend_service_id", ")", ":", "expected_audience", "=", "\"/projects/{}/global/backendServices/{}\"", ".", "format", "(", "cloud_project_number", ",", "backend_service_id", ")", "ret...
42.681818
22.636364
def add_portal(self, origin, destination, symmetrical=False, **kwargs):
    """Connect the origin to the destination with a :class:`Portal`.

    Keyword arguments are the :class:`Portal`'s
    attributes. Exception: if keyword ``symmetrical`` == ``True``,
    a mirror-:class:`Portal` will be placed in the opposite direction
    between the same nodes. It will always appear to have the
    placed :class:`Portal`'s stats, and any change to the mirror
    :class:`Portal`'s stats will affect the placed :class:`Portal`.
    """
    # Accept either Node objects or raw node names for both endpoints.
    if isinstance(origin, Node):
        origin = origin.name
    if isinstance(destination, Node):
        destination = destination.name
    super().add_edge(origin, destination, **kwargs)
    if symmetrical:
        # The reverse edge is flagged as a mirror so it proxies the
        # forward portal's stats rather than holding its own copy.
        self.add_portal(destination, origin, is_mirror=True)
[ "def", "add_portal", "(", "self", ",", "origin", ",", "destination", ",", "symmetrical", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "origin", ",", "Node", ")", ":", "origin", "=", "origin", ".", "name", "if", "isinstance"...
44.842105
17.842105
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff

    Loads the compiled module's code object, collects its symbols, and warns
    about references (``__file__``, ``__path__``, source-dependent ``inspect``
    functions, ``python -m`` entry points) that break inside a zipped egg.
    Returns True when the module appears zip-safe.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir)+1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # The .pyc header grew from 8 to 12 bytes in Python 3.3 (source size field).
    if sys.version_info < (3, 3):
        skip = 8   # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    # BUGFIX: close the file even if marshal.load raises (was a bare open()).
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUGFIX: a missing comma between 'getfile' and 'getsourcelines'
        # silently merged them into one bogus name, so neither was flagged.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4":  # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
[ "def", "scan_module", "(", "egg_dir", ",", "base", ",", "name", ",", "stubs", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "base", ",", "name", ")", "if", "filename", "[", ":", "-", "1", "]", "in", "stubs", ":", "return", "True...
41.441176
16.764706
def _build_path(self):
    '''
    Constructs the actual request URL with accompanying query if any.

    Returns:
        None: But does modify self.path, which contains the final
            request path sent to the server.

    '''
    if not self.path:
        self.path = '/'
    if self.uri_parameters:
        # Path parameters (';'-separated per RFC 3986) come before the query.
        self.path = self.path + ';' + requote_uri(self.uri_parameters)
    if self.query:
        self.path = (self.path + '?' + self.query)
    if self.params:
        try:
            # params may be a dict (encoded by _dict_to_query) or a raw string.
            if self.query:
                # A query string already exists; base_query presumably makes
                # _dict_to_query append instead of starting with '?' -- TODO confirm.
                self.path = self.path + self._dict_to_query(
                    self.params, base_query=True)
            else:
                self.path = self.path + self._dict_to_query(self.params)
        except AttributeError:
            # Not dict-like: treat params as a preformatted query string.
            self.path = self.path + '?' + self.params
    self.path = requote_uri(self.path)
    self.req_url = urlunparse(
        (self.scheme, self.host, (self.path or ''), '', '', ''))
[ "def", "_build_path", "(", "self", ")", ":", "if", "not", "self", ".", "path", ":", "self", ".", "path", "=", "'/'", "if", "self", ".", "uri_parameters", ":", "self", ".", "path", "=", "self", ".", "path", "+", "';'", "+", "requote_uri", "(", "self...
31.28125
23.59375
def parse_polygonal_poi(coords, response):
    """
    Parse areal POI way polygons from OSM node coords.

    Parameters
    ----------
    coords : dict
        dict of node IDs and their lat, lon coordinates
    response : dict
        OSM element; only elements with ``type == 'way'`` are parsed

    Returns
    -------
    dict of POIs containing each's nodes, polygon geometry, and osmid,
    or None for non-way elements and invalid geometries
    """
    if 'type' in response and response['type'] == 'way':
        nodes = response['nodes']
        try:
            # Build the ring in (lon, lat) order -- i.e. (x, y) for shapely.
            polygon = Polygon([(coords[node]['lon'], coords[node]['lat']) for node in nodes])
            poi = {'nodes': nodes, 'geometry': polygon, 'osmid': response['id']}
            if 'tags' in response:
                # Flatten OSM tags into top-level keys of the POI dict.
                for tag in response['tags']:
                    poi[tag] = response['tags'][tag]
            return poi
        except Exception:
            # Degenerate rings (e.g. too few points) raise; log and fall through.
            log('Polygon has invalid geometry: {}'.format(nodes))
    return None
[ "def", "parse_polygonal_poi", "(", "coords", ",", "response", ")", ":", "if", "'type'", "in", "response", "and", "response", "[", "'type'", "]", "==", "'way'", ":", "nodes", "=", "response", "[", "'nodes'", "]", "try", ":", "polygon", "=", "Polygon", "("...
27.25
21.5
def blit(self, surface, pos=(0, 0)):
    """
    Blits a surface on the screen at pos

    :param surface: Surface to blit
    :param pos: Top left corner to start blitting
    :type surface: Surface
    :type pos: tuple
    """
    off_x, off_y = pos
    for col in range(surface.width):
        for row in range(surface.height):
            target = (col + off_x, row + off_y)
            # Pixels landing outside the screen are silently clipped.
            if self.point_on_screen(target):
                self.matrix[target[0]][target[1]] = surface.matrix[col][row]
[ "def", "blit", "(", "self", ",", "surface", ",", "pos", "=", "(", "0", ",", "0", ")", ")", ":", "for", "x", "in", "range", "(", "surface", ".", "width", ")", ":", "for", "y", "in", "range", "(", "surface", ".", "height", ")", ":", "point", "=...
35.571429
10.142857
def x509_name(name):
    """Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`.

    If ``name`` is a string, :py:func:`parse_name` is used to parse it.

    >>> x509_name('/C=AT/CN=example.com')
    <Name(C=AT,CN=example.com)>
    >>> x509_name([('C', 'AT'), ('CN', 'example.com')])
    <Name(C=AT,CN=example.com)>
    """
    if isinstance(name, six.string_types):
        name = parse_name(name)

    # Map each short attribute name (e.g. 'C', 'CN') to its OID; values are
    # coerced to text as the cryptography API requires.
    return x509.Name([x509.NameAttribute(NAME_OID_MAPPINGS[typ], force_text(value)) for typ, value in name])
[ "def", "x509_name", "(", "name", ")", ":", "if", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ":", "name", "=", "parse_name", "(", "name", ")", "return", "x509", ".", "Name", "(", "[", "x509", ".", "NameAttribute", "(", "NAME_OID_MA...
37
20.285714
def collect_impl(self):
    """
    overrides DistJarChange and DistClassChange from the underlying
    DistChange with DistJarReport and DistClassReport instances
    """
    # Each change type maps to the report type that should replace it.
    # Order matters: jar changes are checked before class changes, exactly
    # as the original if/elif chain did.
    report_types = (
        (DistJarChange, DistJarReport),
        (DistClassChange, DistClassReport),
    )
    for c in DistChange.collect_impl(self):
        for change_type, report_type in report_types:
            if isinstance(c, change_type):
                if c.is_change():
                    # Wrap the change in a report with its own sub-reporter.
                    nr = self.reporter.subreporter(c.entry, report_type.report_name)
                    c = report_type(c.ldata, c.rdata, c.entry, nr)
                break
        yield c
[ "def", "collect_impl", "(", "self", ")", ":", "for", "c", "in", "DistChange", ".", "collect_impl", "(", "self", ")", ":", "if", "isinstance", "(", "c", ",", "DistJarChange", ")", ":", "if", "c", ".", "is_change", "(", ")", ":", "ln", "=", "DistJarRep...
42.944444
16.388889
def reduce_min(attrs, inputs, proto_obj):
    """Reduce the array along a given axis by minimum value"""
    # ONNX calls the reduction axes 'axes'; MXNet expects 'axis'.
    renamed_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'min', renamed_attrs, inputs
[ "def", "reduce_min", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", "}", ")", "return", "'min'", ",", "new_attrs", ",", "inputs" ]
54
11
def FromJson(json):
    """
    Convert a json object to a ContractParameter object

    Args:
        item (dict): The item to convert to a ContractParameter object

    Returns:
        ContractParameter
    """
    # NOTE: the parameter ``json`` is the parsed dict (shadows the json module)
    # and the local ``type`` shadows the builtin.
    type = ContractParameterType.FromString(json['type'])

    value = json['value']
    param = ContractParameter(type=type, value=None)

    if type == ContractParameterType.Signature or type == ContractParameterType.ByteArray:
        param.Value = bytearray.fromhex(value)

    elif type == ContractParameterType.Boolean:
        param.Value = bool(value)

    elif type == ContractParameterType.Integer:
        param.Value = int(value)

    elif type == ContractParameterType.Hash160:
        param.Value = UInt160.ParseString(value)

    elif type == ContractParameterType.Hash256:
        param.Value = UInt256.ParseString(value)

    # @TODO Not sure if this is working...
    elif type == ContractParameterType.PublicKey:
        param.Value = ECDSA.decode_secp256r1(value).G

    elif type == ContractParameterType.String:
        param.Value = str(value)

    elif type == ContractParameterType.Array:
        # Recursively convert each nested parameter.
        val = [ContractParameter.FromJson(item) for item in value]
        param.Value = val

    # Any unrecognized type falls through with Value left as None.
    return param
[ "def", "FromJson", "(", "json", ")", ":", "type", "=", "ContractParameterType", ".", "FromString", "(", "json", "[", "'type'", "]", ")", "value", "=", "json", "[", "'value'", "]", "param", "=", "ContractParameter", "(", "type", "=", "type", ",", "value",...
30.534884
22.395349
def _add_sub_elements_from_dict(parent, sub_dict):
    """
    Add SubElements to the parent element.

    :param parent: ElementTree.Element: The parent element for the newly
        created SubElement.
    :param sub_dict: dict: Used to create a new SubElement. See
        `dict_to_xml_schema` method docstring for more information.
        e.g.: {"example": {
                  "attrs": {
                      "key1": "value1",
                      ...
                  },
                  ...
              }}
    """
    for key, value in sub_dict.items():
        # A list value means the element repeats under the same tag.
        # Normalizing both shapes to a list removes the previously
        # duplicated branch bodies (behavior is unchanged).
        element_specs = value if isinstance(value, list) else [value]
        for spec in element_specs:
            sub_element = ET.SubElement(parent, key)
            _add_element_attrs(sub_element, spec.get("attrs", {}))
            children = spec.get("children", None)
            if isinstance(children, dict):
                # Nested dict: recurse to build the subtree.
                _add_sub_elements_from_dict(sub_element, children)
            elif isinstance(children, str):
                # Plain string: becomes the element's text content.
                sub_element.text = children
[ "def", "_add_sub_elements_from_dict", "(", "parent", ",", "sub_dict", ")", ":", "for", "key", ",", "value", "in", "sub_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "repeated_element", "in", "value", ...
33.484848
16.636364
def add_to_enum(self, clsdict):
    """
    Compile XML mappings in addition to base add behavior.
    """
    # Perform the normal member registration first, then record this
    # member's XML mapping on the enumeration class dict.
    super(XmlMappedEnumMember, self).add_to_enum(clsdict)
    self.register_xml_mapping(clsdict)
[ "def", "add_to_enum", "(", "self", ",", "clsdict", ")", ":", "super", "(", "XmlMappedEnumMember", ",", "self", ")", ".", "add_to_enum", "(", "clsdict", ")", "self", ".", "register_xml_mapping", "(", "clsdict", ")" ]
36.333333
9
def align_unwrapped(sino):
    """Align an unwrapped phase array to zero-phase

    All operations are performed in-place.
    """
    samples = []
    if len(sino.shape) == 2:
        # 2D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0])
        samples.append(sino[:, 1])
        samples.append(sino[:, 2])
        samples.append(sino[:, -1])
        samples.append(sino[:, -2])
    elif len(sino.shape) == 3:
        # 3D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0, 0])
        samples.append(sino[:, 0, -1])
        samples.append(sino[:, -1, 0])
        samples.append(sino[:, -1, -1])
        samples.append(sino[:, 0, 1])
    # find discontinuities in the samples
    steps = np.zeros((len(samples), samples[0].shape[0]))
    for i in range(len(samples)):
        t = np.unwrap(samples[i])
        # The difference between raw and unwrapped samples marks 2*pi jumps.
        steps[i] = samples[i] - t
    # if the majority believes so, add a step of PI
    # (mode across the sampled edges picks the consensus offset per frame)
    remove = mode(steps, axis=0)[0][0]
    # obtain divmod min
    twopi = 2*np.pi
    minimum = divmod_neg(np.min(sino), twopi)[0]
    remove += minimum*twopi
    # Subtract the per-frame offset in place.
    for i in range(len(sino)):
        sino[i] -= remove[i]
[ "def", "align_unwrapped", "(", "sino", ")", ":", "samples", "=", "[", "]", "if", "len", "(", "sino", ".", "shape", ")", "==", "2", ":", "# 2D", "# take 1D samples at beginning and end of array", "samples", ".", "append", "(", "sino", "[", ":", ",", "0", ...
28.9
14
def parse_template(input_filename, output_filename=''):
    """ Parses a template file
        Replaces all occurences of @@problem_id@@ by the value
        of the 'problem_id' key in data dictionary

        input_filename: file to parse
        output_filename: if not specified, overwrite input file
    """
    data = load_input()

    with open(input_filename, 'rb') as template_file:
        template = template_file.read().decode("utf-8")

    # Check if 'input' in data
    if 'input' not in data:
        raise ValueError("Could not find 'input' in data")

    # Parse template
    for field in data['input']:
        # File-upload fields are dicts carrying both a filename and a value.
        subs = ["filename", "value"] if isinstance(data['input'][field], dict) and "filename" in data['input'][field] and "value" in data['input'][field] else [""]
        for sub in subs:
            displayed_field = field + (":" if sub else "") + sub
            # NOTE(review): displayed_field is interpolated unescaped; field
            # names containing regex metacharacters would misbehave.
            regex = re.compile("@([^@]*)@" + displayed_field + '@([^@]*)@')
            for prefix, postfix in set(regex.findall(template)):
                if sub == "value":
                    # BUGFIX: close the uploaded file (was open(...).read()
                    # with a leaked handle).
                    with open(data['input'][field][sub], 'rb') as upload:
                        text = upload.read().decode('utf-8')
                elif sub:
                    text = data['input'][field][sub]
                else:
                    text = data['input'][field]
                # Re-apply the surrounding prefix/postfix to every line of the value.
                rep = "\n".join([prefix + v + postfix for v in text.splitlines()])
                template = template.replace("@{0}@{1}@{2}@".format(prefix, displayed_field, postfix), rep)

    if output_filename == '':
        output_filename = input_filename

    # Ensure directory of resulting file exists
    try:
        os.makedirs(os.path.dirname(output_filename))
    except OSError:
        pass

    # Write file
    with open(output_filename, 'wb') as out_file:
        out_file.write(template.encode("utf-8"))
[ "def", "parse_template", "(", "input_filename", ",", "output_filename", "=", "''", ")", ":", "data", "=", "load_input", "(", ")", "with", "open", "(", "input_filename", ",", "'rb'", ")", "as", "file", ":", "template", "=", "file", ".", "read", "(", ")", ...
40.159091
21.909091
def _check_satisfy(self, rand_box, gt_boxes):
    """
    check if overlap with any gt box is larger than threshold

    :param rand_box: sampled (left, top, right, bottom) crop candidate
    :param gt_boxes: ground-truth boxes; columns 1-4 hold l, t, r, b
        (column 0 presumably holds the class label -- not used here)
    :return: per-box IoU array when the candidate meets self.min_overlap
        and the configured gt constraint, otherwise None
    """
    l, t, r, b = rand_box
    num_gt = gt_boxes.shape[0]
    # Broadcast the candidate edges to one entry per ground-truth box.
    ls = np.ones(num_gt) * l
    ts = np.ones(num_gt) * t
    rs = np.ones(num_gt) * r
    bs = np.ones(num_gt) * b
    # Clip the candidate against each gt box to obtain the intersections.
    mask = np.where(ls < gt_boxes[:, 1])[0]
    ls[mask] = gt_boxes[mask, 1]
    mask = np.where(ts < gt_boxes[:, 2])[0]
    ts[mask] = gt_boxes[mask, 2]
    mask = np.where(rs > gt_boxes[:, 3])[0]
    rs[mask] = gt_boxes[mask, 3]
    mask = np.where(bs > gt_boxes[:, 4])[0]
    bs[mask] = gt_boxes[mask, 4]
    # Negative extents mean no overlap; clamp them to zero.
    w = rs - ls
    w[w < 0] = 0
    h = bs - ts
    h[h < 0] = 0
    inter_area = h * w
    # union = candidate area + gt area - intersection
    union_area = np.ones(num_gt) * max(0, r - l) * max(0, b - t)
    union_area += (gt_boxes[:, 3] - gt_boxes[:, 1]) * (gt_boxes[:, 4] - gt_boxes[:, 2])
    union_area -= inter_area
    ious = inter_area / union_area
    ious[union_area <= 0] = 0
    max_iou = np.amax(ious)
    if max_iou < self.min_overlap:
        return None
    # check ground-truth constraint
    if self.config['gt_constraint'] == 'center':
        # Every overlapped gt box must have its center inside the candidate.
        for i in range(ious.shape[0]):
            if ious[i] > 0:
                gt_x = (gt_boxes[i, 1] + gt_boxes[i, 3]) / 2.0
                gt_y = (gt_boxes[i, 2] + gt_boxes[i, 4]) / 2.0
                if gt_x < l or gt_x > r or gt_y < t or gt_y > b:
                    return None
    elif self.config['gt_constraint'] == 'corner':
        # Every overlapped gt box must lie entirely inside the candidate.
        for i in range(ious.shape[0]):
            if ious[i] > 0:
                if gt_boxes[i, 1] < l or gt_boxes[i, 3] > r \
                    or gt_boxes[i, 2] < t or gt_boxes[i, 4] > b:
                    return None
    return ious
[ "def", "_check_satisfy", "(", "self", ",", "rand_box", ",", "gt_boxes", ")", ":", "l", ",", "t", ",", "r", ",", "b", "=", "rand_box", "num_gt", "=", "gt_boxes", ".", "shape", "[", "0", "]", "ls", "=", "np", ".", "ones", "(", "num_gt", ")", "*", ...
39.695652
12.086957
def prepare(self):
    """
    All codes that create parameters should be put into 'setup' function.
    """
    self.output_dim = 10
    # Encoder maps the input down to the internal representation.
    self.encoder = Chain(self.input_dim).stack(Dense(self.internal_layer_size, 'tanh'))
    # Decoder reconstructs the input from that representation (autoencoder branch).
    self.decoder = Chain(self.internal_layer_size).stack(Dense(self.input_dim))
    # Classifier predicts one of output_dim classes from the internal representation.
    self.classifier = Chain(self.internal_layer_size).stack(Dense(50, 'tanh'), Dense(self.output_dim), Softmax())
    self.register_inner_layers(self.encoder, self.decoder, self.classifier)
    # Integer vector of target class labels, registered as an external input.
    self.target_input = T.ivector('target')
    self.register_external_inputs(self.target_input)
[ "def", "prepare", "(", "self", ")", ":", "self", ".", "output_dim", "=", "10", "self", ".", "encoder", "=", "Chain", "(", "self", ".", "input_dim", ")", ".", "stack", "(", "Dense", "(", "self", ".", "internal_layer_size", ",", "'tanh'", ")", ")", "se...
48.266667
27.333333
def _get_lib_modules(self, full): """Returns a list of the modules in the same folder as the one being wrapped for compilation as a linked library. :arg full: when True, all the code files in the source file's directory are considered as dependencies; otherwise only those explicitly needed are kept. """ #The only complication with the whole process is that we need to get the list of #dependencies for the current module. For full lib, we compile *all* the files in #the directory, otherwise only those that are explicitly required. result = [] if full: found = {} from os import path mypath = path.dirname(self.module.filepath) self.module.parent.scan_path(mypath, found) for codefile in found: self.module.parent.load_dependency(codefile.replace(".f90", ""), True, True, False) for modname, module in list(self.module.parent.modules.items()): if path.dirname(module.filepath).lower() == mypath.lower(): result.append(modname) else: result.extend(self.module.search_dependencies()) return self._process_module_needs(result)
[ "def", "_get_lib_modules", "(", "self", ",", "full", ")", ":", "#The only complication with the whole process is that we need to get the list of", "#dependencies for the current module. For full lib, we compile *all* the files in", "#the directory, otherwise only those that are explicitly requir...
48.230769
24.115385
def GetValidHostsForCert(cert):
    """Returns a list of valid host globs for an SSL certificate.

    Args:
      cert: A dictionary representing an SSL certificate.
    Returns:
      list: A list of valid host globs.
    """
    # subjectAltName entries, when present, take precedence over the
    # subject commonName.
    if 'subjectAltName' in cert:
        return [value
                for kind, value in cert['subjectAltName']
                if kind.lower() == 'dns']
    return [rdn[0][1]
            for rdn in cert['subject']
            if rdn[0][0].lower() == 'commonname']
[ "def", "GetValidHostsForCert", "(", "cert", ")", ":", "if", "'subjectAltName'", "in", "cert", ":", "return", "[", "x", "[", "1", "]", "for", "x", "in", "cert", "[", "'subjectAltName'", "]", "if", "x", "[", "0", "]", ".", "lower", "(", ")", "==", "'...
31.538462
16.769231
def validate_schema(sconf): """ Return True if config schema is correct. Parameters ---------- sconf : dict session configuration Returns ------- bool """ # verify session_name if 'session_name' not in sconf: raise exc.ConfigError('config requires "session_name"') if 'windows' not in sconf: raise exc.ConfigError('config requires list of "windows"') for window in sconf['windows']: if 'window_name' not in window: raise exc.ConfigError('config window is missing "window_name"') if 'panes' not in window: raise exc.ConfigError( 'config window %s requires list of panes' % window['window_name'] ) return True
[ "def", "validate_schema", "(", "sconf", ")", ":", "# verify session_name", "if", "'session_name'", "not", "in", "sconf", ":", "raise", "exc", ".", "ConfigError", "(", "'config requires \"session_name\"'", ")", "if", "'windows'", "not", "in", "sconf", ":", "raise",...
23.612903
22.580645
def as_raw(self): """ Return a representation of this object that can be used with mongoengine Document.objects(__raw__=x) Example: >>> stream_id = StreamId(name='test', meta_data=((u'house', u'1'), (u'resident', u'1'))) >>> stream_id.as_raw() {'stream_id.meta_data': [(u'house', u'1'), (u'resident', u'1')], 'stream_id.name': 'test'} :return: The raw representation of this object. """ return dict(('stream_id.' + k, v) for k, v in self.as_dict().items())
[ "def", "as_raw", "(", "self", ")", ":", "return", "dict", "(", "(", "'stream_id.'", "+", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "as_dict", "(", ")", ".", "items", "(", ")", ")" ]
43.25
30.916667
def _get_video_id(self, url=None): """ Extract video id. It will try to avoid making an HTTP request if it can find the ID in the URL, but otherwise it will try to scrape it from the HTML document. Returns None in case it's unable to extract the ID at all. """ if url: html_data = self.http.request("get", url).text else: html_data = self.get_urldata() html_data = self.get_urldata() match = re.search(r'data-video-id="([0-9]+)"', html_data) if match: return match.group(1) match = re.search(r'data-videoid="([0-9]+)', html_data) if match: return match.group(1) match = re.search(r'"mediaGuid":"([0-9]+)"', html_data) if match: return match.group(1) clips = False slug = None match = re.search('params":({.*}),"query', self.get_urldata()) if match: jansson = json.loads(match.group(1)) if "seasonNumberOrVideoId" in jansson: season = jansson["seasonNumberOrVideoId"] match = re.search(r"\w-(\d+)$", season) if match: season = match.group(1) else: match = self._conentpage(self.get_urldata()) if match: # this only happen on the program page? janson2 = json.loads(match.group(1)) if janson2["formatPage"]["format"]: season = janson2["formatPage"]["format"]["seasonNumber"] return janson2["formatPage"]["format"]["videos"][str(season)]["program"][0]["id"] return None if "videoIdOrEpisodeNumber" in jansson: videp = jansson["videoIdOrEpisodeNumber"] match = re.search(r'(\w+)-(\d+)', videp) if match: episodenr = match.group(2) else: episodenr = videp clips = True match = re.search(r'(s\w+)-(\d+)', season) if match: season = match.group(2) else: # sometimes videoIdOrEpisodeNumber does not work.. 
this is a workaround match = re.search(r'(episode|avsnitt)-(\d+)', self.url) if match: episodenr = match.group(2) else: episodenr = season if "slug" in jansson: slug = jansson["slug"] if clips: return episodenr else: match = self._conentpage(self.get_urldata()) if match: janson = json.loads(match.group(1)) for i in janson["formatPage"]["format"]["videos"].keys(): if "program" in janson["formatPage"]["format"]["videos"][str(i)]: for n in janson["formatPage"]["format"]["videos"][i]["program"]: if str(n["episodeNumber"]) and int(episodenr) == n["episodeNumber"] and int(season) == n["seasonNumber"]: if slug is None or slug == n["formatSlug"]: return n["id"] elif n["id"] == episodenr: return episodenr parse = urlparse(self.url) match = re.search(r'/\w+/(\d+)', parse.path) if match: return match.group(1) match = re.search(r'iframe src="http://play.juicyplay.se[^\"]+id=(\d+)', html_data) if match: return match.group(1) match = re.search(r'<meta property="og:image" content="([\S]+)"', html_data) if match: return match.group(1).split("/")[-2] return None
[ "def", "_get_video_id", "(", "self", ",", "url", "=", "None", ")", ":", "if", "url", ":", "html_data", "=", "self", ".", "http", ".", "request", "(", "\"get\"", ",", "url", ")", ".", "text", "else", ":", "html_data", "=", "self", ".", "get_urldata", ...
42.696629
20.58427
def get_event_details(self, group_url, event_id): ''' a method to retrieve details for an event :param group_url: string with meetup urlname for host group :param event_id: integer with meetup id for event :return: dictionary with list of event details inside [json] key event_details = self._reconstruct_event({}) ''' # https://www.meetup.com/meetup_api/docs/:urlname/events/:id/#get title = '%s.get_event_details' % self.__class__.__name__ # validate inputs input_fields = { 'group_url': group_url, 'event_id': event_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = '%s/%s/events/%s' % (self.endpoint, group_url, str(event_id)) params = { 'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable,rsvpable_after_join' } # send request event_details = self._get_request(url, params=params) # construct method output if event_details['json']: event_details['json'] = self._reconstruct_event(event_details['json']) return event_details
[ "def", "get_event_details", "(", "self", ",", "group_url", ",", "event_id", ")", ":", "# https://www.meetup.com/meetup_api/docs/:urlname/events/:id/#get\r", "title", "=", "'%s.get_event_details'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input...
34.846154
26.589744
def authenticated(f): """Access only with a valid session.""" @functools.wraps(f) def wrapper(request, *args, **kwargs): if d1_common.const.SUBJECT_AUTHENTICATED not in request.all_subjects_set: raise d1_common.types.exceptions.NotAuthorized( 0, 'Access allowed only for authenticated subjects. Please reconnect with ' 'a valid DataONE session certificate. active_subjects="{}"'.format( d1_gmn.app.auth.format_active_subjects(request) ), ) return f(request, *args, **kwargs) return wrapper
[ "def", "authenticated", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "d1_common", ".", "const", ".", "SUBJECT_AUTHENTICATED", "not", "i...
38.5
24.4375
def get_settings(self): """ Gets the interconnect settings for a logical interconnect group. Returns: dict: Interconnect Settings. """ uri = "{}/settings".format(self.data["uri"]) return self._helper.do_get(uri)
[ "def", "get_settings", "(", "self", ")", ":", "uri", "=", "\"{}/settings\"", ".", "format", "(", "self", ".", "data", "[", "\"uri\"", "]", ")", "return", "self", ".", "_helper", ".", "do_get", "(", "uri", ")" ]
29.333333
14
def create(name, **params): ''' Function to create device in Server Density. For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating CLI Example: .. code-block:: bash salt '*' serverdensity_device.create lama salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 ''' log.debug('Server Density params: %s', params) params = _clean_salt_variables(params) params['name'] = name api_response = requests.post( 'https://api.serverdensity.io/inventory/devices/', params={'token': get_sd_auth('api_token')}, data=params ) log.debug('Server Density API Response: %s', api_response) log.debug('Server Density API Response content: %s', api_response.content) if api_response.status_code == 200: try: return salt.utils.json.loads(api_response.content) except ValueError: log.error('Could not parse API Response content: %s', api_response.content) raise CommandExecutionError( 'Failed to create, API Response: {0}'.format(api_response) ) else: return None
[ "def", "create", "(", "name", ",", "*", "*", "params", ")", ":", "log", ".", "debug", "(", "'Server Density params: %s'", ",", "params", ")", "params", "=", "_clean_salt_variables", "(", "params", ")", "params", "[", "'name'", "]", "=", "name", "api_respon...
33.771429
25.028571
def MessageSetItemEncoder(field_number): """Encoder for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ start_bytes = b"".join([ TagBytes(1, wire_format.WIRETYPE_START_GROUP), TagBytes(2, wire_format.WIRETYPE_VARINT), _VarintBytes(field_number), TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)]) end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP) local_EncodeVarint = _EncodeVarint def EncodeField(write, value): write(start_bytes) local_EncodeVarint(write, value.ByteSize()) value._InternalSerialize(write) return write(end_bytes) return EncodeField
[ "def", "MessageSetItemEncoder", "(", "field_number", ")", ":", "start_bytes", "=", "b\"\"", ".", "join", "(", "[", "TagBytes", "(", "1", ",", "wire_format", ".", "WIRETYPE_START_GROUP", ")", ",", "TagBytes", "(", "2", ",", "wire_format", ".", "WIRETYPE_VARINT"...
29.192308
14.384615
def generate_html_documentation(self): """generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.""" methods = {} for method_name in self.system_listMethods(): if method_name in self.funcs: method = self.funcs[method_name] elif self.instance is not None: method_info = [None, None] # argspec, documentation if hasattr(self.instance, '_get_method_argstring'): method_info[0] = self.instance._get_method_argstring(method_name) if hasattr(self.instance, '_methodHelp'): method_info[1] = self.instance._methodHelp(method_name) method_info = tuple(method_info) if method_info != (None, None): method = method_info elif not hasattr(self.instance, '_dispatch'): try: method = resolve_dotted_attribute( self.instance, method_name ) except AttributeError: method = method_info else: method = method_info else: assert 0, "Could not find method in self.functions and no "\ "instance installed" methods[method_name] = method documenter = ServerHTMLDoc() documentation = documenter.docserver( self.server_name, self.server_documentation, methods ) return documenter.page(self.server_title, documentation)
[ "def", "generate_html_documentation", "(", "self", ")", ":", "methods", "=", "{", "}", "for", "method_name", "in", "self", ".", "system_listMethods", "(", ")", ":", "if", "method_name", "in", "self", ".", "funcs", ":", "method", "=", "self", ".", "funcs", ...
43.44
18.02
def read(self, data, offset=0): """ Read data structure and return (nested) named tuple(s). """ if isinstance(data, Buffer): return data.read(self) try: args = list(self._struct.unpack_from(data, offset)) except TypeError as error: # Working around struct.unpack_from issue #10212 logger.debug("error: %s", error) args = list(self._struct.unpack_from(str(bytearray(data)), offset)) args.reverse() return self.popValue(args)
[ "def", "read", "(", "self", ",", "data", ",", "offset", "=", "0", ")", ":", "if", "isinstance", "(", "data", ",", "Buffer", ")", ":", "return", "data", ".", "read", "(", "self", ")", "try", ":", "args", "=", "list", "(", "self", ".", "_struct", ...
40
15.615385
def do_file_update_metadata(client, args): """Update file metadata""" client.update_file_metadata(args.uri, filename=args.filename, description=args.description, mtime=args.mtime, privacy=args.privacy) return True
[ "def", "do_file_update_metadata", "(", "client", ",", "args", ")", ":", "client", ".", "update_file_metadata", "(", "args", ".", "uri", ",", "filename", "=", "args", ".", "filename", ",", "description", "=", "args", ".", "description", ",", "mtime", "=", "...
47.333333
17.333333
def needs_quotes(s): """Checks whether a string is a dot language ID. It will check whether the string is solely composed by the characters allowed in an ID or not. If the string is one of the reserved keywords it will need quotes too but the user will need to add them manually. """ # If the name is a reserved keyword it will need quotes but pydot # can't tell when it's being used as a keyword or when it's simply # a name. Hence the user needs to supply the quotes when an element # would use a reserved keyword as name. This function will return # false indicating that a keyword string, if provided as-is, won't # need quotes. if s in DOT_KEYWORDS: return False chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0] if chars and not ID_RE_DBL_QUOTED.match(s) and not ID_RE_HTML.match(s): return True for test_re in [ID_RE_ALPHA_NUMS, ID_RE_NUM, ID_RE_DBL_QUOTED, ID_RE_HTML, ID_RE_ALPHA_NUMS_WITH_PORTS]: if test_re.match(s): return False m = ID_RE_WITH_PORT.match(s) if m: return needs_quotes(m.group(1)) or needs_quotes(m.group(2)) return True
[ "def", "needs_quotes", "(", "s", ")", ":", "# If the name is a reserved keyword it will need quotes but pydot", "# can't tell when it's being used as a keyword or when it's simply", "# a name. Hence the user needs to supply the quotes when an element", "# would use a reserved keyword as name. This...
44.307692
21.807692
def cheb_range_simplifier(low, high, text=False): ''' >>> low, high = 0.0023046250851646434, 4.7088985707840125 >>> cheb_range_simplifier(low, high, text=True) 'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)' ''' constant = 0.5*(-low-high) factor = 2.0/(high-low) if text: return 'chebval(%.20g*(x + %.20g), coeffs)' %(factor, constant) return constant, factor
[ "def", "cheb_range_simplifier", "(", "low", ",", "high", ",", "text", "=", "False", ")", ":", "constant", "=", "0.5", "*", "(", "-", "low", "-", "high", ")", "factor", "=", "2.0", "/", "(", "high", "-", "low", ")", "if", "text", ":", "return", "'...
37.818182
21.454545
def resample_single_nifti(input_nifti): """ Resample a gantry tilted image in place """ # read the input image input_image = nibabel.load(input_nifti) output_image = resample_nifti_images([input_image]) output_image.to_filename(input_nifti)
[ "def", "resample_single_nifti", "(", "input_nifti", ")", ":", "# read the input image", "input_image", "=", "nibabel", ".", "load", "(", "input_nifti", ")", "output_image", "=", "resample_nifti_images", "(", "[", "input_image", "]", ")", "output_image", ".", "to_fil...
32.625
4.625
def download_data(request_list, redownload=False, max_threads=None): """ Download all requested data or read data from disk, if already downloaded and available and redownload is not required. :param request_list: list of DownloadRequests :type request_list: list of DownloadRequests :param redownload: if ``True``, download again the data, although it was already downloaded and is available on the disk. Default is ``False``. :type redownload: bool :param max_threads: number of threads to use when downloading data; default is ``max_threads=None`` which by default uses the number of processors on the system :type max_threads: int :return: list of Futures holding downloaded data, where each element in the list corresponds to an element in the download request list. :rtype: list[concurrent.futures.Future] """ _check_if_must_download(request_list, redownload) LOGGER.debug("Using max_threads=%s for %s requests", max_threads, len(request_list)) with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor: return [executor.submit(execute_download_request, request) for request in request_list]
[ "def", "download_data", "(", "request_list", ",", "redownload", "=", "False", ",", "max_threads", "=", "None", ")", ":", "_check_if_must_download", "(", "request_list", ",", "redownload", ")", "LOGGER", ".", "debug", "(", "\"Using max_threads=%s for %s requests\"", ...
55.318182
29.045455
def m2m_changed(sender, instance, action, reverse, model, pk_set, using, **kwargs): """https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed""" try: with transaction.atomic(): if not should_audit(instance): return False if action not in ("post_add", "post_remove", "post_clear"): return False object_json_repr = serializers.serialize("json", [instance]) if reverse: event_type = CRUDEvent.M2M_CHANGE_REV # add reverse M2M changes to event. must use json lib because # django serializers ignore extra fields. tmp_repr = json.loads(object_json_repr) m2m_rev_field = _m2m_rev_field_name(instance._meta.concrete_model, model) related_instances = getattr(instance, m2m_rev_field).all() related_ids = [r.pk for r in related_instances] tmp_repr[0]['m2m_rev_model'] = force_text(model._meta) tmp_repr[0]['m2m_rev_pks'] = related_ids tmp_repr[0]['m2m_rev_action'] = action object_json_repr = json.dumps(tmp_repr) else: event_type = CRUDEvent.M2M_CHANGE # user try: user = get_current_user() # validate that the user still exists user = get_user_model().objects.get(pk=user.pk) except: user = None if isinstance(user, AnonymousUser): user = None c_t = ContentType.objects.get_for_model(instance) sid = transaction.savepoint() try: with transaction.atomic(): crud_event = CRUDEvent.objects.create( event_type=event_type, object_repr=str(instance), object_json_repr=object_json_repr, content_type_id=c_t.id, object_id=instance.pk, user_id=getattr(user, 'id', None), datetime=timezone.now(), user_pk_as_string=str(user.pk) if user else user ) except Exception as e: logger.exception( "easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}".format( instance, instance.pk)) transaction.savepoint_rollback(sid) except Exception: logger.exception('easy audit had an m2m-changed exception.')
[ "def", "m2m_changed", "(", "sender", ",", "instance", ",", "action", ",", "reverse", ",", "model", ",", "pk_set", ",", "using", ",", "*", "*", "kwargs", ")", ":", "try", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "if", "not", "should_a...
42.262295
21.065574
def tan(cls, x: 'TensorFluent') -> 'TensorFluent': '''Returns a TensorFluent for the tan function. Args: x: The input fluent. Returns: A TensorFluent wrapping the tan function. ''' return cls._unary_op(x, tf.tan, tf.float32)
[ "def", "tan", "(", "cls", ",", "x", ":", "'TensorFluent'", ")", "->", "'TensorFluent'", ":", "return", "cls", ".", "_unary_op", "(", "x", ",", "tf", ".", "tan", ",", "tf", ".", "float32", ")" ]
28.1
21.7
def research_organism(soup): "Find the research-organism from the set of kwd-group tags" if not raw_parser.research_organism_keywords(soup): return [] return list(map(node_text, raw_parser.research_organism_keywords(soup)))
[ "def", "research_organism", "(", "soup", ")", ":", "if", "not", "raw_parser", ".", "research_organism_keywords", "(", "soup", ")", ":", "return", "[", "]", "return", "list", "(", "map", "(", "node_text", ",", "raw_parser", ".", "research_organism_keywords", "(...
47.8
21.8
def _heightmap_cdata(array: np.ndarray) -> ffi.CData: """Return a new TCOD_heightmap_t instance using an array. Formatting is verified during this function. """ if array.flags["F_CONTIGUOUS"]: array = array.transpose() if not array.flags["C_CONTIGUOUS"]: raise ValueError("array must be a contiguous segment.") if array.dtype != np.float32: raise ValueError("array dtype must be float32, not %r" % array.dtype) width, height = array.shape pointer = ffi.cast("float *", array.ctypes.data) return ffi.new("TCOD_heightmap_t *", (width, height, pointer))
[ "def", "_heightmap_cdata", "(", "array", ":", "np", ".", "ndarray", ")", "->", "ffi", ".", "CData", ":", "if", "array", ".", "flags", "[", "\"F_CONTIGUOUS\"", "]", ":", "array", "=", "array", ".", "transpose", "(", ")", "if", "not", "array", ".", "fl...
42.714286
13.428571
def select_port(default_port=20128): """Find and return a non used port""" import socket while True: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind( ("127.0.0.1", default_port) ) except socket.error as _msg: # analysis:ignore default_port += 1 else: break finally: sock.close() sock = None return default_port
[ "def", "select_port", "(", "default_port", "=", "20128", ")", ":", "import", "socket", "while", "True", ":", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ",", "socket", ".", "IPPROTO_...
34.055556
16.722222
async def get_session_data(self): """Get Tautulli sessions.""" cmd = 'get_activity' url = self.base_url + cmd try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger("Status from Tautulli: " + str(response.status)) self.tautulli_session_data = await response.json() logger(self.tautulli_session_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = "Can not load data from Tautulli: {} - {}".format(url, error) logger(msg, 40)
[ "async", "def", "get_session_data", "(", "self", ")", ":", "cmd", "=", "'get_activity'", "url", "=", "self", ".", "base_url", "+", "cmd", "try", ":", "async", "with", "async_timeout", ".", "timeout", "(", "8", ",", "loop", "=", "self", ".", "_loop", ")...
41.1875
19.8125
def check_photometry_categorize(x, y, levels, tags=None): '''Put every point in its category. levels must be sorted.''' x = numpy.asarray(x) y = numpy.asarray(y) ys = y.copy() ys.sort() # Mean of the upper half m = ys[len(ys) // 2:].mean() y /= m m = 1.0 s = ys[len(ys) // 2:].std() result = [] if tags is None: tags = list(six.moves.range(len(levels) + 1)) for l, t in zip(levels, tags): indc = y < l if indc.any(): x1 = x[indc] y1 = y[indc] result.append((x1, y1, t)) x = x[~indc] y = y[~indc] else: result.append((x, y, tags[-1])) return result, (m, s)
[ "def", "check_photometry_categorize", "(", "x", ",", "y", ",", "levels", ",", "tags", "=", "None", ")", ":", "x", "=", "numpy", ".", "asarray", "(", "x", ")", "y", "=", "numpy", ".", "asarray", "(", "y", ")", "ys", "=", "y", ".", "copy", "(", "...
22.129032
19.806452
def get_last_origin(tp): """Get the last base of (multiply) subscripted type. Supports generic types, Union, Callable, and Tuple. Returns None for unsupported types. Examples:: get_last_origin(int) == None get_last_origin(ClassVar[int]) == None get_last_origin(Generic[T]) == Generic get_last_origin(Union[T, int][str]) == Union[T, int] get_last_origin(List[Tuple[T, T]][int]) == List[Tuple[T, T]] get_last_origin(List) == List """ if NEW_TYPING: raise ValueError('This function is only supported in Python 3.6,' ' use get_origin instead') sentinel = object() origin = getattr(tp, '__origin__', sentinel) if origin is sentinel: return None if origin is None: return tp return origin
[ "def", "get_last_origin", "(", "tp", ")", ":", "if", "NEW_TYPING", ":", "raise", "ValueError", "(", "'This function is only supported in Python 3.6,'", "' use get_origin instead'", ")", "sentinel", "=", "object", "(", ")", "origin", "=", "getattr", "(", "tp", ",", ...
36.318182
16.636364
def GetHostNumCpuCores(self): '''Undocumented.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetHostNumCpuCores(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
[ "def", "GetHostNumCpuCores", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetHostNumCpuCores", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", "...
45.5
22.166667
def getBinaries(self): ''' Return a dictionary of binaries to compile: {"dirname":"exename"}, this is used when automatically generating CMakeLists Note that currently modules may define only a single executable binary or library to be built by the automatic build system, by specifying `"bin": "dir-to-be-built-into-binary"`, or `"lib": "dir-to-be-built-into-library"`, and the bin/lib will always have the same name as the module. The default behaviour if nothing is specified is for the 'source' directory to be built into a library. The module.json syntax may allow for other combinations in the future (and callers of this function should not rely on it returning only a single item). For example, a "bin": {"dirname": "exename"} syntax might be supported, however currently more complex builds must be controlled by custom CMakeLists. ''' # the module.json syntax is a subset of the package.json syntax: a # single string that defines the source directory to use to build an # executable with the same name as the component. This may be extended # to include the rest of the npm syntax in future (map of source-dir to # exe name). if 'bin' in self.description: return {os.path.normpath(self.description['bin']): self.getName()} else: return {}
[ "def", "getBinaries", "(", "self", ")", ":", "# the module.json syntax is a subset of the package.json syntax: a", "# single string that defines the source directory to use to build an", "# executable with the same name as the component. This may be extended", "# to include the rest of the npm syn...
56.384615
31.461538
def filter_exclude_dicts(filter_dict=None, exclude_dict=None, name='acctno', values=[], swap=False): """Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values The last, critical step in generating Django ORM kwargs dicts from a natural language query. Properly parses "NOT" unary operators on each field value in the list. Assumes the lists have been pre-processed to consolidate NOTs and normalize values and syntax. Examples: >>> filter_exclude_dicts(name='num', values=['NOT 1', '2', '3', 'NOT 4'] ... ) == ({'num__in': ['2', '3']}, {'num__in': ['1', '4']}) True """ filter_dict = filter_dict or {} exclude_dict = exclude_dict or {} if not name.endswith('__in'): name += '__in' filter_dict[name], exclude_dict[name] = [], [] for v in values: # "NOT " means switch from include (filter) to exclude for that one account number if v.startswith('NOT '): exclude_dict[name] += [v[4:]] else: filter_dict[name] += [v] if swap: return exclude_dict, filter_dict return filter_dict, exclude_dict
[ "def", "filter_exclude_dicts", "(", "filter_dict", "=", "None", ",", "exclude_dict", "=", "None", ",", "name", "=", "'acctno'", ",", "values", "=", "[", "]", ",", "swap", "=", "False", ")", ":", "filter_dict", "=", "filter_dict", "or", "{", "}", "exclude...
40.392857
24.035714
def is_balanced(self): """ Returns True if the (sub)tree is balanced The tree is balanced if the heights of both subtrees differ at most by 1 """ left_height = self.left.height() if self.left else 0 right_height = self.right.height() if self.right else 0 if abs(left_height - right_height) > 1: return False return all(c.is_balanced for c, _ in self.children)
[ "def", "is_balanced", "(", "self", ")", ":", "left_height", "=", "self", ".", "left", ".", "height", "(", ")", "if", "self", ".", "left", "else", "0", "right_height", "=", "self", ".", "right", ".", "height", "(", ")", "if", "self", ".", "right", "...
32.230769
23.153846
def _build_url(self, host, handler): """ Build a url for our request based on the host, handler and use_http property """ scheme = 'https' if self.use_https else 'http' return '%s://%s/%s' % (scheme, host, handler)
[ "def", "_build_url", "(", "self", ",", "host", ",", "handler", ")", ":", "scheme", "=", "'https'", "if", "self", ".", "use_https", "else", "'http'", "return", "'%s://%s/%s'", "%", "(", "scheme", ",", "host", ",", "handler", ")" ]
36.571429
12.857143
def plot_account(self, row, per_capita=False, sector=None, file_name=False, file_dpi=600, population=None, **kwargs): """ Plots D_pba, D_cba, D_imp and D_exp for the specified row (account) Plot either the total country accounts or for a specific sector, depending on the 'sector' parameter. Per default the accounts are plotted as bar charts. However, any valid keyword for the pandas.DataFrame.plot method can be passed. Notes ----- This looks prettier with the seaborn module (import seaborn before calling this method) Parameters ---------- row : string, tuple or int A valid index for the row in the extension which should be plotted (one(!) row - no list allowed) per_capita : boolean, optional Plot the per capita accounts instead of the absolute values default is False sector: string, optional Plot the results for a specific sector of the IO table. If None is given (default), the total regional accounts are plotted. population : pandas.DataFrame or np.array, optional Vector with population per region. This must be given if values should be plotted per_capita for a specific sector since these values are calculated on the fly. file_name : path string, optional If given, saves the plot to the given filename file_dpi : int, optional Dpi for saving the figure, default 600 **kwargs : key word arguments, optional This will be passed directly to the pd.DataFrame.plot method Returns ------- Axis as given by pandas.DataFrame.plot, None in case of errors """ # necessary if row is given for Multiindex without brackets if type(per_capita) is not bool: logging.error('per_capita parameter must be boolean') return None if type(row) is int: row = self.D_cba.ix[row].name name_row = (str(row). replace('(', ''). replace(')', ''). replace("'", ""). replace('[', ''). 
replace(']', '')) if sector: graph_name = name_row + ' for sector ' + sector else: graph_name = name_row + ' total account' if per_capita: graph_name = graph_name + ' - per capita' graph_name = self.name + ' - ' + graph_name if self.unit is not None: try: # for multiindex the entry is given with header, # for single index just the entry y_label_name = (name_row + ' (' + str(self.unit.ix[row, 'unit'].tolist()[0]) + ')') except: y_label_name = (name_row + ' (' + str(self.unit.ix[row, 'unit']) + ')') else: y_label_name = name_row if 'kind' not in kwargs: kwargs['kind'] = 'bar' if 'colormap' not in kwargs: kwargs['colormap'] = 'Spectral' accounts = collections.OrderedDict() if sector: accounts['Footprint'] = 'D_cba' accounts['Territorial'] = 'D_pba' accounts['Imports'] = 'D_imp' accounts['Exports'] = 'D_exp' else: if per_capita: accounts['Footprint'] = 'D_cba_cap' accounts['Territorial'] = 'D_pba_cap' accounts['Imports'] = 'D_imp_cap' accounts['Exports'] = 'D_exp_cap' else: accounts['Footprint'] = 'D_cba_reg' accounts['Territorial'] = 'D_pba_reg' accounts['Imports'] = 'D_imp_reg' accounts['Exports'] = 'D_exp_reg' data_row = pd.DataFrame(columns=[key for key in accounts]) for key in accounts: if sector: try: _data = pd.DataFrame( getattr(self, accounts[key]).xs( key=sector, axis=1, level='sector').ix[row].T) except (AssertionError, KeyError): _data = pd.DataFrame( getattr(self, accounts[key]).xs( key=sector, axis=1, level=1).ix[row].T) if per_capita: if population is not None: if type(population) is pd.DataFrame: # check for right order: if (population.columns.tolist() != self.D_cba_reg.columns.tolist()): logging.warning( 'Population regions are inconsistent ' 'with IO regions') population = population.values population = population.reshape((-1, 1)) _data = _data / population else: logging.error('Population must be given for sector ' 'results per capita') return else: _data = pd.DataFrame(getattr(self, accounts[key]).ix[row].T) _data.columns = [key] data_row[key] = 
_data[key] if 'title' not in kwargs: kwargs['title'] = graph_name ax = data_row.plot(**kwargs) plt.xlabel('Regions') plt.ylabel(y_label_name) plt.legend(loc='best') try: plt.tight_layout() except: pass if file_name: plt.savefig(file_name, dpi=file_dpi) return ax
[ "def", "plot_account", "(", "self", ",", "row", ",", "per_capita", "=", "False", ",", "sector", "=", "None", ",", "file_name", "=", "False", ",", "file_dpi", "=", "600", ",", "population", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# necessary if...
37.812903
18.354839
def serialize_text(out, text): """This method is used to append content of the `text` argument to the `out` argument. Depending on how many lines in the text, a padding can be added to all lines except the first one. Concatenation result is appended to the `out` argument. """ padding = len(out) # we need to add padding to all lines # except the first one add_padding = padding_adder(padding) text = add_padding(text, ignore_first_line=True) return out + text
[ "def", "serialize_text", "(", "out", ",", "text", ")", ":", "padding", "=", "len", "(", "out", ")", "# we need to add padding to all lines", "# except the first one", "add_padding", "=", "padding_adder", "(", "padding", ")", "text", "=", "add_padding", "(", "text"...
29.352941
15.882353
def OpenFileEntry(cls, path_spec_object, resolver_context=None): """Opens a file entry object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileEntry: file entry or None if the path specification could not be resolved. """ file_system = cls.OpenFileSystem( path_spec_object, resolver_context=resolver_context) if resolver_context is None: resolver_context = cls._resolver_context file_entry = file_system.GetFileEntryByPathSpec(path_spec_object) # Release the file system so it will be removed from the cache # when the file entry is destroyed. resolver_context.ReleaseFileSystem(file_system) return file_entry
[ "def", "OpenFileEntry", "(", "cls", ",", "path_spec_object", ",", "resolver_context", "=", "None", ")", ":", "file_system", "=", "cls", ".", "OpenFileSystem", "(", "path_spec_object", ",", "resolver_context", "=", "resolver_context", ")", "if", "resolver_context", ...
34.76
23.24
def _fill_in_cainfo(self): """Fill in the path of the PEM file containing the CA certificate. The priority is: 1. user provided path, 2. path to the cacert.pem bundle provided by certifi (if installed), 3. let pycurl use the system path where libcurl's cacert bundle is assumed to be stored, as established at libcurl build time. """ if self.cainfo: cainfo = self.cainfo else: try: cainfo = certifi.where() except AttributeError: cainfo = None if cainfo: self._pycurl.setopt(pycurl.CAINFO, cainfo)
[ "def", "_fill_in_cainfo", "(", "self", ")", ":", "if", "self", ".", "cainfo", ":", "cainfo", "=", "self", ".", "cainfo", "else", ":", "try", ":", "cainfo", "=", "certifi", ".", "where", "(", ")", "except", "AttributeError", ":", "cainfo", "=", "None", ...
37.294118
16.882353
def continue_to_install(self): """Continue to install ? """ if (self.count_uni > 0 or self.count_upg > 0 or "--download-only" in self.flag or "--rebuild" in self.flag): if self.master_packages and self.msg.answer() in ["y", "Y"]: installs, upgraded = self.build_install() if "--download-only" in self.flag: raise SystemExit() self.msg.reference(installs, upgraded) write_deps(self.deps_dict) delete(self.build_folder)
[ "def", "continue_to_install", "(", "self", ")", ":", "if", "(", "self", ".", "count_uni", ">", "0", "or", "self", ".", "count_upg", ">", "0", "or", "\"--download-only\"", "in", "self", ".", "flag", "or", "\"--rebuild\"", "in", "self", ".", "flag", ")", ...
46.5
11.583333
def tuple(self): """ Tuple conversion to (value, dimensions), e.g.: (123, {dimension_1: "foo", dimension_2: "bar"}) """ return (self.value, {dv.id: dv.value for dv in self.dimensionvalues})
[ "def", "tuple", "(", "self", ")", ":", "return", "(", "self", ".", "value", ",", "{", "dv", ".", "id", ":", "dv", ".", "value", "for", "dv", "in", "self", ".", "dimensionvalues", "}", ")" ]
43.6
15.4
def get_all_rules(self, id_env): """Save an environment rule :param id_env: Environment id :return: Estrutura: :: { 'rules': [{'id': < id >, 'environment': < Environment Object >, 'content': < content >, 'name': < name >, 'custom': < custom > },... ]} :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise UserNotAuthorizedError: Permissão negada. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta. """ url = 'rule/all/' + str(id_env) code, xml = self.submit(None, 'GET', url) return self.response(code, xml, ['rules'])
[ "def", "get_all_rules", "(", "self", ",", "id_env", ")", ":", "url", "=", "'rule/all/'", "+", "str", "(", "id_env", ")", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "response", ...
32.458333
19.958333
def del_arg(self, name: str) -> None: """Delete all arguments with the given then.""" for arg in reversed(self.arguments): if arg.name.strip(WS) == name.strip(WS): del arg[:]
[ "def", "del_arg", "(", "self", ",", "name", ":", "str", ")", "->", "None", ":", "for", "arg", "in", "reversed", "(", "self", ".", "arguments", ")", ":", "if", "arg", ".", "name", ".", "strip", "(", "WS", ")", "==", "name", ".", "strip", "(", "W...
42.8
6.6
def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0]
[ "def", "component", "(", "self", ",", "extra_params", "=", "None", ")", ":", "if", "self", ".", "get", "(", "'component_id'", ",", "None", ")", ":", "components", "=", "self", ".", "space", ".", "components", "(", "id", "=", "self", "[", "'component_id...
39.625
11.625
def collapse_nodes(graph, survivor_mapping: Mapping[BaseEntity, Set[BaseEntity]]) -> None: """Collapse all nodes in values to the key nodes, in place. :param pybel.BELGraph graph: A BEL graph :param survivor_mapping: A dictionary with survivors as their keys, and iterables of the corresponding victims as values. """ inconsistencies = surviors_are_inconsistent(survivor_mapping) if inconsistencies: raise ValueError('survivor mapping is inconsistent: {}'.format(inconsistencies)) for survivor, victims in survivor_mapping.items(): for victim in victims: collapse_pair(graph, survivor=survivor, victim=victim) _remove_self_edges(graph)
[ "def", "collapse_nodes", "(", "graph", ",", "survivor_mapping", ":", "Mapping", "[", "BaseEntity", ",", "Set", "[", "BaseEntity", "]", "]", ")", "->", "None", ":", "inconsistencies", "=", "surviors_are_inconsistent", "(", "survivor_mapping", ")", "if", "inconsis...
43
26.875
def computePerturbedExpectation(self, u_n, A_n, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False): """Compute the expectation of an observable of phase space function A(x) for a single new state. Parameters ---------- u_n : np.ndarray, float, shape=(K, N_max) u_n[n] = u(x_n) - the energy of the new state at all N samples previously sampled. A_n : np.ndarray, float, shape=(K, N_max) A_n[n] = A(x_n) - the phase space function of the new state at all N samples previously sampled. If this does NOT depend on state (e.g. position), it's simply the value of the observation. If it DOES depend on the current state, then the observables from the previous states need to be reevaluated at THIS state. compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) return_theta : bool, optional Whether or not to return the theta matrix. Can be useful for complicated differences. Returns ------- A : float A is the estimate for the expectation of A(x) for the specified state dA : float dA is uncertainty estimate for A Notes ----- See Section IV of [1]. # Compute estimators and uncertainty. #A = sum(W_n[:,K] * A_n[:]) # Eq. 15 of [1] #dA = abs(A) * np.sqrt(Theta_ij[K,K] + Theta_ij[K+1,K+1] - 2.0 * Theta_ij[K,K+1]) # Eq. 16 of [1] """ if len(np.shape(u_n)) == 2: u_n = kn_to_n(u_n, N_k=self.N_k) if len(np.shape(A_n)) == 2: A_n = kn_to_n(A_n, N_k=self.N_k) # Convert to np matrix. A_n = np.array(A_n, dtype=np.float64) # Retrieve N and K for convenience. 
N = self.N K = self.K # Make A_k all positive so we can operate logarithmically for # robustness A_min = np.min(A_n) A_n = A_n - (A_min - 1) # Augment W_nk, N_k, and c_k for q_A(x) for the observable, with one # extra row/column for the specified state (Eq. 13 of [1]). # weight matrix Log_W_nk = np.zeros([N, K + 2], dtype=np.float64) N_k = np.zeros([K + 2], dtype=np.int32) # counts f_k = np.zeros([K + 2], dtype=np.float64) # free energies # Fill in first K states with existing q_k(x) from states. Log_W_nk[:, 0:K] = self.Log_W_nk N_k[0:K] = self.N_k # compute the free energy of the additional state log_w_n = self._computeUnnormalizedLogWeights(u_n) # Compute free energies f_k[K] = -_logsum(log_w_n) Log_W_nk[:, K] = log_w_n + f_k[K] # compute the observable at this state Log_W_nk[:, K + 1] = np.log(A_n) + Log_W_nk[:, K] f_k[K + 1] = -_logsum(Log_W_nk[:, K + 1]) Log_W_nk[:, K + 1] += f_k[K + 1] # normalize the row A = np.exp(-f_k[K + 1]) if (compute_uncertainty or return_theta): # Compute augmented asymptotic covariance matrix. Theta_ij = self._computeAsymptoticCovarianceMatrix( np.exp(Log_W_nk), N_k, method=uncertainty_method) if (compute_uncertainty): dA = np.abs(A) * np.sqrt( Theta_ij[K + 1, K + 1] + Theta_ij[K, K] - 2.0 * Theta_ij[K, K + 1]) # Eq. 16 of [1] # shift answers back with the offset now that variances are computed A += (A_min - 1) returns = [] returns.append(A) if (compute_uncertainty): returns.append(dA) if (return_theta): returns.append(Theta_ij) # Return expectations and uncertainties. return returns
[ "def", "computePerturbedExpectation", "(", "self", ",", "u_n", ",", "A_n", ",", "compute_uncertainty", "=", "True", ",", "uncertainty_method", "=", "None", ",", "warning_cutoff", "=", "1.0e-10", ",", "return_theta", "=", "False", ")", ":", "if", "len", "(", ...
41.292929
26.777778
def set_pkg_chk_sum(self, doc, chk_sum): """Sets the package check sum, if not already set. chk_sum - A string Raises CardinalityError if already defined. Raises OrderError if no package previously defined. """ self.assert_package_exists() if not self.package_chk_sum_set: self.package_chk_sum_set = True doc.package.check_sum = checksum.Algorithm('SHA1', chk_sum) else: raise CardinalityError('Package::CheckSum')
[ "def", "set_pkg_chk_sum", "(", "self", ",", "doc", ",", "chk_sum", ")", ":", "self", ".", "assert_package_exists", "(", ")", "if", "not", "self", ".", "package_chk_sum_set", ":", "self", ".", "package_chk_sum_set", "=", "True", "doc", ".", "package", ".", ...
41.916667
10.333333
def get_configure(self, repo=None, name=None, groups=None, main_cfg=False): """ Get the vent.template settings for a given tool by looking at the plugin_manifest """ constraints = locals() del constraints['main_cfg'] status = (True, None) template_dict = {} return_str = '' if main_cfg: vent_cfg = Template(self.vent_config) for section in vent_cfg.sections()[1]: template_dict[section] = {} for vals in vent_cfg.section(section)[1]: template_dict[section][vals[0]] = vals[1] else: # all possible vent.template options stored in plugin_manifest options = ['info', 'service', 'settings', 'docker', 'gpu'] tools = Template(System().manifest).constrain_opts( constraints, options)[0] if tools: # should only be one tool tool = list(tools.keys())[0] # load all vent.template options into dict for section in tools[tool]: template_dict[section] = json.loads(tools[tool][section]) else: status = (False, "Couldn't get vent.template information") if status[0]: # display all those options as they would in the file for section in template_dict: return_str += '[' + section + ']\n' # ensure instances shows up in configuration for option in template_dict[section]: if option.startswith('#'): return_str += option + '\n' else: return_str += option + ' = ' return_str += template_dict[section][option] + '\n' return_str += '\n' # only one newline at end of file status = (True, return_str[:-1]) return status
[ "def", "get_configure", "(", "self", ",", "repo", "=", "None", ",", "name", "=", "None", ",", "groups", "=", "None", ",", "main_cfg", "=", "False", ")", ":", "constraints", "=", "locals", "(", ")", "del", "constraints", "[", "'main_cfg'", "]", "status"...
42.0625
14.229167
def get_default_subject_guide(campus='seattle'): """ Returns a default SubjectGuide model for the passed campus: seattle, bothell, tacoma """ url = "{}/{}/{}".format(subject_guide_url_prefix, 'defaultGuide', campus) headers = {'Accept': 'application/json'} response = SubjectGuide_DAO().getURL(url, headers) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) return _subject_guide_from_json(data)
[ "def", "get_default_subject_guide", "(", "campus", "=", "'seattle'", ")", ":", "url", "=", "\"{}/{}/{}\"", ".", "format", "(", "subject_guide_url_prefix", ",", "'defaultGuide'", ",", "campus", ")", "headers", "=", "{", "'Accept'", ":", "'application/json'", "}", ...
34
17.333333
def _set_time(self, time): """ Set time in both class and hdf5 file """ if len(self.time) == 0 : self.time = np.array(time) if self.h5 is not None: self.h5.create_dataset('time', self.time.shape, dtype=self.time.dtype, data=self.time, compression="gzip", shuffle=True, scaleoffset=3) else: if(len(time) != len(self.time)): raise AssertionError("\nTime or number of frame mismatch in input files.\n Exiting...\n")
[ "def", "_set_time", "(", "self", ",", "time", ")", ":", "if", "len", "(", "self", ".", "time", ")", "==", "0", ":", "self", ".", "time", "=", "np", ".", "array", "(", "time", ")", "if", "self", ".", "h5", "is", "not", "None", ":", "self", "."...
50.3
23.6