text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def run(items): """Perform detection of structural variations with delly. Performs post-call filtering with a custom filter tuned based on NA12878 Moleculo and PacBio data, using calls prepared by @ryanlayer and @cc2qe Filters using the high quality variant pairs (DV) compared with high quality reference pairs (DR). """ work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "structural", dd.get_sample_name(items[0]), "delly")) # Add core request for delly config = copy.deepcopy(items[0]["config"]) delly_config = utils.get_in(config, ("resources", "delly"), {}) delly_config["cores"] = 1 config["resources"]["delly"] = delly_config parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1), "progs": ["delly"]} work_bams = [dd.get_align_bam(d) for d in items] ref_file = dd.get_ref_file(items[0]) exclude_file = _get_full_exclude_file(items, work_bams, work_dir) bytype_vcfs = run_multicore(_run_delly, [(work_bams, chrom, ref_file, work_dir, items) for chrom in sshared.get_sv_chroms(items, exclude_file)], config, parallel) out_file = "%s.vcf.gz" % sshared.outname_from_inputs(bytype_vcfs) combo_vcf = vcfutils.combine_variant_files(bytype_vcfs, out_file, ref_file, config) out = [] upload_counts = collections.defaultdict(int) for data in items: if "sv" not in data: data["sv"] = [] base, ext = utils.splitext_plus(combo_vcf) final_vcf = sshared.finalize_sv(combo_vcf, data, items) if final_vcf: delly_vcf = _delly_count_evidence_filter(final_vcf, data) data["sv"].append({"variantcaller": "delly", "vrn_file": delly_vcf, "do_upload": upload_counts[final_vcf] == 0, # only upload a single file per batch "exclude": exclude_file}) upload_counts[final_vcf] += 1 out.append(data) return out
[ "def", "run", "(", "items", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "items", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "dd", ".", "get_sample_name...
48.744186
21.953488
def _logfile_sigterm_handler(*_): # type: (...) -> None """Handle exit signals and write out a log file. Raises: SystemExit: Contains the signal as the return code. """ logging.error('Received SIGTERM.') write_logfile() print('Received signal. Please see the log file for more information.', file=sys.stderr) sys.exit(signal)
[ "def", "_logfile_sigterm_handler", "(", "*", "_", ")", ":", "# type: (...) -> None", "logging", ".", "error", "(", "'Received SIGTERM.'", ")", "write_logfile", "(", ")", "print", "(", "'Received signal. Please see the log file for more information.'", ",", "file", "=", ...
30.416667
16.833333
def grantxml2json(self, grant_xml): """Convert OpenAIRE grant XML into JSON.""" tree = etree.fromstring(grant_xml) # XML harvested from OAI-PMH has a different format/structure if tree.prefix == 'oai': ptree = self.get_subtree( tree, '/oai:record/oai:metadata/oaf:entity/oaf:project')[0] header = self.get_subtree(tree, '/oai:record/oai:header')[0] oai_id = self.get_text_node(header, 'oai:identifier') modified = self.get_text_node(header, 'oai:datestamp') else: ptree = self.get_subtree( tree, '/record/result/metadata/oaf:entity/oaf:project')[0] header = self.get_subtree(tree, '/record/result/header')[0] oai_id = self.get_text_node(header, 'dri:objIdentifier') modified = self.get_text_node(header, 'dri:dateOfTransformation') url = self.get_text_node(ptree, 'websiteurl') code = self.get_text_node(ptree, 'code') title = self.get_text_node(ptree, 'title') acronym = self.get_text_node(ptree, 'acronym') startdate = self.get_text_node(ptree, 'startdate') enddate = self.get_text_node(ptree, 'enddate') funder = self.fundertree2json(ptree, oai_id) internal_id = "{0}::{1}".format(funder['doi'], code) eurepo_id = \ "info:eu-repo/grantAgreement/{funder}/{program}/{code}/".format( funder=quote_plus(funder['name'].encode('utf8')), program=quote_plus(funder['program'].encode('utf8')), code=quote_plus(code.encode('utf8')), ) ret_json = { '$schema': self.schema_formatter.schema_url, 'internal_id': internal_id, 'identifiers': { 'oaf': oai_id, 'eurepo': eurepo_id, 'purl': url if url.startswith("http://purl.org/") else None, }, 'code': code, 'title': title, 'acronym': acronym, 'startdate': startdate, 'enddate': enddate, 'funder': {'$ref': funder['url']}, 'program': funder['program'], 'url': url, 'remote_modified': modified, } return ret_json
[ "def", "grantxml2json", "(", "self", ",", "grant_xml", ")", ":", "tree", "=", "etree", ".", "fromstring", "(", "grant_xml", ")", "# XML harvested from OAI-PMH has a different format/structure", "if", "tree", ".", "prefix", "==", "'oai'", ":", "ptree", "=", "self",...
42.865385
18.576923
def select_random(ports=None, exclude_ports=None): """ Returns random unused port number. """ if ports is None: ports = available_good_ports() if exclude_ports is None: exclude_ports = set() ports.difference_update(set(exclude_ports)) for port in random.sample(ports, min(len(ports), 100)): if not port_is_used(port): return port raise PortForException("Can't select a port")
[ "def", "select_random", "(", "ports", "=", "None", ",", "exclude_ports", "=", "None", ")", ":", "if", "ports", "is", "None", ":", "ports", "=", "available_good_ports", "(", ")", "if", "exclude_ports", "is", "None", ":", "exclude_ports", "=", "set", "(", ...
26.9375
14.5625
def keypress(self, win, char): """ returns: 1: get next char 0: exit edit mode, string isvalid -1: cancel """ if not self._focused: return 1 if self.log is not None: self.log('char = {}\n'.format(char)) if char in (curses.KEY_ENTER, ord('\n'), ord('\r')): """ ENTER """ if self._has_history: self._input_history.add_to_history(self._string) return 0 elif char in (curses.KEY_EXIT, 27): self._edit_win.nodelay(True) char = self._edit_win.getch() self._log_file='/home/spiros/edit.log' self._log(' *** char = {}\n'.format(char)) self._edit_win.nodelay(False) if char == -1: """ ESCAPE """ self._string = '' self._curs_pos = 0 return -1 else: return 1 elif char in (curses.KEY_RIGHT, curses.ascii.ACK): """ KEY_RIGHT, Alt-F """ self._curs_pos += 1 if len(self._string) < self._curs_pos: self._curs_pos = len(self._string) elif char in (curses.KEY_LEFT, ): """ KEY_LEFT """ self._curs_pos -= 1 if self._curs_pos < 0: self._curs_pos = 0 elif char in (curses.KEY_HOME, curses.ascii.SOH): """ KEY_HOME, ^A """ self._curs_pos = 0 elif char in (curses.KEY_END, curses.ascii.ENQ): """ KEY_END, ^E """ self._curs_pos = len(self._string) elif char in (curses.KEY_DC, curses.ascii.EOT): """ DEL key, ^D """ if self._curs_pos < len(self._string): self._string = self._string[:self._curs_pos] + self._string[self._curs_pos+1:] elif char in (curses.KEY_BACKSPACE, curses.ascii.BS,127): """ KEY_BACKSPACE """ if self._curs_pos > 0: self._string = self._string[:self._curs_pos-1] + self._string[self._curs_pos:] self._curs_pos -= 1 elif char in (curses.KEY_UP, curses.ascii.DLE): """ KEY_UP, ^N """ if self._key_up_function_handler is not None: try: self._key_up_function_handler() except: pass else: if self._ungetch_unbound_keys: curses.ungetch(char) elif char in (curses.KEY_DOWN, curses.ascii.SO): """ KEY_DOWN, ^P """ if self._key_down_function_handler is not None: try: self._key_down_function_handler() except: pass else: if self._ungetch_unbound_keys: curses.ungetch(char) elif char in (curses.KEY_NPAGE, ): 
""" PgDn """ if self._key_pgdown_function_handler is not None: try: self._key_pgdown_function_handler() except: pass else: if self._ungetch_unbound_keys: curses.ungetch(char) elif char in (curses.KEY_PPAGE, ): """ PgUp """ if self._key_pgup_function_handler is not None: try: self._key_pgup_function_handler() except: pass elif char in (9, ): """ TAB """ if self._key_tab_function_handler is not None: try: self._key_tab_function_handler() except: pass else: if self._ungetch_unbound_keys: curses.ungetch(char) elif char in (curses.KEY_BTAB, ): """ Shift-TAB """ if self._key_stab_function_handler is not None: try: self._key_stab_function_handler() except: pass else: if self._ungetch_unbound_keys: curses.ungetch(char) elif char in (curses.ascii.VT, ): """ Ctrl-K - delete to end of line """ self._string = self._string[:self._curs_pos] elif 0<= char <=31: pass else: if len(self._string) + 1 == self._max_width: return 1 if version_info < (3, 0): if 32 <= char < 127: # accept only ascii characters if len(self._string) == self._curs_pos: self._string += chr(char) self._curs_pos += 1 else: self._string = self._string[:self._curs_pos] + chr(char) + self._string[self._curs_pos:] else: char = self._get_char(win, char) if len(self._string) == self._curs_pos: self._string += char self._curs_pos += 1 else: self._string = self._string[:self._curs_pos] + char + self._string[self._curs_pos:] self.refreshEditWindow() return 1
[ "def", "keypress", "(", "self", ",", "win", ",", "char", ")", ":", "if", "not", "self", ".", "_focused", ":", "return", "1", "if", "self", ".", "log", "is", "not", "None", ":", "self", ".", "log", "(", "'char = {}\\n'", ".", "format", "(", "char", ...
37.715328
12.678832
def get_vnetwork_dvpgs_output_has_more(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs") config = get_vnetwork_dvpgs output = ET.SubElement(get_vnetwork_dvpgs, "output") has_more = ET.SubElement(output, "has-more") has_more.text = kwargs.pop('has_more') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_dvpgs_output_has_more", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_dvpgs", "=", "ET", ".", "Element", "(", "\"get_vnetwork_dvpgs\"", ")", "config", "=", "get_v...
39.583333
12.333333
def _convert_verbal_form( analysis ): ''' Converts ordinary verbal categories of the input analysis. Performs one-to-one conversions. ''' assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.' for form, replacement in _verb_conversion_rules: # Exact match if analysis[FORM] == form: assert analysis[POSTAG] == 'V', \ '(!) Expected analysis of verb, but got analysis of "'+str(analysis[POSTAG])+'" instead.' analysis[FORM] = replacement # Inclusion : the case of some_prefix+' '+form ; elif analysis[FORM].endswith(' '+form): parts = analysis[FORM].split() prefix = ' '.join( parts[:len(parts)-1] ) analysis[FORM] = prefix+' '+replacement return analysis
[ "def", "_convert_verbal_form", "(", "analysis", ")", ":", "assert", "FORM", "in", "analysis", ",", "'(!) The input analysis does not contain \"'", "+", "FORM", "+", "'\" key.'", "for", "form", ",", "replacement", "in", "_verb_conversion_rules", ":", "# Exact match", "...
49.75
16
def index_path(self, root): """Index a path. :param root: Either a package directory, a .so or a .py module. """ basename = os.path.basename(root) if os.path.splitext(basename)[0] != '__init__' and basename.startswith('_'): return location = self._determine_location_for(root) if os.path.isfile(root): self._index_module(root, location) elif os.path.isdir(root) and os.path.exists(os.path.join(root, '__init__.py')): self._index_package(root, location)
[ "def", "index_path", "(", "self", ",", "root", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "root", ")", "if", "os", ".", "path", ".", "splitext", "(", "basename", ")", "[", "0", "]", "!=", "'__init__'", "and", "basename", "....
41.615385
17.846154
def create(cls, messageType, extended, hopsleft=3, hopsmax=3): """Create message flags. messageType: integter 0 to 7: MESSAGE_TYPE_DIRECT_MESSAGE = 0 MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1 MESSAGE_TYPE_ALL_LINK_CLEANUP = 2 MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3 MESSAGE_TYPE_BROADCAST_MESSAGE = 4 MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5 MESSAGE_TYPE_ALL_LINK_BROADCAST = 6 MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7 extended: 1 for extended, 0 for standard hopsleft: int 0 - 3 hopsmax: int 0 - 3 """ flags = MessageFlags(None) if messageType < 8: flags._messageType = messageType else: flags._messageType = messageType >> 5 if extended in [0, 1, True, False]: if extended: flags._extended = 1 else: flags._extended = 0 else: flags._extended = extended >> 4 flags._hopsLeft = hopsleft flags._hopsMax = hopsmax return flags
[ "def", "create", "(", "cls", ",", "messageType", ",", "extended", ",", "hopsleft", "=", "3", ",", "hopsmax", "=", "3", ")", ":", "flags", "=", "MessageFlags", "(", "None", ")", "if", "messageType", "<", "8", ":", "flags", ".", "_messageType", "=", "m...
34.903226
10.483871
async def access_log_middleware(app, handler): """Log each request in structured event log.""" event_log = app.get('smartmob.event_log') or structlog.get_logger() clock = app.get('smartmob.clock') or timeit.default_timer # Keep the request arrival time to ensure we get intuitive logging of # events. arrival_time = datetime.utcnow().replace(tzinfo=timezone.utc) async def access_log(request): ref = clock() try: response = await handler(request) event_log.info( 'http.access', path=request.path, outcome=response.status, duration=(clock()-ref), request=request.get('x-request-id', '?'), **{'@timestamp': arrival_time} ) return response except web.HTTPException as error: event_log.info( 'http.access', path=request.path, outcome=error.status, duration=(clock()-ref), request=request.get('x-request-id', '?'), **{'@timestamp': arrival_time} ) raise except Exception: event_log.info( 'http.access', path=request.path, outcome=500, duration=(clock()-ref), request=request.get('x-request-id', '?'), **{'@timestamp': arrival_time} ) raise return access_log
[ "async", "def", "access_log_middleware", "(", "app", ",", "handler", ")", ":", "event_log", "=", "app", ".", "get", "(", "'smartmob.event_log'", ")", "or", "structlog", ".", "get_logger", "(", ")", "clock", "=", "app", ".", "get", "(", "'smartmob.clock'", ...
32.977778
15.8
def rejectEdit(self): """ Cancels the edit for this label. """ if self._lineEdit: self._lineEdit.hide() self.editingCancelled.emit()
[ "def", "rejectEdit", "(", "self", ")", ":", "if", "self", ".", "_lineEdit", ":", "self", ".", "_lineEdit", ".", "hide", "(", ")", "self", ".", "editingCancelled", ".", "emit", "(", ")" ]
26.857143
5.428571
def each_object_id(collection): """Yields each object ID in the given ``collection``. The objects are not loaded.""" c_path = collection_path(collection) paths = glob('%s/*.%s' % (c_path, _ext)) for path in paths: match = regex.match(r'.+/(.+)\.%s$' % _ext, path) yield match.groups()[0]
[ "def", "each_object_id", "(", "collection", ")", ":", "c_path", "=", "collection_path", "(", "collection", ")", "paths", "=", "glob", "(", "'%s/*.%s'", "%", "(", "c_path", ",", "_ext", ")", ")", "for", "path", "in", "paths", ":", "match", "=", "regex", ...
39.5
7.125
def convert_errno(e): """ Convert an errno value (as from an ``OSError`` or ``IOError``) into a standard SFTP result code. This is a convenience function for trapping exceptions in server code and returning an appropriate result. :param int e: an errno code, as from ``OSError.errno``. :return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``. """ if e == errno.EACCES: # permission denied return SFTP_PERMISSION_DENIED elif (e == errno.ENOENT) or (e == errno.ENOTDIR): # no such file return SFTP_NO_SUCH_FILE else: return SFTP_FAILURE
[ "def", "convert_errno", "(", "e", ")", ":", "if", "e", "==", "errno", ".", "EACCES", ":", "# permission denied", "return", "SFTP_PERMISSION_DENIED", "elif", "(", "e", "==", "errno", ".", "ENOENT", ")", "or", "(", "e", "==", "errno", ".", "ENOTDIR", ")", ...
39.117647
18.176471
def add_team_repo(repo_name, team_name, profile="github", permission=None): ''' Adds a repository to a team with team_name. repo_name The name of the repository to add. team_name The name of the team of which to add the repository. profile The name of the profile configuration to use. Defaults to ``github``. permission The permission for team members within the repository, can be 'pull', 'push' or 'admin'. If not specified, the default permission specified on the team will be used. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt myminion github.add_team_repo 'my_repo' 'team_name' .. versionadded:: 2016.11.0 ''' team = get_team(team_name, profile=profile) if not team: log.error('Team %s does not exist', team_name) return False try: client = _get_client(profile) organization = client.get_organization( _get_config_value(profile, 'org_name') ) team = organization.get_team(team['id']) repo = organization.get_repo(repo_name) except UnknownObjectException: log.exception('Resource not found: %s', team['id']) return False params = None if permission is not None: params = {'permission': permission} headers, data = team._requester.requestJsonAndCheck( "PUT", team.url + "/repos/" + repo._identity, input=params ) # Try to refresh cache list_team_repos(team_name, profile=profile, ignore_cache=True) return True
[ "def", "add_team_repo", "(", "repo_name", ",", "team_name", ",", "profile", "=", "\"github\"", ",", "permission", "=", "None", ")", ":", "team", "=", "get_team", "(", "team_name", ",", "profile", "=", "profile", ")", "if", "not", "team", ":", "log", ".",...
28.814815
23.222222
def from_credentials(credentials): """Returns a new API object from an existing Credentials object. :param credentials: The existing saved credentials. :type credentials: Credentials :return: A new API object populated with MyGeotab credentials. :rtype: API """ return API(username=credentials.username, password=credentials.password, database=credentials.database, session_id=credentials.session_id, server=credentials.server)
[ "def", "from_credentials", "(", "credentials", ")", ":", "return", "API", "(", "username", "=", "credentials", ".", "username", ",", "password", "=", "credentials", ".", "password", ",", "database", "=", "credentials", ".", "database", ",", "session_id", "=", ...
46.545455
18.818182
def fetch_interfaces(self, interface, way): """Get the list of charms that provides or requires this interface. @param interface The interface for the charm relation. @param way The type of relation, either "provides" or "requires". @return List of charms """ if not interface: return [] if way == 'requires': request = '&requires=' + interface else: request = '&provides=' + interface url = (self.url + '/search?' + 'include=charm-metadata&include=stats&include=supported-series' '&include=extra-info&include=bundle-unit-count' '&limit=1000&include=owner' + request) data = self._get(url) return data.json().values()
[ "def", "fetch_interfaces", "(", "self", ",", "interface", ",", "way", ")", ":", "if", "not", "interface", ":", "return", "[", "]", "if", "way", "==", "'requires'", ":", "request", "=", "'&requires='", "+", "interface", "else", ":", "request", "=", "'&pro...
40.473684
14.894737
def is_grounded_to_name(c: Concept, name: str, cutoff=0.7) -> bool: """ Check if a concept is grounded to a given name. """ return (top_grounding(c) == name) if is_well_grounded(c, cutoff) else False
[ "def", "is_grounded_to_name", "(", "c", ":", "Concept", ",", "name", ":", "str", ",", "cutoff", "=", "0.7", ")", "->", "bool", ":", "return", "(", "top_grounding", "(", "c", ")", "==", "name", ")", "if", "is_well_grounded", "(", "c", ",", "cutoff", "...
68.333333
22
def get(cls): """Get the current API key. if one has not been given via 'set' the env var STEAMODD_API_KEY will be checked instead. """ apikey = cls.__api_key or cls.__api_key_env_var if apikey: return apikey else: raise APIKeyMissingError("API key not set")
[ "def", "get", "(", "cls", ")", ":", "apikey", "=", "cls", ".", "__api_key", "or", "cls", ".", "__api_key_env_var", "if", "apikey", ":", "return", "apikey", "else", ":", "raise", "APIKeyMissingError", "(", "\"API key not set\"", ")" ]
29.909091
19.181818
def _initialize_buffers(self, view_size): """ Create the buffers to cache tile drawing :param view_size: (int, int): size of the draw area :return: None """ def make_rect(x, y): return Rect((x * tw, y * th), (tw, th)) tw, th = self.data.tile_size mw, mh = self.data.map_size buffer_tile_width = int(math.ceil(view_size[0] / tw) + 1) buffer_tile_height = int(math.ceil(view_size[1] / th) + 1) buffer_pixel_size = buffer_tile_width * tw, buffer_tile_height * th self.map_rect = Rect(0, 0, mw * tw, mh * th) self.view_rect.size = view_size self._previous_blit = Rect(self.view_rect) self._tile_view = Rect(0, 0, buffer_tile_width, buffer_tile_height) self._redraw_cutoff = 1 # TODO: optimize this value self._create_buffers(view_size, buffer_pixel_size) self._half_width = view_size[0] // 2 self._half_height = view_size[1] // 2 self._x_offset = 0 self._y_offset = 0 rects = [make_rect(*i) for i in product(range(buffer_tile_width), range(buffer_tile_height))] # TODO: figure out what depth -actually- does # values <= 8 tend to reduce performance self._layer_quadtree = quadtree.FastQuadTree(rects, 4) self.redraw_tiles(self._buffer)
[ "def", "_initialize_buffers", "(", "self", ",", "view_size", ")", ":", "def", "make_rect", "(", "x", ",", "y", ")", ":", "return", "Rect", "(", "(", "x", "*", "tw", ",", "y", "*", "th", ")", ",", "(", "tw", ",", "th", ")", ")", "tw", ",", "th...
39
19.485714
def readBED(basefilename, useMAFencoding=False,blocksize = 1, start = 0, nSNPs = SP.inf, startpos = None, endpos = None, order = 'F',standardizeSNPs=False,ipos = 2,bim=None,fam=None): ''' read [basefilename].bed,[basefilename].bim,[basefilename].fam -------------------------------------------------------------------------- Input: basefilename : string of the basename of [basename].bed, [basename].bim, and [basename].fam blocksize : load blocksize SNPs at a time (default 1) start : index of the first SNP to be loaded from the .bed-file (default 0) nSNPs : load nSNPs from the .bed file (default SP.inf, meaning all) startpos : starting position of the loaded genomic region[chr,bpdist] endpos : end-position of the loaded genomic region [chr,bpdist] order : memory layout of the returned SNP array (default 'F') 'F' : Fortran-style column-major array (SNP-major) 'C' : C-style row-major array (individual-major) standardizeSNPs : bool indeicator if the resulting SNP array is supposed to be zero-mean and unit-vatiance with mean imputed missing values (default False) ipos : the index of the position index to use (default 2) 1 : genomic distance 2 : base-pair distance useMAFencoding : if set to one, the minor allele is encoded with 2, the major allele with 0. otherwise, the plink coding is used (default False). 
-------------------------------------------------------------------------- Output dictionary: 'rs' : [S] array rs-numbers 'pos' : [S*3] array of positions [chromosome, genetic dist, basepair dist] 'snps' : [N*S] array of snp-data 'iid' : [N*2] array of family IDs and individual IDs -------------------------------------------------------------------------- ''' if bim is None: bim = readBIM(basefilename,usecols=(0,1,2,3)) if fam is None: fam = readFAM(basefilename,usecols=(0,1)) rs = bim[:,1] pos = SP.array(bim[:,(0,2,3)],dtype = 'float') if startpos is not None: #pdb.set_trace() i_c = pos[:,0]==startpos[0] i_largerbp = pos[:,ipos]>=startpos[ipos] start = which(i_c * i_largerbp) while (start-1 >= 0 and pos[start-1,ipos] == startpos[ipos]): start = start -1 i_c = pos[:,0]==endpos[0] i_smallerbp = pos[:,ipos]>=endpos[ipos] end = which(i_c * i_smallerbp) while (end+1 < pos.shape[0] and pos[end+1,ipos] == endpos[ipos]): end = end + 1 nSNPs = end - start if (nSNPs<=0) or (end==0) or (start<=0): ret = { 'pos':SP.zeros((0,3)), 'rs':SP.zeros((0)), 'iid':fam, 'snps':SP.zeros((fam.shape[0],0)) } return ret pass N = fam.shape[0] S = bim.shape[0] S_res = min(S,start + nSNPs) nSNPs = min(S-start,nSNPs) #if startpos is not None: #print("start: " + str(start)) #print("end: " + str(end)) #print("S_res: " + str(S_res)) #print("nSNPs: " + str(nSNPs)) if nSNPs<=0: ret = { 'rs' :rs[start:start], 'pos' :pos[start:start,:], #'snps' :SNPs[0:N,start:start], 'snps' :SP.zeros((N,0)), 'iid' :fam } return ret SNPs = SP.zeros(((SP.ceil(0.25*N)*4),nSNPs),order=order) bed = basefilename + '.bed' with open(bed, "rb") as f: mode = f.read(2) if mode != b'l\x1b': raise Exception('No valid binary PED file') mode = f.read(1) #\x01 = SNP major \x00 = individual major if mode != b'\x01': raise Exception('only SNP-major is implemented') startbit = SP.ceil(0.25*N)*start+3 f.seek(int(startbit)) for blockStart in SP.arange(0,nSNPs,blocksize, dtype=int): blockEnd = int(min(S,blockStart+blocksize)) Sblock = 
min(nSNPs-blockStart,blocksize) nbyte = int(SP.ceil(0.25*N)*Sblock) bytes = SP.array(bytearray(f.read(nbyte))).reshape((SP.ceil(0.25*N),Sblock),order='F') SNPs[3::4,blockStart:blockEnd][bytes>=64]=SP.nan SNPs[3::4,blockStart:blockEnd][bytes>=128]=1 SNPs[3::4,blockStart:blockEnd][bytes>=192]=2 bytes=SP.mod(bytes,64) SNPs[2::4,blockStart:blockEnd][bytes>=16]=SP.nan SNPs[2::4,blockStart:blockEnd][bytes>=32]=1 SNPs[2::4,blockStart:blockEnd][bytes>=48]=2 bytes=SP.mod(bytes,16) SNPs[1::4,blockStart:blockEnd][bytes>=4]=SP.nan SNPs[1::4,blockStart:blockEnd][bytes>=8]=1 SNPs[1::4,blockStart:blockEnd][bytes>=12]=2 bytes=SP.mod(bytes,4) SNPs[0::4,blockStart:blockEnd][bytes>=1]=SP.nan SNPs[0::4,blockStart:blockEnd][bytes>=2]=1 SNPs[0::4,blockStart:blockEnd][bytes>=3]=2 if 0: #the binary format as described in the documentation (seems wrong) SNPs[3::4][bytes>=128]=SP.nan SNPs[3::4][bytes>=192]=1 bytes=SP.mod(bytes,128) SNPs[3::4][bytes>=64]+=1 bytes=SP.mod(bytes,64) SNPs[2::4][bytes>=32]=SP.nan SNPs[2::4][bytes>=48]=1 bytes=SP.mod(bytes,32) SNPs[2::4][bytes>=16]+=1 bytes=SP.mod(bytes,16) SNPs[1::4][bytes>=8]=SP.nan SNPs[1::4][bytes>=12]=1 bytes=SP.mod(bytes,8) SNPs[1::4][bytes>=4]+=1 bytes=SP.mod(bytes,4) SNPs[0::4][bytes>=2]=SP.nan SNPs[0::4][bytes>=3]=1 bytes=SP.mod(bytes,2) SNPs[0::4][bytes>=1]+=1 snps = SNPs[0:N,:] if useMAFencoding: imaf = SP.sum(snps==2,axis=0)>SP.sum(snps==0,axis=0) snps[:,imaf] = 2 - snps[:,imaf] if standardizeSNPs: snps = standardize(snps) ret = { 'rs' :rs[start:S_res], 'pos' :pos[start:S_res,:], 'snps' :snps, 'iid' :fam } return ret
[ "def", "readBED", "(", "basefilename", ",", "useMAFencoding", "=", "False", ",", "blocksize", "=", "1", ",", "start", "=", "0", ",", "nSNPs", "=", "SP", ".", "inf", ",", "startpos", "=", "None", ",", "endpos", "=", "None", ",", "order", "=", "'F'", ...
40.94
18.966667
def from_edges(edges): """ Return DirectedGraph created from edges :param edges: :return: DirectedGraph """ dag = DirectedGraph() for _u, _v in edges: dag.add_edge(_u, _v) return dag
[ "def", "from_edges", "(", "edges", ")", ":", "dag", "=", "DirectedGraph", "(", ")", "for", "_u", ",", "_v", "in", "edges", ":", "dag", ".", "add_edge", "(", "_u", ",", "_v", ")", "return", "dag" ]
27.777778
10.444444
def list(self, cur_p=''): ''' View the list of the Log. ''' if cur_p == '': current_page_number = 1 else: current_page_number = int(cur_p) current_page_number = 1 if current_page_number < 1 else current_page_number pager_num = int(MLog.total_number() / CMS_CFG['list_num']) kwd = { 'pager': '', 'title': '', 'current_page': current_page_number, } if self.is_p: self.render('admin/log_ajax/user_list.html', kwd=kwd, user_list=MLog.query_all_user(), no_user_list=MLog.query_all(current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo) else: self.render('misc/log/user_list.html', kwd=kwd, user_list=MLog.query_all_user(), no_user_list=MLog.query_all(current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo)
[ "def", "list", "(", "self", ",", "cur_p", "=", "''", ")", ":", "if", "cur_p", "==", "''", ":", "current_page_number", "=", "1", "else", ":", "current_page_number", "=", "int", "(", "cur_p", ")", "current_page_number", "=", "1", "if", "current_page_number",...
35.212121
21.818182
def _watcher(self): """Watch out if we've been disconnected, in that case, kill all the jobs. """ while True: gevent.sleep(1.0) if not self.connected: for ns_name, ns in list(six.iteritems(self.active_ns)): ns.recv_disconnect() # Killing Socket-level jobs gevent.killall(self.jobs) break
[ "def", "_watcher", "(", "self", ")", ":", "while", "True", ":", "gevent", ".", "sleep", "(", "1.0", ")", "if", "not", "self", ".", "connected", ":", "for", "ns_name", ",", "ns", "in", "list", "(", "six", ".", "iteritems", "(", "self", ".", "active_...
32
13.230769
def outputs_of(self, partition_index): """The outputs of the partition at ``partition_index``. Note that this returns a tuple of element indices, since coarse- grained blackboxes may have multiple outputs. """ partition = self.partition[partition_index] outputs = set(partition).intersection(self.output_indices) return tuple(sorted(outputs))
[ "def", "outputs_of", "(", "self", ",", "partition_index", ")", ":", "partition", "=", "self", ".", "partition", "[", "partition_index", "]", "outputs", "=", "set", "(", "partition", ")", ".", "intersection", "(", "self", ".", "output_indices", ")", "return",...
43.444444
14.111111
def sign(self, message): """ >>> authlen = OmapiHMACMD5Authenticator.authlen >>> len(OmapiHMACMD5Authenticator(b"foo", 16*b"x").sign(b"baz")) == authlen True @type message: bytes @rtype: bytes @returns: a signature of length self.authlen """ return hmac.HMAC(self.key, message, digestmod=hashlib.md5).digest()
[ "def", "sign", "(", "self", ",", "message", ")", ":", "return", "hmac", ".", "HMAC", "(", "self", ".", "key", ",", "message", ",", "digestmod", "=", "hashlib", ".", "md5", ")", ".", "digest", "(", ")" ]
28.909091
19.454545
def save(self, nodedict, root=''): """ Save a node dictionary in the .hdf5 file, starting from the root dataset. A common application is to convert XML files into .hdf5 files, see the usage in :mod:`openquake.commands.to_hdf5`. :param nodedict: a dictionary with keys 'tag', 'attrib', 'text', 'nodes' """ setitem = super().__setitem__ getitem = super().__getitem__ tag = nodedict['tag'] text = nodedict.get('text', None) if hasattr(text, 'strip'): text = text.strip() attrib = nodedict.get('attrib', {}) path = '/'.join([root, tag]) nodes = nodedict.get('nodes', []) if text not in ('', None): # text=0 is stored try: setitem(path, text) except Exception as exc: sys.stderr.write('%s: %s\n' % (path, exc)) raise elif attrib and not nodes: setitem(path, numpy.nan) for subdict in _resolve_duplicates(nodes): self.save(subdict, path) if attrib: dset = getitem(path) for k, v in attrib.items(): dset.attrs[k] = maybe_encode(v)
[ "def", "save", "(", "self", ",", "nodedict", ",", "root", "=", "''", ")", ":", "setitem", "=", "super", "(", ")", ".", "__setitem__", "getitem", "=", "super", "(", ")", ".", "__getitem__", "tag", "=", "nodedict", "[", "'tag'", "]", "text", "=", "no...
37.4375
11.4375
def write(self, file_or_filename): """ Writes case data as CSV. """ if isinstance(file_or_filename, basestring): file = open(file_or_filename, "wb") else: file = file_or_filename self.writer = csv.writer(file) super(CSVWriter, self).write(file)
[ "def", "write", "(", "self", ",", "file_or_filename", ")", ":", "if", "isinstance", "(", "file_or_filename", ",", "basestring", ")", ":", "file", "=", "open", "(", "file_or_filename", ",", "\"wb\"", ")", "else", ":", "file", "=", "file_or_filename", "self", ...
28
12.818182
def walkSignalPorts(rootPort: LPort): """ recursively walk ports without any children """ if rootPort.children: for ch in rootPort.children: yield from walkSignalPorts(ch) else: yield rootPort
[ "def", "walkSignalPorts", "(", "rootPort", ":", "LPort", ")", ":", "if", "rootPort", ".", "children", ":", "for", "ch", "in", "rootPort", ".", "children", ":", "yield", "from", "walkSignalPorts", "(", "ch", ")", "else", ":", "yield", "rootPort" ]
25.777778
8.888889
def fermat_potential(self, x_image, y_image, x_source, y_source, kwargs_lens): """ fermat potential (negative sign means earlier arrival time) :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list """ if hasattr(self.lens_model, 'fermat_potential'): return self.lens_model.fermat_potential(x_image, y_image, x_source, y_source, kwargs_lens) else: raise ValueError("Fermat potential is not defined in multi-plane lensing. Please use single plane lens models.")
[ "def", "fermat_potential", "(", "self", ",", "x_image", ",", "y_image", ",", "x_source", ",", "y_source", ",", "kwargs_lens", ")", ":", "if", "hasattr", "(", "self", ".", "lens_model", ",", "'fermat_potential'", ")", ":", "return", "self", ".", "lens_model",...
56.6
29.933333
def assert_is_valid_key(key): """ Raise KeyError if a given config key violates any requirements. The requirements are the following and can be individually deactivated in ``sacred.SETTINGS.CONFIG_KEYS``: * ENFORCE_MONGO_COMPATIBLE (default: True): make sure the keys don't contain a '.' or start with a '$' * ENFORCE_JSONPICKLE_COMPATIBLE (default: True): make sure the keys do not contain any reserved jsonpickle tags This is very important. Only deactivate if you know what you are doing. * ENFORCE_STRING (default: False): make sure all keys are string. * ENFORCE_VALID_PYTHON_IDENTIFIER (default: False): make sure all keys are valid python identifiers. Parameters ---------- key: The key that should be checked Raises ------ KeyError: if the key violates any requirements """ if SETTINGS.CONFIG.ENFORCE_KEYS_MONGO_COMPATIBLE and ( isinstance(key, basestring) and ('.' in key or key[0] == '$')): raise KeyError('Invalid key "{}". Config-keys cannot ' 'contain "." or start with "$"'.format(key)) if SETTINGS.CONFIG.ENFORCE_KEYS_JSONPICKLE_COMPATIBLE and \ isinstance(key, basestring) and ( key in jsonpickle.tags.RESERVED or key.startswith('json://')): raise KeyError('Invalid key "{}". Config-keys cannot be one of the' 'reserved jsonpickle tags: {}' .format(key, jsonpickle.tags.RESERVED)) if SETTINGS.CONFIG.ENFORCE_STRING_KEYS and ( not isinstance(key, basestring)): raise KeyError('Invalid key "{}". Config-keys have to be strings, ' 'but was {}'.format(key, type(key))) if SETTINGS.CONFIG.ENFORCE_VALID_PYTHON_IDENTIFIER_KEYS and ( isinstance(key, basestring) and not PYTHON_IDENTIFIER.match(key)): raise KeyError('Key "{}" is not a valid python identifier' .format(key)) if SETTINGS.CONFIG.ENFORCE_KEYS_NO_EQUALS and ( isinstance(key, basestring) and '=' in key): raise KeyError('Invalid key "{}". Config keys may not contain an' 'equals sign ("=").'.format('='))
[ "def", "assert_is_valid_key", "(", "key", ")", ":", "if", "SETTINGS", ".", "CONFIG", ".", "ENFORCE_KEYS_MONGO_COMPATIBLE", "and", "(", "isinstance", "(", "key", ",", "basestring", ")", "and", "(", "'.'", "in", "key", "or", "key", "[", "0", "]", "==", "'$...
41.90566
22.509434
def _bind_target(self, target, ctx=None): """Method to override in order to specialize binding of target. :param target: target to bind. :param ctx: target ctx. :return: bound target. """ result = target try: # get annotations from target if exists. local_annotations = get_local_property( target, Annotation.__ANNOTATIONS_KEY__, [], ctx=ctx ) except TypeError: raise TypeError('target {0} must be hashable.'.format(target)) # if local_annotations do not exist, put them in target if not local_annotations: put_properties( target, properties={Annotation.__ANNOTATIONS_KEY__: local_annotations}, ctx=ctx ) # insert self at first position local_annotations.insert(0, self) # add target to self targets if target not in self.targets: self.targets.append(target) return result
[ "def", "_bind_target", "(", "self", ",", "target", ",", "ctx", "=", "None", ")", ":", "result", "=", "target", "try", ":", "# get annotations from target if exists.", "local_annotations", "=", "get_local_property", "(", "target", ",", "Annotation", ".", "__ANNOTAT...
29.823529
18.941176
def build_vars(path=None): """Build initial vars.""" init_vars = { "__name__": "__main__", "__package__": None, "reload": reload, } if path is not None: init_vars["__file__"] = fixpath(path) # put reserved_vars in for auto-completion purposes for var in reserved_vars: init_vars[var] = None return init_vars
[ "def", "build_vars", "(", "path", "=", "None", ")", ":", "init_vars", "=", "{", "\"__name__\"", ":", "\"__main__\"", ",", "\"__package__\"", ":", "None", ",", "\"reload\"", ":", "reload", ",", "}", "if", "path", "is", "not", "None", ":", "init_vars", "["...
31.615385
12.153846
def _get_bandgap_from_bands(energies, nelec): """Compute difference in conduction band min and valence band max""" nelec = int(nelec) valence = [x[nelec-1] for x in energies] conduction = [x[nelec] for x in energies] return max(min(conduction) - max(valence), 0.0)
[ "def", "_get_bandgap_from_bands", "(", "energies", ",", "nelec", ")", ":", "nelec", "=", "int", "(", "nelec", ")", "valence", "=", "[", "x", "[", "nelec", "-", "1", "]", "for", "x", "in", "energies", "]", "conduction", "=", "[", "x", "[", "nelec", ...
49.833333
8.5
def new_event(self, subject=None): """ Returns a new (unsaved) Event object :rtype: Event """ return self.event_constructor(parent=self, subject=subject, calendar_id=self.calendar_id)
[ "def", "new_event", "(", "self", ",", "subject", "=", "None", ")", ":", "return", "self", ".", "event_constructor", "(", "parent", "=", "self", ",", "subject", "=", "subject", ",", "calendar_id", "=", "self", ".", "calendar_id", ")" ]
35.428571
17
def context(self, identifier=None, meta=None): """ Get or create a context, with the given identifier and/or provenance meta data. A context can be used to add, update or delete objects in the store. """ return Context(self, identifier=identifier, meta=meta)
[ "def", "context", "(", "self", ",", "identifier", "=", "None", ",", "meta", "=", "None", ")", ":", "return", "Context", "(", "self", ",", "identifier", "=", "identifier", ",", "meta", "=", "meta", ")" ]
57.2
12.8
def create_session(self, lock_type=library.LockType.shared, session=None): """Lock this machine Arguments: lock_type - see IMachine.lock_machine for details session - optionally define a session object to lock this machine against. If not defined, a new ISession object is created to lock against return an ISession object """ if session is None: session = library.ISession() # NOTE: The following hack handles the issue of unknown machine state. # This occurs most frequently when a machine is powered off and # in spite waiting for the completion event to end, the state of # machine still raises the following Error: # virtualbox.library.VBoxErrorVmError: 0x80bb0003 (Failed to \ # get a console object from the direct session (Unknown \ # Status 0x80BB0002)) error = None for _ in range(10): try: self.lock_machine(session, lock_type) except Exception as exc: error = exc time.sleep(1) continue else: break else: if error is not None: raise Exception("Failed to create clone - %s" % error) return session
[ "def", "create_session", "(", "self", ",", "lock_type", "=", "library", ".", "LockType", ".", "shared", ",", "session", "=", "None", ")", ":", "if", "session", "is", "None", ":", "session", "=", "library", ".", "ISession", "(", ")", "# NOTE: The following ...
39.914286
19.485714
def insert_item(self): """Insert item""" index = self.currentIndex() if not index.isValid(): row = self.model.rowCount() else: row = index.row() data = self.model.get_data() if isinstance(data, list): key = row data.insert(row, '') elif isinstance(data, dict): key, valid = QInputDialog.getText(self, _( 'Insert'), _( 'Key:'), QLineEdit.Normal) if valid and to_text_string(key): key = try_to_eval(to_text_string(key)) else: return else: return value, valid = QInputDialog.getText(self, _('Insert'), _('Value:'), QLineEdit.Normal) if valid and to_text_string(value): self.new_value(key, try_to_eval(to_text_string(value)))
[ "def", "insert_item", "(", "self", ")", ":", "index", "=", "self", ".", "currentIndex", "(", ")", "if", "not", "index", ".", "isValid", "(", ")", ":", "row", "=", "self", ".", "model", ".", "rowCount", "(", ")", "else", ":", "row", "=", "index", ...
38.791667
14.958333
def find_visible_birthdays(request, data): """Return only the birthdays visible to current user. """ if request.user and (request.user.is_teacher or request.user.is_eighthoffice or request.user.is_eighth_admin): return data data['today']['users'] = [u for u in data['today']['users'] if u['public']] data['tomorrow']['users'] = [u for u in data['tomorrow']['users'] if u['public']] return data
[ "def", "find_visible_birthdays", "(", "request", ",", "data", ")", ":", "if", "request", ".", "user", "and", "(", "request", ".", "user", ".", "is_teacher", "or", "request", ".", "user", ".", "is_eighthoffice", "or", "request", ".", "user", ".", "is_eighth...
52.25
25.75
def get_mode(path, follow_symlinks=True): ''' Return the mode of a file path file or directory of which to get the mode follow_symlinks indicated if symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')
[ "def", "get_mode", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'mode'", ",", "''", ")...
22.7
24.5
def triangle_areas(p1,p2,p3): """Compute an array of triangle areas given three arrays of triangle pts p1,p2,p3 - three Nx2 arrays of points """ v1 = (p2 - p1).astype(np.float) v2 = (p3 - p1).astype(np.float) # Original: # cross1 = v1[:,1] * v2[:,0] # cross2 = v2[:,1] * v1[:,0] # a = (cross1-cross2) / 2 # Memory reduced: cross1 = v1[:, 1] cross1 *= v2[:, 0] cross2 = v2[:, 1] cross2 *= v1[:, 0] a = cross1 a -= cross2 a /= 2.0 del v1, v2, cross1, cross2 a = a.copy() # a is a view on v1; shed one dimension. a = np.abs(a) # # Handle small round-off errors # a[a<np.finfo(np.float32).eps] = 0 return a
[ "def", "triangle_areas", "(", "p1", ",", "p2", ",", "p3", ")", ":", "v1", "=", "(", "p2", "-", "p1", ")", ".", "astype", "(", "np", ".", "float", ")", "v2", "=", "(", "p3", "-", "p1", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ori...
25.481481
16.037037
def by_population_density(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.population_density.name, ascending=False, returns=DEFAULT_LIMIT): """ Search zipcode information by population density range. `population density` is `population per square miles on land` """ return self.query( population_density_lower=lower, population_density_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
[ "def", "by_population_density", "(", "self", ",", "lower", "=", "-", "1", ",", "upper", "=", "2", "**", "31", ",", "zipcode_type", "=", "ZipcodeType", ".", "Standard", ",", "sort_by", "=", "SimpleZipcode", ".", "population_density", ".", "name", ",", "asce...
40.666667
14.444444
def setup_opt_parser(): """ Setup the optparser @returns: opt_parser.OptionParser """ #pylint: disable-msg=C0301 #line too long usage = "usage: %prog [options]" opt_parser = optparse.OptionParser(usage=usage) opt_parser.add_option("--version", action='store_true', dest= "yolk_version", default=False, help= "Show yolk version and exit.") opt_parser.add_option("--debug", action='store_true', dest= "debug", default=False, help= "Show debugging information.") opt_parser.add_option("-q", "--quiet", action='store_true', dest= "quiet", default=False, help= "Show less output.") group_local = optparse.OptionGroup(opt_parser, "Query installed Python packages", "The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9") group_local.add_option("-l", "--list", action='store_true', dest= "show_all", default=False, help= "List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.") group_local.add_option("-a", "--activated", action='store_true', dest="show_active", default=False, help= 'List activated packages installed by distutils or ' + 'setuptools. Use PKG_SPEC to narrow results.') group_local.add_option("-n", "--non-activated", action='store_true', dest="show_non_active", default=False, help= 'List non-activated packages installed by distutils or ' + 'setuptools. Use PKG_SPEC to narrow results.') group_local.add_option("-m", "--metadata", action='store_true', dest= "metadata", default=False, help= 'Show all metadata for packages installed by ' + 'setuptools (use with -l -a or -n)') group_local.add_option("-f", "--fields", action="store", dest= "fields", default=False, help= 'Show specific metadata fields. 
' + '(use with -m or -M)') group_local.add_option("-d", "--depends", action='store', dest= "show_deps", metavar='PKG_SPEC', help= "Show dependencies for a package installed by " + "setuptools if they are available.") group_local.add_option("--entry-points", action='store', dest="show_entry_points", default=False, help= 'List entry points for a module. e.g. --entry-points nose.plugins', metavar="MODULE") group_local.add_option("--entry-map", action='store', dest="show_entry_map", default=False, help= 'List entry map for a package. e.g. --entry-map yolk', metavar="PACKAGE_NAME") group_pypi = optparse.OptionGroup(opt_parser, "PyPI (Cheese Shop) options", "The following options query the Python Package Index:") group_pypi.add_option("-C", "--changelog", action='store', dest="show_pypi_changelog", metavar='HOURS', default=False, help= "Show detailed ChangeLog for PyPI for last n hours. ") group_pypi.add_option("-D", "--download-links", action='store', metavar="PKG_SPEC", dest="show_download_links", default=False, help= "Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.") group_pypi.add_option("-F", "--fetch-package", action='store', metavar="PKG_SPEC", dest="fetch", default=False, help= "Download package source or egg. You can specify a file type with -T") group_pypi.add_option("-H", "--browse-homepage", action='store', metavar="PKG_SPEC", dest="browse_website", default=False, help= "Launch web browser at home page for package.") group_pypi.add_option("-I", "--pypi-index", action='store', dest="pypi_index", default=False, help= "Specify PyPI mirror for package index.") group_pypi.add_option("-L", "--latest-releases", action='store', dest="show_pypi_releases", metavar="HOURS", default=False, help= "Show PyPI releases for last n hours. ") group_pypi.add_option("-M", "--query-metadata", action='store', dest="query_metadata_pypi", default=False, metavar="PKG_SPEC", help= "Show metadata for a package listed on PyPI. 
Use -f to show particular fields.") group_pypi.add_option("-S", "", action="store", dest="pypi_search", default=False, help= "Search PyPI by spec and optional AND/OR operator.", metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>') group_pypi.add_option("-T", "--file-type", action="store", dest= "file_type", default="all", help= "You may specify 'source', 'egg', 'svn' or 'all' when using -D.") group_pypi.add_option("-U", "--show-updates", action='store_true', dest="show_updates", metavar='<PKG_NAME>', default=False, help= "Check PyPI for updates on package(s).") group_pypi.add_option("-V", "--versions-available", action= 'store', dest="versions_available", default=False, metavar='PKG_SPEC', help="Show available versions for given package " + "listed on PyPI.") opt_parser.add_option_group(group_local) opt_parser.add_option_group(group_pypi) # add opts from plugins all_plugins = [] for plugcls in load_plugins(others=True): plug = plugcls() try: plug.add_options(opt_parser) except AttributeError: pass return opt_parser
[ "def", "setup_opt_parser", "(", ")", ":", "#pylint: disable-msg=C0301", "#line too long", "usage", "=", "\"usage: %prog [options]\"", "opt_parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "usage", ")", "opt_parser", ".", "add_option", "(", "\"--versio...
49.632353
28.397059
def modify_prefix(arg, opts, shell_opts): """ Modify the prefix 'arg' with the options 'opts' """ modify_confirmed = shell_opts.force spec = { 'prefix': arg } v = get_vrf(opts.get('vrf_rt'), abort=True) spec['vrf_rt'] = v.rt res = Prefix.list(spec) if len(res) == 0: print("Prefix %s not found in %s." % (arg, vrf_format(v)), file=sys.stderr) return p = res[0] if 'prefix' in opts: p.prefix = opts['prefix'] if 'description' in opts: p.description = opts['description'] if 'comment' in opts: p.comment = opts['comment'] if 'tags' in opts: tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0] p.tags = {} for tag_name in tags: tag = Tag() tag.name = tag_name p.tags[tag_name] = tag if 'node' in opts: p.node = opts['node'] if 'type' in opts: p.type = opts['type'] if 'status' in opts: p.status = opts['status'] if 'country' in opts: p.country = opts['country'] if 'order_id' in opts: p.order_id = opts['order_id'] if 'customer_id' in opts: p.customer_id = opts['customer_id'] if 'vlan' in opts: p.vlan = opts['vlan'] if 'alarm_priority' in opts: p.alarm_priority = opts['alarm_priority'] if 'monitor' in opts: p.monitor = _str_to_bool(opts['monitor']) if 'expires' in opts: p.expires = opts['expires'] for avp in opts.get('extra-attribute', []): try: key, value = avp.split('=', 1) except ValueError: print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr) return p.avps[key] = value # Promt user if prefix has authoritative source != nipap if not modify_confirmed and p.authoritative_source.lower() != 'nipap': res = input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " % (p.prefix, vrf_format(p.vrf), p.authoritative_source)) # If the user declines, short-circuit... if res.lower() not in [ 'y', 'yes' ]: print("Operation aborted.") return try: p.save() except NipapError as exc: print("Could not save prefix changes: %s" % str(exc), file=sys.stderr) sys.exit(1) print("Prefix %s in %s saved." 
% (p.display_prefix, vrf_format(p.vrf)))
[ "def", "modify_prefix", "(", "arg", ",", "opts", ",", "shell_opts", ")", ":", "modify_confirmed", "=", "shell_opts", ".", "force", "spec", "=", "{", "'prefix'", ":", "arg", "}", "v", "=", "get_vrf", "(", "opts", ".", "get", "(", "'vrf_rt'", ")", ",", ...
30.897436
19.769231
def _af_annotate_and_filter(paired, items, in_file, out_file): """Populating FORMAT/AF, and dropping variants with AF<min_allele_fraction Strelka2 doesn't report exact AF for a variant, however it can be calculated as alt_counts/dp from existing fields: somatic snps: GT:DP:FDP:SDP:SUBDP:AU:CU:GU:TU dp=DP {ALT}U[0] = alt_counts(tier1,tier2) indels: GT:DP:DP2:TAR:TIR:TOR:DP50:FDP50:SUBDP50:BCN50 dp=DP TIR = alt_counts(tier1,tier2) germline snps: GT:GQ:GQX:DP:DPF:AD:ADF:ADR:SB:FT:PL(:PS) dp=sum(alt_counts) AD = ref_count,alt_counts indels: GT:GQ:GQX:DPI:AD:ADF:ADR:FT:PL(:PS) dp=sum(alt_counts) AD = ref_count,alt_counts """ data = paired.tumor_data if paired else items[0] min_freq = float(utils.get_in(data["config"], ("algorithm", "min_allele_fraction"), 10)) / 100.0 logger.debug("Filtering Strelka2 calls with allele fraction threshold of %s" % min_freq) ungz_out_file = "%s.vcf" % utils.splitext_plus(out_file)[0] if not utils.file_exists(ungz_out_file) and not utils.file_exists(ungz_out_file + ".gz"): with file_transaction(data, ungz_out_file) as tx_out_file: vcf = cyvcf2.VCF(in_file) vcf.add_format_to_header({ 'ID': 'AF', 'Description': 'Allele frequency, as calculated in bcbio: AD/DP (germline), <ALT>U/DP (somatic snps), ' 'TIR/DPI (somatic indels)', 'Type': 'Float', 'Number': '.'}) vcf.add_filter_to_header({ 'ID': 'MinAF', 'Description': 'Allele frequency is lower than %s%% ' % (min_freq*100) + ( '(configured in bcbio as min_allele_fraction)' if utils.get_in(data["config"], ("algorithm", "min_allele_fraction")) else '(default threshold in bcbio; override with min_allele_fraction in the algorithm section)')}) w = cyvcf2.Writer(tx_out_file, vcf) tumor_index = vcf.samples.index(data['description']) for rec in vcf: if paired: # somatic? if rec.is_snp: # snps? 
alt_counts = rec.format(rec.ALT[0] + 'U')[:,0] # {ALT}U=tier1_depth,tier2_depth else: # indels alt_counts = rec.format('TIR')[:,0] # TIR=tier1_depth,tier2_depth dp = rec.format('DP')[:,0] elif rec.format("AD") is not None: # germline? alt_counts = rec.format('AD')[:,1:] # AD=REF,ALT1,ALT2,... dp = np.sum(rec.format('AD')[:,0:], axis=1)[:, None] else: # germline gVCF record alt_counts, dp = (None, None) if dp is not None: with np.errstate(divide='ignore', invalid='ignore'): # ignore division by zero and put AF=.0 af = np.true_divide(alt_counts, dp) af[~np.isfinite(af)] = .0 # -inf inf NaN -> .0 rec.set_format('AF', af) if paired and np.all(af[tumor_index] < min_freq): vcfutils.cyvcf_add_filter(rec, 'MinAF') w.write_record(rec) w.close() return vcfutils.bgzip_and_index(ungz_out_file, data["config"])
[ "def", "_af_annotate_and_filter", "(", "paired", ",", "items", ",", "in_file", ",", "out_file", ")", ":", "data", "=", "paired", ".", "tumor_data", "if", "paired", "else", "items", "[", "0", "]", "min_freq", "=", "float", "(", "utils", ".", "get_in", "("...
62.111111
29.203704
def send(self, topic, kmsg): """ Send the message into the given topic :param str topic: a kafka topic :param ksr.transport.Message kmsg: Message to serialize :return: Execution result :rtype: kser.result.Result """ try: self.client.do_request( method="POST", params=dict(format="raw"), path="/topic/{}".format(topic), data=kmsg.MARSHMALLOW_SCHEMA.dump(kmsg) ) result = Result( uuid=kmsg.uuid, stdout="Message sent: {} ({})".format( kmsg.uuid, kmsg.entrypoint ) ) except Exception as exc: result = Result.from_exception(exc, kmsg.uuid) finally: # noinspection PyUnboundLocalVariable if result.retcode < 300: return self._onsuccess(kmsg=kmsg, result=result) else: return self._onerror(kmsg=kmsg, result=result)
[ "def", "send", "(", "self", ",", "topic", ",", "kmsg", ")", ":", "try", ":", "self", ".", "client", ".", "do_request", "(", "method", "=", "\"POST\"", ",", "params", "=", "dict", "(", "format", "=", "\"raw\"", ")", ",", "path", "=", "\"/topic/{}\"", ...
33.931034
17.172414
def allocate(self, dut_configuration_list, args=None): """ Allocates resources from available local devices. :param dut_configuration_list: List of ResourceRequirements objects :param args: Not used :return: AllocationContextList with allocated resources """ dut_config_list = dut_configuration_list.get_dut_configuration() # if we need one or more local hardware duts let's search attached # devices using DutDetection if not isinstance(dut_config_list, list): raise AllocationError("Invalid dut configuration format!") if next((item for item in dut_config_list if item.get("type") == "hardware"), False): self._available_devices = DutDetection().get_available_devices() if len(self._available_devices) < len(dut_config_list): raise AllocationError("Required amount of devices not available.") # Enumerate all required DUT's try: for dut_config in dut_config_list: if not self.can_allocate(dut_config.get_requirements()): raise AllocationError("Resource type is not supported") self._allocate(dut_config) except AllocationError: # Locally allocated don't need to be released any way for # now, so just re-raise the error raise alloc_list = AllocationContextList() res_id = None for conf in dut_config_list: if conf.get("type") == "mbed": res_id = conf.get("allocated").get("target_id") context = AllocationContext(resource_id=res_id, alloc_data=conf) alloc_list.append(context) alloc_list.set_dut_init_function("serial", init_generic_serial_dut) alloc_list.set_dut_init_function("process", init_process_dut) alloc_list.set_dut_init_function("mbed", init_mbed_dut) return alloc_list
[ "def", "allocate", "(", "self", ",", "dut_configuration_list", ",", "args", "=", "None", ")", ":", "dut_config_list", "=", "dut_configuration_list", ".", "get_dut_configuration", "(", ")", "# if we need one or more local hardware duts let's search attached", "# devices using ...
45.666667
22.142857
def wait_time(self, value): """ Setter for **self.__wait_time** attribute. :param value: Attribute value. :type value: int or float """ if value is not None: assert type(value) in (int, float), "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format( "wait_time", value) assert value >= 0, "'{0}' attribute: '{1}' need to be positive!".format("wait_time", value) self.__wait_time = value
[ "def", "wait_time", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "in", "(", "int", ",", "float", ")", ",", "\"'{0}' attribute: '{1}' type is not 'int' or 'float'!\"", ".", "format", "(", ...
36.846154
20.692308
def commit(self): """Commit a batch.""" assert self.batch is not None, "No active batch, call start() first" logger.debug("Comitting batch from %d sources...", len(self.batch)) # Determine item priority. by_priority = [] for name in self.batch.keys(): priority = self.priorities.get(name, self.default_priority) by_priority.append((priority, name)) for priority, name in sorted(by_priority, key=lambda key: key[0]): logger.debug("Processing items from '%s' (priority=%d)...", name, priority) items = self.batch[name] for handlers in items.values(): for agg, handler in handlers: try: if agg is None: handler() else: handler(agg) except Exception as error: # Log errors and proceed to evaluate the next handler. logger.exception("Error while invoking handler.") self.batch = None logger.debug("Batch committed.")
[ "def", "commit", "(", "self", ")", ":", "assert", "self", ".", "batch", "is", "not", "None", ",", "\"No active batch, call start() first\"", "logger", ".", "debug", "(", "\"Comitting batch from %d sources...\"", ",", "len", "(", "self", ".", "batch", ")", ")", ...
38.896552
19.758621
def scaled_tile(self, tile): '''return a scaled tile''' width = int(TILES_WIDTH / tile.scale) height = int(TILES_HEIGHT / tile.scale) scaled_tile = cv.CreateImage((width,height), 8, 3) full_tile = self.load_tile(tile) cv.Resize(full_tile, scaled_tile) return scaled_tile
[ "def", "scaled_tile", "(", "self", ",", "tile", ")", ":", "width", "=", "int", "(", "TILES_WIDTH", "/", "tile", ".", "scale", ")", "height", "=", "int", "(", "TILES_HEIGHT", "/", "tile", ".", "scale", ")", "scaled_tile", "=", "cv", ".", "CreateImage", ...
34.625
8.625
def add_path_with_storage_account(self, remote_path, storage_account): # type: (SourcePath, str, str) -> None """Add a path with an associated storage account :param SourcePath self: this :param str remote_path: remote path :param str storage_account: storage account to associate with path """ if len(self._path_map) >= 1: raise RuntimeError( 'cannot add multiple remote paths to SourcePath objects') rpath = blobxfer.util.normalize_azure_path(remote_path) self.add_path(rpath) self._path_map[rpath] = storage_account
[ "def", "add_path_with_storage_account", "(", "self", ",", "remote_path", ",", "storage_account", ")", ":", "# type: (SourcePath, str, str) -> None", "if", "len", "(", "self", ".", "_path_map", ")", ">=", "1", ":", "raise", "RuntimeError", "(", "'cannot add multiple re...
47.230769
12.692308
def get_authorizations_by_genus_type(self, authorization_genus_type): """Gets an ``AuthorizationList`` corresponding to the given authorization genus ``Type`` which does not include authorizations of genus types derived from the specified ``Type``. In plenary mode, the returned list contains all known authorizations or an error results. Otherwise, the returned list may contain only those authorizations that are accessible through this session. arg: authorization_genus_type (osid.type.Type): an authorization genus type return: (osid.authorization.AuthorizationList) - the returned ``Authorization`` list raise: NullArgument - ``authorization_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_genus_type # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('authorization', collection='Authorization', runtime=self._runtime) result = collection.find( dict({'genusTypeId': str(authorization_genus_type)}, **self._view_filter())).sort('_id', DESCENDING) return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_authorizations_by_genus_type", "(", "self", ",", "authorization_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONC...
56.464286
22.357143
def __makeShowColumnFunction(self, column_idx): """ Creates a function that shows or hides a column.""" show_column = lambda checked: self.setColumnHidden(column_idx, not checked) return show_column
[ "def", "__makeShowColumnFunction", "(", "self", ",", "column_idx", ")", ":", "show_column", "=", "lambda", "checked", ":", "self", ".", "setColumnHidden", "(", "column_idx", ",", "not", "checked", ")", "return", "show_column" ]
54.75
16
def github_tags_newer(github_repo, versions_file, update_majors): """ Get new tags from a github repository. Cannot use github API because it doesn't support chronological ordering of tags. @param github_repo: the github repository, e.g. 'drupal/drupal/'. @param versions_file: the file path where the versions database can be found. @param update_majors: major versions to update. If you want to update the 6.x and 7.x branch, you would supply a list which would look like ['6', '7'] @return: a boolean value indicating whether an update is needed @raise MissingMajorException: A new version from a newer major branch is exists, but will not be downloaded due to it not being in majors. """ github_repo = _github_normalize(github_repo) vf = VersionsFile(versions_file) current_highest = vf.highest_version_major(update_majors) tags_url = '%s%stags' % (GH, github_repo) resp = requests.get(tags_url) bs = BeautifulSoup(resp.text, 'lxml') gh_versions = [] for header in bs.find_all('h4'): tag = header.findChild('a') if not tag: continue # Ignore learn more header. gh_versions.append(tag.text.strip()) newer = _newer_tags_get(current_highest, gh_versions) return len(newer) > 0
[ "def", "github_tags_newer", "(", "github_repo", ",", "versions_file", ",", "update_majors", ")", ":", "github_repo", "=", "_github_normalize", "(", "github_repo", ")", "vf", "=", "VersionsFile", "(", "versions_file", ")", "current_highest", "=", "vf", ".", "highes...
41.516129
20.290323
def clone_bs4_elem(el): """Clone a bs4 tag before modifying it. Code from `http://stackoverflow.com/questions/23057631/clone-element-with -beautifulsoup` """ if isinstance(el, NavigableString): return type(el)(el) copy = Tag(None, el.builder, el.name, el.namespace, el.nsprefix) # work around bug where there is no builder set # https://bugs.launchpad.net/beautifulsoup/+bug/1307471 copy.attrs = dict(el.attrs) for attr in ('can_be_empty_element', 'hidden'): setattr(copy, attr, getattr(el, attr)) for child in el.contents: copy.append(clone_bs4_elem(child)) return copy
[ "def", "clone_bs4_elem", "(", "el", ")", ":", "if", "isinstance", "(", "el", ",", "NavigableString", ")", ":", "return", "type", "(", "el", ")", "(", "el", ")", "copy", "=", "Tag", "(", "None", ",", "el", ".", "builder", ",", "el", ".", "name", "...
34.833333
16.166667
def run(configobj=None): """TEAL interface for the `clean` function.""" clean(configobj['input'], suffix=configobj['suffix'], stat=configobj['stat'], maxiter=configobj['maxiter'], sigrej=configobj['sigrej'], lower=configobj['lower'], upper=configobj['upper'], binwidth=configobj['binwidth'], mask1=configobj['mask1'], mask2=configobj['mask2'], dqbits=configobj['dqbits'], rpt_clean=configobj['rpt_clean'], atol=configobj['atol'], cte_correct=configobj['cte_correct'], clobber=configobj['clobber'], verbose=configobj['verbose'])
[ "def", "run", "(", "configobj", "=", "None", ")", ":", "clean", "(", "configobj", "[", "'input'", "]", ",", "suffix", "=", "configobj", "[", "'suffix'", "]", ",", "stat", "=", "configobj", "[", "'stat'", "]", ",", "maxiter", "=", "configobj", "[", "'...
37.111111
4.666667
def encode(self, word, max_length=4, zero_pad=True): """Return the SoundexBR encoding of a word. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The SoundexBR code Examples -------- >>> soundex_br('Oliveira') 'O416' >>> soundex_br('Almeida') 'A453' >>> soundex_br('Barbosa') 'B612' >>> soundex_br('Araújo') 'A620' >>> soundex_br('Gonçalves') 'G524' >>> soundex_br('Goncalves') 'G524' """ word = unicode_normalize('NFKD', text_type(word.upper())) word = ''.join(c for c in word if c in self._uc_set) if word[:2] == 'WA': first = 'V' elif word[:1] == 'K' and word[1:2] in {'A', 'O', 'U'}: first = 'C' elif word[:1] == 'C' and word[1:2] in {'I', 'E'}: first = 'S' elif word[:1] == 'G' and word[1:2] in {'E', 'I'}: first = 'J' elif word[:1] == 'Y': first = 'I' elif word[:1] == 'H': first = word[1:2] word = word[1:] else: first = word[:1] sdx = first + word[1:].translate(self._trans) sdx = self._delete_consecutive_repeats(sdx) sdx = sdx.replace('0', '') if zero_pad: sdx += '0' * max_length return sdx[:max_length]
[ "def", "encode", "(", "self", ",", "word", ",", "max_length", "=", "4", ",", "zero_pad", "=", "True", ")", ":", "word", "=", "unicode_normalize", "(", "'NFKD'", ",", "text_type", "(", "word", ".", "upper", "(", ")", ")", ")", "word", "=", "''", "."...
26.42623
19.540984
def n2s(n): """ Number to string. """ s = hex(n)[2:].rstrip("L") if len(s) % 2 != 0: s = "0" + s return s.decode("hex")
[ "def", "n2s", "(", "n", ")", ":", "s", "=", "hex", "(", "n", ")", "[", "2", ":", "]", ".", "rstrip", "(", "\"L\"", ")", "if", "len", "(", "s", ")", "%", "2", "!=", "0", ":", "s", "=", "\"0\"", "+", "s", "return", "s", ".", "decode", "("...
18
13.75
def store(self, loc, df): """Store dataframe in the given location. Store some arbitrary dataframe: >>> data.store('my_data', df) Now recover it from the global store. >>> data.my_data ... """ path = "%s.%s" % (self._root / "processed" / loc, FILE_EXTENSION) WRITE_DF(df, path, **WRITE_DF_OPTS) self._cache[loc] = df
[ "def", "store", "(", "self", ",", "loc", ",", "df", ")", ":", "path", "=", "\"%s.%s\"", "%", "(", "self", ".", "_root", "/", "\"processed\"", "/", "loc", ",", "FILE_EXTENSION", ")", "WRITE_DF", "(", "df", ",", "path", ",", "*", "*", "WRITE_DF_OPTS", ...
25.733333
18.4
def authenticated(func): """ Decorator to check if Smappee's access token has expired. If it has, use the refresh token to request a new access token """ @wraps(func) def wrapper(*args, **kwargs): self = args[0] if self.refresh_token is not None and \ self.token_expiration_time <= dt.datetime.utcnow(): self.re_authenticate() return func(*args, **kwargs) return wrapper
[ "def", "authenticated", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "if", "self", ".", "refresh_token", "is", "not", "None", "an...
33.307692
13.307692
async def handle_json_response(responses): """ get the json data response :param responses: the json response :return the json data without 'root' node """ json_data = {} if responses.status != 200: err_msg = HttpProcessingError(code=responses.status, message=await responses.json()) logging.error("Wallabag: aiohttp error {err_msg}".format( err_msg=err_msg)) else: try: json_data = responses.json() except ClientResponseError as e: # sometimes json_data does not return any json() without # any error. This is due to the grabbing URL which "rejects" # the URL logging.error("Wallabag: aiohttp error {code} {message}" .format(code=e.code, message=e.message)) return await json_data
[ "async", "def", "handle_json_response", "(", "responses", ")", ":", "json_data", "=", "{", "}", "if", "responses", ".", "status", "!=", "200", ":", "err_msg", "=", "HttpProcessingError", "(", "code", "=", "responses", ".", "status", ",", "message", "=", "a...
43.090909
15.909091
def curve(self): """Curve of the super helix.""" return HelicalCurve.pitch_and_radius( self.major_pitch, self.major_radius, handedness=self.major_handedness)
[ "def", "curve", "(", "self", ")", ":", "return", "HelicalCurve", ".", "pitch_and_radius", "(", "self", ".", "major_pitch", ",", "self", ".", "major_radius", ",", "handedness", "=", "self", ".", "major_handedness", ")" ]
38.6
8.4
def start_service(addr, n): """ Start a service """ s = Subscriber(addr) s.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, 'test') started = time.time() for _ in range(n): msg = s.socket.recv() s.socket.close() duration = time.time() - started print('Raw SUB service stats:') util.print_stats(n, duration) return
[ "def", "start_service", "(", "addr", ",", "n", ")", ":", "s", "=", "Subscriber", "(", "addr", ")", "s", ".", "socket", ".", "set_string_option", "(", "nanomsg", ".", "SUB", ",", "nanomsg", ".", "SUB_SUBSCRIBE", ",", "'test'", ")", "started", "=", "time...
24.133333
19.533333
def get_cursor(cls, cursor_type=_CursorType.PLAIN) -> Cursor: """ Yields: new client-side cursor from existing db connection pool """ _cur = None if cls._use_pool: _connection_source = yield from cls.get_pool() else: _connection_source = yield from aiopg.connect(echo=False, **cls._connection_params) if cursor_type == _CursorType.PLAIN: _cur = yield from _connection_source.cursor() if cursor_type == _CursorType.NAMEDTUPLE: _cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) if cursor_type == _CursorType.DICT: _cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.DictCursor) if not cls._use_pool: _cur = cursor_context_manager(_connection_source, _cur) return _cur
[ "def", "get_cursor", "(", "cls", ",", "cursor_type", "=", "_CursorType", ".", "PLAIN", ")", "->", "Cursor", ":", "_cur", "=", "None", "if", "cls", ".", "_use_pool", ":", "_connection_source", "=", "yield", "from", "cls", ".", "get_pool", "(", ")", "else"...
40.227273
24.681818
def get_segmentation(X, rank, R, rank_labels, R_labels, niter=300, bound_idxs=None, in_labels=None): """ Gets the segmentation (boundaries and labels) from the factorization matrices. Parameters ---------- X: np.array() Features matrix (e.g. chromagram) rank: int Rank of decomposition R: int Size of the median filter for activation matrix niter: int Number of iterations for k-means bound_idxs : list Use previously found boundaries (None to detect them) in_labels : np.array() List of input labels (None to compute them) Returns ------- bounds_idx: np.array Bound indeces found labels: np.array Indeces of the labels representing the similarity between segments. """ #import pylab as plt #plt.imshow(X, interpolation="nearest", aspect="auto") #plt.show() # Find non filtered boundaries compute_bounds = True if bound_idxs is None else False while True: if bound_idxs is None: try: F, G = cnmf(X, rank, niter=niter, hull=False) except: return np.empty(0), [1] # Filter G G = filter_activation_matrix(G.T, R) if bound_idxs is None: bound_idxs = np.where(np.diff(G) != 0)[0] + 1 # Increase rank if we found too few boundaries if compute_bounds and len(np.unique(bound_idxs)) <= 2: rank += 1 bound_idxs = None else: break # Add first and last boundary bound_idxs = np.concatenate(([0], bound_idxs, [X.shape[1] - 1])) bound_idxs = np.asarray(bound_idxs, dtype=int) if in_labels is None: labels = compute_labels(X, rank_labels, R_labels, bound_idxs, niter=niter) else: labels = np.ones(len(bound_idxs) - 1) #plt.imshow(G[:, np.newaxis], interpolation="nearest", aspect="auto") #for b in bound_idxs: #plt.axvline(b, linewidth=2.0, color="k") #plt.show() return bound_idxs, labels
[ "def", "get_segmentation", "(", "X", ",", "rank", ",", "R", ",", "rank_labels", ",", "R_labels", ",", "niter", "=", "300", ",", "bound_idxs", "=", "None", ",", "in_labels", "=", "None", ")", ":", "#import pylab as plt", "#plt.imshow(X, interpolation=\"nearest\",...
29.811594
20.681159
def process(*args, **kwargs): """Runs the decorated function in a concurrent process, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. The timeout parameter will set a maximum execution time for the decorated function. If the execution exceeds the timeout, the process will be stopped and the Future will raise TimeoutError. """ timeout = kwargs.get('timeout') # decorator without parameters if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return _process_wrapper(args[0], timeout) else: # decorator with parameters if timeout is not None and not isinstance(timeout, (int, float)): raise TypeError('Timeout expected to be None or integer or float') def decorating_function(function): return _process_wrapper(function, timeout) return decorating_function
[ "def", "process", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ")", "# decorator without parameters", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0...
36.115385
21.884615
def shot_taskfile_sel_changed(self, tf): """Callback for when the version selection has changed :param tf: the selected taskfileinfo :type tf: :class:`TaskFileInfo` | None :returns: None :rtype: None :raises: None """ self.shot_open_pb.setEnabled(bool(tf)) # only allow new, if the releasetype is work # only allow new, if there is a shot. if there is a shot, there should always be a task. enablenew = bool(self.browser.shotbrws.selected_indexes(1)) and self.browser.get_releasetype() == djadapter.RELEASETYPES['work'] self.shot_save_pb.setEnabled(enablenew) self.shot_descriptor_le.setEnabled(enablenew) self.shot_comment_pte.setEnabled(enablenew) self.update_descriptor_le(self.shot_descriptor_le, tf)
[ "def", "shot_taskfile_sel_changed", "(", "self", ",", "tf", ")", ":", "self", ".", "shot_open_pb", ".", "setEnabled", "(", "bool", "(", "tf", ")", ")", "# only allow new, if the releasetype is work", "# only allow new, if there is a shot. if there is a shot, there should alwa...
47.588235
19.411765
def get_dev_run_config(devid, auth, url): """ function takes the devId of a specific device and issues a RESTFUL call to get the most current running config file as known by the HP IMC Base Platform ICC module for the target device. :param devid: int or str value of the target device :return: str which contains the entire content of the target device running configuration. If the device is not currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not supported on this device" """ # checks to see if the imc credentials are already available get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devid) + "/currentRun" f_url = url + get_dev_run_url # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=HEADERS) # print (r.status_code) try: if r.status_code == 200: try: run_conf = (json.loads(r.text))['content'] return run_conf except: return "This features is no supported on this device" except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + " get_dev_run_config: An Error has occured"
[ "def", "get_dev_run_config", "(", "devid", ",", "auth", ",", "url", ")", ":", "# checks to see if the imc credentials are already available", "get_dev_run_url", "=", "\"/imcrs/icc/deviceCfg/\"", "+", "str", "(", "devid", ")", "+", "\"/currentRun\"", "f_url", "=", "url",...
52.041667
25.208333
def search_weekday(weekday, jd, direction, offset): '''Determine the Julian date for the next or previous weekday''' return weekday_before(weekday, jd + (direction * offset))
[ "def", "search_weekday", "(", "weekday", ",", "jd", ",", "direction", ",", "offset", ")", ":", "return", "weekday_before", "(", "weekday", ",", "jd", "+", "(", "direction", "*", "offset", ")", ")" ]
60
20
def add_parameter(self, parameter): """Adds the specified parameter value to the list.""" if parameter.name.lower() not in self.paramorder: self.paramorder.append(parameter.name.lower()) self._parameters[parameter.name.lower()] = parameter
[ "def", "add_parameter", "(", "self", ",", "parameter", ")", ":", "if", "parameter", ".", "name", ".", "lower", "(", ")", "not", "in", "self", ".", "paramorder", ":", "self", ".", "paramorder", ".", "append", "(", "parameter", ".", "name", ".", "lower",...
54.2
12
def removeNode(self, node): """ Remove the given node from the graph if it exists """ ident = self.getIdent(node) if ident is not None: self.graph.hide_node(ident)
[ "def", "removeNode", "(", "self", ",", "node", ")", ":", "ident", "=", "self", ".", "getIdent", "(", "node", ")", "if", "ident", "is", "not", "None", ":", "self", ".", "graph", ".", "hide_node", "(", "ident", ")" ]
29.857143
6.714286
def _init_unhandled(l,inited_matrix): ''' from elist.elist import * from elist.jprint import pobj l = [1,[4],2,[3,[5,6]]] desc_matrix = init_desc_matrix(l) unhandled = _init_unhandled(l,desc_matrix) unhandled_data = unhandled['data'] unhandled_desc = unhandled['desc'] unhandled_data[0] unhandled_desc[0] unhandled_data[1] unhandled_desc[1] ''' root_desc = inited_matrix[0][0] unhandled = {'data':[],'desc':[]} length = l.__len__() root_desc['sons_count'] = length root_desc['leaf_son_paths'] = [] root_desc['non_leaf_son_paths'] = [] if(length == 0): pass else: inited_matrix.append([]) level = inited_matrix[1] for i in range(0,length): child = l[i] desc = copy.deepcopy(root_desc) desc = reset_parent_desc_template(desc) desc['depth'] = 1 desc['breadth'] = i desc['parent_breadth_path'] = copy.deepcopy(desc['breadth_path']) desc['breadth_path'].append(i) desc['sib_seq'] = i desc['parent_path'] = copy.deepcopy(desc['path']) desc['path'].append(i) if(i==0): pass else: desc['lsib_path'] = [i-1] if(i == (length - 1)): pass else: desc['rsib_path'] = [i+1] if(is_leaf(child)): desc['leaf'] = True desc['sons_count'] = 0 root_desc['leaf_son_paths'].append(copy.deepcopy(desc['path'])) else: desc['leaf'] = False root_desc['non_leaf_son_paths'].append(copy.deepcopy(desc['path'])) unhandled['data'].append(child) unhandled['desc'].append(desc) level.append(desc) return(unhandled)
[ "def", "_init_unhandled", "(", "l", ",", "inited_matrix", ")", ":", "root_desc", "=", "inited_matrix", "[", "0", "]", "[", "0", "]", "unhandled", "=", "{", "'data'", ":", "[", "]", ",", "'desc'", ":", "[", "]", "}", "length", "=", "l", ".", "__len_...
34.072727
12.836364
def get_related_node(self, node, relation): """Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, returns the name of the node it points to. Otherwise, returns None.""" G = self.G for edge in G.edges(node): to = edge[1] to_relation = G.edges[node, to]['relation'] if to_relation == relation: return to return None
[ "def", "get_related_node", "(", "self", ",", "node", ",", "relation", ")", ":", "G", "=", "self", ".", "G", "for", "edge", "in", "G", ".", "edges", "(", "node", ")", ":", "to", "=", "edge", "[", "1", "]", "to_relation", "=", "G", ".", "edges", ...
40.583333
14.5
def strip_cols(self): """ Remove leading and trailing white spaces in columns names :example: ``ds.strip_cols()`` """ cols = {} skipped = [] for col in self.df.columns.values: try: cols[col] = col.strip() except Exception: skipped.append(str(col)) self.df = self.df.rename(columns=cols) self.ok("White spaces removed in columns names") if len(skipped) > 0: self.info("Skipped columns", ','.join( skipped), "while removing white spaces")
[ "def", "strip_cols", "(", "self", ")", ":", "cols", "=", "{", "}", "skipped", "=", "[", "]", "for", "col", "in", "self", ".", "df", ".", "columns", ".", "values", ":", "try", ":", "cols", "[", "col", "]", "=", "col", ".", "strip", "(", ")", "...
32.444444
12.666667
def prepare_data(fm, max_back, dur_cap=700): ''' Computes angle and length differences up to given order and deletes suspiciously long fixations. Input fm: Fixmat Fixmat for which to comput angle and length differences max_back: Int Computes delta angle and amplitude up to order max_back. dur_cap: Int Longest allowed fixation duration Output fm: Fixmat Filtered fixmat that aligns to the other outputs. durations: ndarray Duration for each fixation in fm forward_angle: Angle between previous and next saccade. ''' durations = np.roll(fm.end - fm.start, 1).astype(float) angles, lengths, ads, lds = anglendiff(fm, roll=max_back, return_abs=True) # durations and ads are aligned in a way that an entry in ads # encodes the angle of the saccade away from a fixation in # durations forward_angle = abs(reshift(ads[0])).astype(float) ads = [abs(reshift(a)) for a in ads] # Now filter out weird fixation durations id_in = durations > dur_cap durations[id_in] = np.nan forward_angle[id_in] = np.nan return fm, durations, forward_angle, ads, lds
[ "def", "prepare_data", "(", "fm", ",", "max_back", ",", "dur_cap", "=", "700", ")", ":", "durations", "=", "np", ".", "roll", "(", "fm", ".", "end", "-", "fm", ".", "start", ",", "1", ")", ".", "astype", "(", "float", ")", "angles", ",", "lengths...
35.352941
20.176471
def get_object(self, name): """Retrieve an object by a dotted name relative to the model.""" parts = name.split(".") space = self.spaces[parts.pop(0)] if parts: return space.get_object(".".join(parts)) else: return space
[ "def", "get_object", "(", "self", ",", "name", ")", ":", "parts", "=", "name", ".", "split", "(", "\".\"", ")", "space", "=", "self", ".", "spaces", "[", "parts", ".", "pop", "(", "0", ")", "]", "if", "parts", ":", "return", "space", ".", "get_ob...
34.625
12.625
def malloc(self, dwSize, lpAddress = None): """ Allocates memory into the address space of the process. @see: L{free} @type dwSize: int @param dwSize: Number of bytes to allocate. @type lpAddress: int @param lpAddress: (Optional) Desired address for the newly allocated memory. This is only a hint, the memory could still be allocated somewhere else. @rtype: int @return: Address of the newly allocated memory. @raise WindowsError: On error an exception is raised. """ hProcess = self.get_handle(win32.PROCESS_VM_OPERATION) return win32.VirtualAllocEx(hProcess, lpAddress, dwSize)
[ "def", "malloc", "(", "self", ",", "dwSize", ",", "lpAddress", "=", "None", ")", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_VM_OPERATION", ")", "return", "win32", ".", "VirtualAllocEx", "(", "hProcess", ",", "lpAddress", "...
32.136364
21.227273
def connect(self, host, port): '''Connect to the provided host, port''' conn = connection.Connection(host, port, reconnection_backoff=self._reconnection_backoff, auth_secret=self._auth_secret, timeout=self._connect_timeout, **self._identify_options) if conn.alive(): conn.setblocking(0) self.add(conn) return conn
[ "def", "connect", "(", "self", ",", "host", ",", "port", ")", ":", "conn", "=", "connection", ".", "Connection", "(", "host", ",", "port", ",", "reconnection_backoff", "=", "self", ".", "_reconnection_backoff", ",", "auth_secret", "=", "self", ".", "_auth_...
36.636364
10.636364
def run(self, view, submitters, commenters): """Run stats and return the created Submission.""" logger.info('Analyzing subreddit: {}'.format(self.subreddit)) if view in TOP_VALUES: callback = self.fetch_top_submissions else: callback = self.fetch_recent_submissions view = int(view) self.fetch_submissions(callback, view) if not self.submissions: logger.warning('No submissions were found.') return return self.publish_results(view, submitters, commenters)
[ "def", "run", "(", "self", ",", "view", ",", "submitters", ",", "commenters", ")", ":", "logger", ".", "info", "(", "'Analyzing subreddit: {}'", ".", "format", "(", "self", ".", "subreddit", ")", ")", "if", "view", "in", "TOP_VALUES", ":", "callback", "=...
35
18.75
def insert_many(self, doc_or_docs, **kwargs): """Insert method """ check = kwargs.pop('check', True) if check is True: for i in doc_or_docs: i = self._valid_record(i) return self.__collect.insert_many(doc_or_docs, **kwargs)
[ "def", "insert_many", "(", "self", ",", "doc_or_docs", ",", "*", "*", "kwargs", ")", ":", "check", "=", "kwargs", ".", "pop", "(", "'check'", ",", "True", ")", "if", "check", "is", "True", ":", "for", "i", "in", "doc_or_docs", ":", "i", "=", "self"...
31.555556
10.333333
def _escape(self, text): """Escape text according to self.escape""" ret = EMPTYSTRING if text is None else str(text) if self.escape: return html_escape(ret) else: return ret
[ "def", "_escape", "(", "self", ",", "text", ")", ":", "ret", "=", "EMPTYSTRING", "if", "text", "is", "None", "else", "str", "(", "text", ")", "if", "self", ".", "escape", ":", "return", "html_escape", "(", "ret", ")", "else", ":", "return", "ret" ]
31.857143
14.142857
def _parse_properties(response, result_class): ''' Extracts out resource properties and metadata information. Ignores the standard http headers. ''' if response is None or response.headers is None: return None props = result_class() for key, value in response.headers.items(): info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) if info: if info[0] is None: setattr(props, info[1], info[2](value)) else: attr = getattr(props, info[0]) setattr(attr, info[1], info[2](value)) if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None: props.blob_tier = _to_upper_str(props.blob_tier) return props
[ "def", "_parse_properties", "(", "response", ",", "result_class", ")", ":", "if", "response", "is", "None", "or", "response", ".", "headers", "is", "None", ":", "return", "None", "props", "=", "result_class", "(", ")", "for", "key", ",", "value", "in", "...
35.454545
23
def fix_e125(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] spaces_to_add = num_indent_spaces - len(_get_indentation(target)) indent = len(_get_indentation(target)) modified_lines = [] while len(_get_indentation(self.source[line_index])) >= indent: self.source[line_index] = (' ' * spaces_to_add + self.source[line_index]) modified_lines.append(1 + line_index) # Line indexed at 1. line_index -= 1 return modified_lines
[ "def", "fix_e125", "(", "self", ",", "result", ")", ":", "num_indent_spaces", "=", "int", "(", "result", "[", "'info'", "]", ".", "split", "(", ")", "[", "1", "]", ")", "line_index", "=", "result", "[", "'line'", "]", "-", "1", "target", "=", "self...
41.294118
19.588235
def is_total_slice(item, shape): """Determine whether `item` specifies a complete slice of array with the given `shape`. Used to optimize __setitem__ operations on the Chunk class.""" # N.B., assume shape is normalized if item == Ellipsis: return True if item == slice(None): return True if isinstance(item, slice): item = item, if isinstance(item, tuple): return all( (isinstance(s, slice) and ((s == slice(None)) or ((s.stop - s.start == l) and (s.step in [1, None])))) for s, l in zip(item, shape) ) else: raise TypeError('expected slice or tuple of slices, found %r' % item)
[ "def", "is_total_slice", "(", "item", ",", "shape", ")", ":", "# N.B., assume shape is normalized", "if", "item", "==", "Ellipsis", ":", "return", "True", "if", "item", "==", "slice", "(", "None", ")", ":", "return", "True", "if", "isinstance", "(", "item", ...
31.818182
17.5
def attach_parser(subparser): """Given a subparser, build and return the server parser.""" return subparser.add_parser( 'server', help='Run a bottle based server', parents=[ CONFIG.build_parser( add_help=False, # might need conflict_handler ), ], )
[ "def", "attach_parser", "(", "subparser", ")", ":", "return", "subparser", ".", "add_parser", "(", "'server'", ",", "help", "=", "'Run a bottle based server'", ",", "parents", "=", "[", "CONFIG", ".", "build_parser", "(", "add_help", "=", "False", ",", "# migh...
28.083333
14.916667
def _getuie(self): """Return data as unsigned interleaved exponential-Golomb code. Raises InterpretError if bitstring is not a single exponential-Golomb code. """ try: value, newpos = self._readuie(0) if value is None or newpos != self.len: raise ReadError except ReadError: raise InterpretError("Bitstring is not a single interleaved exponential-Golomb code.") return value
[ "def", "_getuie", "(", "self", ")", ":", "try", ":", "value", ",", "newpos", "=", "self", ".", "_readuie", "(", "0", ")", "if", "value", "is", "None", "or", "newpos", "!=", "self", ".", "len", ":", "raise", "ReadError", "except", "ReadError", ":", ...
35.692308
22.307692
def apply(self, doc, clear, **kwargs): """Extract mentions from the given Document. :param doc: A document to process. :param clear: Whether or not to clear the existing database entries. """ # Reattach doc with the current session or DetachedInstanceError happens doc = self.session.merge(doc) # Iterate over each mention class for i, mention_class in enumerate(self.mention_classes): tc_to_insert = defaultdict(list) # Generate TemporaryContexts that are children of the context using # the mention_space and filtered by the Matcher self.child_context_set.clear() for tc in self.matchers[i].apply(self.mention_spaces[i].apply(doc)): rec = tc._load_id_or_insert(self.session) if rec: tc_to_insert[tc._get_table()].append(rec) self.child_context_set.add(tc) # Bulk insert temporary contexts for table, records in tc_to_insert.items(): stmt = insert(table.__table__).values(records) self.session.execute(stmt) # Generates and persists mentions mention_args = {"document_id": doc.id} for child_context in self.child_context_set: # Assemble mention arguments for arg_name in mention_class.__argnames__: mention_args[arg_name + "_id"] = child_context.id # Checking for existence if not clear: q = select([mention_class.id]) for key, value in list(mention_args.items()): q = q.where(getattr(mention_class, key) == value) mention_id = self.session.execute(q).first() if mention_id is not None: continue # Add Mention to session yield mention_class(**mention_args)
[ "def", "apply", "(", "self", ",", "doc", ",", "clear", ",", "*", "*", "kwargs", ")", ":", "# Reattach doc with the current session or DetachedInstanceError happens", "doc", "=", "self", ".", "session", ".", "merge", "(", "doc", ")", "# Iterate over each mention clas...
44.272727
17.431818
def get_queryset(self): """ Override :meth:``get_queryset`` """ queryset = super(MultipleIDMixin, self).get_queryset() if hasattr(self.request, 'query_params'): ids = dict(self.request.query_params).get('ids[]') else: ids = dict(self.request.QUERY_PARAMS).get('ids[]') if ids: queryset = queryset.filter(id__in=ids) return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "MultipleIDMixin", ",", "self", ")", ".", "get_queryset", "(", ")", "if", "hasattr", "(", "self", ".", "request", ",", "'query_params'", ")", ":", "ids", "=", "dict", "(", "se...
35
14.333333
def _format_help(self, scope_info): """Return a help message for the options registered on this object. Assumes that self._help_request is an instance of OptionsHelp. :param scope_info: Scope of the options. """ scope = scope_info.scope description = scope_info.description show_recursive = self._help_request.advanced show_advanced = self._help_request.advanced color = sys.stdout.isatty() help_formatter = HelpFormatter(scope, show_recursive, show_advanced, color) return '\n'.join(help_formatter.format_options(scope, description, self._options.get_parser(scope).option_registrations_iter()))
[ "def", "_format_help", "(", "self", ",", "scope_info", ")", ":", "scope", "=", "scope_info", ".", "scope", "description", "=", "scope_info", ".", "description", "show_recursive", "=", "self", ".", "_help_request", ".", "advanced", "show_advanced", "=", "self", ...
42.333333
16.6
def rank(self): """ Return the rank of the given hypergraph. @rtype: int @return: Rank of graph. """ max_rank = 0 for each in self.hyperedges(): if len(self.edge_links[each]) > max_rank: max_rank = len(self.edge_links[each]) return max_rank
[ "def", "rank", "(", "self", ")", ":", "max_rank", "=", "0", "for", "each", "in", "self", ".", "hyperedges", "(", ")", ":", "if", "len", "(", "self", ".", "edge_links", "[", "each", "]", ")", ">", "max_rank", ":", "max_rank", "=", "len", "(", "sel...
25.357143
15.357143
def to_etree(self): """ creates an etree element of a ``SaltEdge`` that mimicks a SaltXMI <edges> element """ layers_attrib_val = ' '.join('//@layers.{}'.format(layer_id) for layer_id in self.layers) attribs = { '{{{pre}}}type'.format(pre=NAMESPACES['xsi']): self.xsi_type, 'source': "//@nodes.{}".format(self.source), 'target': "//@nodes.{}".format(self.target), 'layers': layers_attrib_val} # an edge might belong to one or more layers non_empty_attribs = {key: val for (key, val) in attribs.items() if val is not None} E = ElementMaker() edge = E('edges', non_empty_attribs) label_elements = (label.to_etree() for label in self.labels) edge.extend(label_elements) return edge
[ "def", "to_etree", "(", "self", ")", ":", "layers_attrib_val", "=", "' '", ".", "join", "(", "'//@layers.{}'", ".", "format", "(", "layer_id", ")", "for", "layer_id", "in", "self", ".", "layers", ")", "attribs", "=", "{", "'{{{pre}}}type'", ".", "format", ...
39.863636
18.772727
def PauliY(local_space, states=None): r""" Pauli-type Y-operator .. math:: \hat{\sigma}_x = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} on an arbitrary two-level system. See :func:`PauliX` """ local_space, states = _get_pauli_args(local_space, states) g, e = states return I * (-LocalSigma.create(g, e, hs=local_space) + LocalSigma.create(e, g, hs=local_space))
[ "def", "PauliY", "(", "local_space", ",", "states", "=", "None", ")", ":", "local_space", ",", "states", "=", "_get_pauli_args", "(", "local_space", ",", "states", ")", "g", ",", "e", "=", "states", "return", "I", "*", "(", "-", "LocalSigma", ".", "cre...
23.052632
20.578947
def averageSequenceAccuracy(self, minOverlap, maxOverlap, firstStat=0, lastStat=None):
    """
    For each object, decide whether the TM uniquely classified it by checking
    that the number of predictedActive cells are in an acceptable range.

    :param minOverlap: smallest acceptable number of predictedActive cells
        (inclusive) for an element to count as correctly sparse.
    :param maxOverlap: largest acceptable number of predictedActive cells
        (inclusive).
    :param firstStat: index of the first statistics record to include.
    :param lastStat: index one past the last statistics record to include
        (``None`` means all remaining records).
    :returns: tuple ``(sparsityAccuracy, classificationAccuracy)`` — both
        fractions in [0, 1]; ``(0.0, 0.0)`` if no elements were examined.
    """
    numCorrectSparsity = 0.0
    numCorrectClassifications = 0.0
    numStats = 0.0

    # For each object or sequence we classify every point or element
    #
    # A sequence element is considered correctly classified only if the number
    # of predictedActive cells is within a reasonable range and if the KNN
    # Classifier correctly classifies the active cell representation as
    # belonging to this sequence.
    #
    # A point on an object is considered correctly classified by the TM if the
    # number of predictedActive cells is within range.
    for stats in self.statistics[firstStat:lastStat]:

        # Keep running total of how often the number of predictedActive cells are
        # in the range. We always skip the first (unpredictable) count.
        predictedActiveStat = stats["TM PredictedActive C0"][1:]
        TMRepresentationStat = stats["TM Full Representation C0"][1:]
        # print "\n-----------"
        # print stats["object"], predictedActiveStat
        for numCells, sdr in zip(predictedActiveStat, TMRepresentationStat):
            numStats += 1.0
            # print "numCells: ", numCells
            # NOTE(review): assumes numCells is an integer count — a float
            # would never test True against range(); confirm upstream.
            if numCells in range(minOverlap, maxOverlap + 1):
                numCorrectSparsity += 1.0

                # Check KNN Classifier
                # Build a dense binary vector from the sorted SDR indices
                # before handing it to the classifier.
                sdr = list(sdr)
                sdr.sort()
                dense = numpy.zeros(self.numTMCells)
                dense[sdr] = 1.0
                (winner, inferenceResult, dist, categoryDist) = \
                    self.classifier.infer(dense)
                # print sdr, winner, stats['object'], winner == stats['object']
                # print categoryDist
                # print
                if winner == stats['object']:
                    numCorrectClassifications += 1.0

    # Avoid division by zero when the selected slice is empty.
    if numStats == 0:
        return 0.0, 0.0

    return ((numCorrectSparsity / numStats),
            (numCorrectClassifications / numStats))
[ "def", "averageSequenceAccuracy", "(", "self", ",", "minOverlap", ",", "maxOverlap", ",", "firstStat", "=", "0", ",", "lastStat", "=", "None", ")", ":", "numCorrectSparsity", "=", "0.0", "numCorrectClassifications", "=", "0.0", "numStats", "=", "0.0", "# For eac...
39.384615
20.730769
def headloss_fric_rect(FlowRate, Width, DistCenter, Length, Nu, PipeRough, openchannel):
    """Return the major head loss due to wall shear in a rectangular channel.

    This equation applies to both laminar and turbulent flows.
    """
    # Only Length is validated here; the remaining inputs are checked by
    # the helper functions called below.
    ut.check_range([Length, ">0", "Length"])
    friction = fric_rect(FlowRate, Width, DistCenter, Nu,
                         PipeRough, openchannel)
    hyd_radius = radius_hydraulic(Width, DistCenter, openchannel).magnitude
    flow_area = Width * DistCenter
    # Darcy-Weisbach form with hydraulic radius (D_h = 4 * R_h).
    return (friction * Length / (4 * hyd_radius)
            * FlowRate**2
            / (2 * gravity.magnitude * flow_area**2))
[ "def", "headloss_fric_rect", "(", "FlowRate", ",", "Width", ",", "DistCenter", ",", "Length", ",", "Nu", ",", "PipeRough", ",", "openchannel", ")", ":", "#Checking input validity - inputs not checked here are checked by", "#functions this function calls.", "ut", ".", "che...
45.2
19.2
def load_time_data(self, RelativeChannelNo=None, SampleFreq=None, PointsToLoad=-1, NormaliseByMonitorOutput=False):
    """
    Loads the time and voltage data and the wave description from the
    associated file.

    The file format is chosen from the filename extension:
    ``.raw``/``.trc`` (LeCroy binary), ``.bin`` (Saleae logger),
    ``.dat`` (NI5122 daq card), ``.tdms`` (NI7961 FPGA) and ``.txt``
    (LeCroy CSV export). Sets ``self.voltage``, ``self.SampleFreq``,
    ``self.timeStart``, ``self.timeEnd``, ``self.timeStep`` and
    ``self.time``.

    Parameters
    ----------
    RelativeChannelNo : int, optional
        Channel number for loading saleae data files
        If loading a .dat file produced by the labview NI5122 daq card, used to
        specifiy the channel number if two channels where saved, if left None with
        .dat files it will assume that the file to load only contains one channel.
        If NormaliseByMonitorOutput is True then RelativeChannelNo specifies the
        monitor channel for loading a .dat file produced by the labview NI5122 daq card.
    SampleFreq : float, optional
        Manual selection of sample frequency for loading labview NI5122 daq files
    PointsToLoad : int, optional
        Number of first points to read. -1 means all points (i.e., the complete file)
        WORKS WITH NI5122 DATA SO FAR ONLY!!!
    NormaliseByMonitorOutput : bool, optional
        If True the particle signal trace will be divided by the monitor output, which is
        specified by the channel number set in the RelativeChannelNo parameter.
        WORKS WITH NI5122 DATA SO FAR ONLY!!!
    """
    # Read the whole file as raw bytes; the per-format parsers below
    # interpret it.
    f = open(self.filepath, 'rb')
    raw = f.read()
    f.close()
    FileExtension = self.filepath.split('.')[-1]
    if FileExtension == "raw" or FileExtension == "trc":
        with _warnings.catch_warnings():  # supress missing data warning and raise a missing
            # data warning from optoanalysis with the filepath
            _warnings.simplefilter("ignore")
            waveDescription, timeParams, self.voltage, _, missingdata = optoanalysis.LeCroy.InterpretWaveform(raw, noTimeArray=True)
        if missingdata:
            _warnings.warn("Waveform not of expected length. File {} may be missing data.".format(self.filepath))
        self.SampleFreq = (1 / waveDescription["HORIZ_INTERVAL"])
    elif FileExtension == "bin":
        if RelativeChannelNo == None:
            raise ValueError("If loading a .bin file from the Saleae data logger you must enter a relative channel number to load")
        timeParams, self.voltage = optoanalysis.Saleae.interpret_waveform(raw, RelativeChannelNo)
        self.SampleFreq = 1/timeParams[2]
    elif FileExtension == "dat":  # for importing a file written by labview using the NI5122 daq card
        if SampleFreq == None:
            raise ValueError("If loading a .dat file from the NI5122 daq card you must enter a SampleFreq")
        if RelativeChannelNo == None:
            # single-channel file: every big-endian int16 belongs to the signal
            self.voltage = _np.fromfile(self.filepath, dtype='>h', count=PointsToLoad)
        elif RelativeChannelNo != None:
            # two interleaved channels: even/odd samples alternate between them
            filedata = _np.fromfile(self.filepath, dtype='>h', count=PointsToLoad)
            if NormaliseByMonitorOutput == True:
                # RelativeChannelNo names the MONITOR channel here; the other
                # channel is taken as the particle signal and divided by it.
                if RelativeChannelNo == 0:
                    monitorsignal = filedata[:len(filedata):2]
                    self.voltage = filedata[1:len(filedata):2]/monitorsignal
                elif RelativeChannelNo == 1:
                    monitorsignal = filedata[1:len(filedata):2]
                    self.voltage = filedata[:len(filedata):2]/monitorsignal
            elif NormaliseByMonitorOutput == False:
                self.voltage = filedata[RelativeChannelNo:len(filedata):2]
        timeParams = (0, (len(self.voltage)-1)/SampleFreq, 1/SampleFreq)
        self.SampleFreq = 1/timeParams[2]
    elif FileExtension == "tdms":  # for importing a file written by labview form the NI7961 FPGA with the RecordDataPC VI
        if SampleFreq == None:
            raise ValueError("If loading a .tdms file saved from the FPGA you must enter a SampleFreq")
        self.SampleFreq = SampleFreq
        dt = 1/self.SampleFreq
        FIFO_SIZE = 262143  # this is the maximum size of the DMA FIFO on the NI 7961 FPGA with the NI 5781 DAC card
        tdms_file = _TdmsFile(self.filepath)
        channel = tdms_file.object('Measured_Data', 'data')
        data = channel.data[FIFO_SIZE:]  # dump first 1048575 points of data
        # as this is the values that had already filled the buffer
        # from before when the record code started running
        # NOTE(review): comment says 1048575 but FIFO_SIZE is 262143 — the
        # code drops FIFO_SIZE points; confirm which count is intended.
        volts_per_unit = 2/(2**14)  # 14-bit ADC spanning 2 V
        self.voltage = volts_per_unit*data
        timeParams = [0, (data.shape[0]-1)*dt, dt]
    elif FileExtension == 'txt':  # .txt file created by LeCroy Oscilloscope
        data = []
        with open(self.filepath, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                data.append(row)
        # first 5 rows are header; remaining rows are (time, voltage) pairs
        data = _np.array(data[5:]).astype(float).transpose()
        t0 = data[0][0]
        tend = data[0][-1]
        dt = data[0][1] - data[0][0]
        self.SampleFreq = 1/dt
        self.voltage = data[1]
        del(data)
        timeParams = [t0, tend, dt]
    else:
        raise ValueError("Filetype not supported")
    # All branches leave (start, end, step) in timeParams; build the
    # matching time axis.
    startTime, endTime, Timestep = timeParams
    self.timeStart = startTime
    self.timeEnd = endTime
    self.timeStep = Timestep
    self.time = frange(startTime, endTime+Timestep, Timestep)
    return None
[ "def", "load_time_data", "(", "self", ",", "RelativeChannelNo", "=", "None", ",", "SampleFreq", "=", "None", ",", "PointsToLoad", "=", "-", "1", ",", "NormaliseByMonitorOutput", "=", "False", ")", ":", "f", "=", "open", "(", "self", ".", "filepath", ",", ...
57.957447
25.574468
def rm_anova2(dv=None, within=None, subject=None, data=None, export_filename=None):
    """Two-way repeated measures ANOVA.

    This is an internal function. The main call to this function should be done
    by the :py:func:`pingouin.rm_anova` function.

    Parameters
    ----------
    dv : string
        Name of column containing the dependant variable.
    within : list
        Names of column containing the two within factor
        (e.g. ['Time', 'Treatment'])
    subject : string
        Name of column containing the subject identifier.
    data : pandas DataFrame
        DataFrame
    export_filename : string
        Filename (without extension) for the output file.
        If None, do not export the table.
        By default, the file will be created in the current python console
        directory. To change that, specify the filename with full path.

    Returns
    -------
    aov : DataFrame
        ANOVA summary ::

        'Source' : Name of the within-group factors
        'ddof1' : Degrees of freedom (numerator)
        'ddof2' : Degrees of freedom (denominator)
        'F' : F-value
        'p-unc' : Uncorrected p-value
        'np2' : Partial eta-square effect size
        'eps' : Greenhouse-Geisser epsilon factor (= index of sphericity)
        'p-GG-corr' : Greenhouse-Geisser corrected p-value
    """
    a, b = within

    # Validate the dataframe
    _check_dataframe(dv=dv, within=within, data=data, subject=subject,
                     effects='within')

    # Remove NaN
    if data[[subject, a, b, dv]].isnull().any().any():
        data = remove_rm_na(dv=dv, subject=subject, within=[a, b],
                            data=data[[subject, a, b, dv]])
    # Collapse to the mean (that this is also done in remove_rm_na)
    data = data.groupby([subject, a, b]).mean().reset_index()

    assert not data[a].isnull().any(), 'Cannot have NaN in %s' % a
    assert not data[b].isnull().any(), 'Cannot have NaN in %s' % b
    assert not data[subject].isnull().any(), 'Cannot have NaN in %s' % subject

    # Group sizes and grandmean
    n_a = data[a].nunique()
    n_b = data[b].nunique()
    n_s = data[subject].nunique()
    mu = data[dv].mean()

    # Groupby means (marginal means per subject, per factor level, and per
    # factor-level combination — the building blocks of the sums of squares)
    grp_s = data.groupby(subject)[dv].mean()
    grp_a = data.groupby([a])[dv].mean()
    grp_b = data.groupby([b])[dv].mean()
    grp_ab = data.groupby([a, b])[dv].mean()
    grp_as = data.groupby([a, subject])[dv].mean()
    grp_bs = data.groupby([b, subject])[dv].mean()

    # Sums of squares (interaction SS obtained by subtracting the main
    # effects from the corresponding cell SS)
    ss_tot = np.sum((data[dv] - mu)**2)
    ss_s = (n_a * n_b) * np.sum((grp_s - mu)**2)
    ss_a = (n_b * n_s) * np.sum((grp_a - mu)**2)
    ss_b = (n_a * n_s) * np.sum((grp_b - mu)**2)
    ss_ab_er = n_s * np.sum((grp_ab - mu)**2)
    ss_ab = ss_ab_er - ss_a - ss_b
    ss_as_er = n_b * np.sum((grp_as - mu)**2)
    ss_as = ss_as_er - ss_s - ss_a
    ss_bs_er = n_a * np.sum((grp_bs - mu)**2)
    ss_bs = ss_bs_er - ss_s - ss_b
    ss_abs = ss_tot - ss_a - ss_b - ss_s - ss_ab - ss_as - ss_bs

    # DOF
    df_a = n_a - 1
    df_b = n_b - 1
    df_s = n_s - 1
    df_ab_er = n_a * n_b - 1
    df_ab = df_ab_er - df_a - df_b
    df_as_er = n_a * n_s - 1
    df_as = df_as_er - df_s - df_a
    df_bs_er = n_b * n_s - 1
    df_bs = df_bs_er - df_s - df_b
    df_tot = n_a * n_b * n_s - 1
    df_abs = df_tot - df_a - df_b - df_s - df_ab - df_as - df_bs

    # Mean squares
    ms_a = ss_a / df_a
    ms_b = ss_b / df_b
    ms_ab = ss_ab / df_ab
    ms_as = ss_as / df_as
    ms_bs = ss_bs / df_bs
    ms_abs = ss_abs / df_abs

    # F-values (each effect tested against its own effect-by-subject error)
    f_a = ms_a / ms_as
    f_b = ms_b / ms_bs
    f_ab = ms_ab / ms_abs

    # P-values (survival function of the F distribution)
    p_a = f(df_a, df_as).sf(f_a)
    p_b = f(df_b, df_bs).sf(f_b)
    p_ab = f(df_ab, df_abs).sf(f_ab)

    # Partial eta-square
    eta_a = (f_a * df_a) / (f_a * df_a + df_as)
    eta_b = (f_b * df_b) / (f_b * df_b + df_bs)
    eta_ab = (f_ab * df_ab) / (f_ab * df_ab + df_abs)

    # Epsilon (sphericity index, from subject-by-level pivot tables)
    piv_a = data.pivot_table(index=subject, columns=a, values=dv)
    piv_b = data.pivot_table(index=subject, columns=b, values=dv)
    piv_ab = data.pivot_table(index=subject, columns=[a, b], values=dv)
    eps_a = epsilon(piv_a, correction='gg')
    eps_b = epsilon(piv_b, correction='gg')
    # For the interaction term we use the lower bound epsilon factor
    # (same behavior as described on real-statistics.com)
    # TODO: understand how the Greenhouse-Geisser epsilon is computed for
    # the interaction term.
    eps_ab = epsilon(piv_ab, correction='lb')

    # Greenhouse-Geisser correction: scale the degrees of freedom by
    # epsilon, clamped at a minimum of 1.
    df_a_c, df_as_c = [np.maximum(d * eps_a, 1.) for d in (df_a, df_as)]
    df_b_c, df_bs_c = [np.maximum(d * eps_b, 1.) for d in (df_b, df_bs)]
    df_ab_c, df_abs_c = [np.maximum(d * eps_ab, 1.) for d in (df_ab, df_abs)]
    p_a_corr = f(df_a_c, df_as_c).sf(f_a)
    p_b_corr = f(df_b_c, df_bs_c).sf(f_b)
    p_ab_corr = f(df_ab_c, df_abs_c).sf(f_ab)

    # Create dataframe
    aov = pd.DataFrame({'Source': [a, b, a + ' * ' + b],
                        'SS': [ss_a, ss_b, ss_ab],
                        'ddof1': [df_a, df_b, df_ab],
                        'ddof2': [df_as, df_bs, df_abs],
                        'MS': [ms_a, ms_b, ms_ab],
                        'F': [f_a, f_b, f_ab],
                        'p-unc': [p_a, p_b, p_ab],
                        'p-GG-corr': [p_a_corr, p_b_corr, p_ab_corr],
                        'np2': [eta_a, eta_b, eta_ab],
                        'eps': [eps_a, eps_b, eps_ab],
                        })

    col_order = ['Source', 'SS', 'ddof1', 'ddof2', 'MS', 'F', 'p-unc',
                 'p-GG-corr', 'np2', 'eps']

    # Round
    aov[['SS', 'MS', 'F', 'eps', 'np2']] = aov[['SS', 'MS', 'F', 'eps',
                                                'np2']].round(3)
    aov = aov.reindex(columns=col_order)

    # Export to .csv
    if export_filename is not None:
        _export_table(aov, export_filename)
    return aov
[ "def", "rm_anova2", "(", "dv", "=", "None", ",", "within", "=", "None", ",", "subject", "=", "None", ",", "data", "=", "None", ",", "export_filename", "=", "None", ")", ":", "a", ",", "b", "=", "within", "# Validate the dataframe", "_check_dataframe", "(...
35.170732
18.804878
def create_local_arrays_on_cube(cube, reified_arrays=None, array_stitch=None, array_factory=None):
    """
    Create arrays on the supplied hypercube and stitch them onto it.

    Arguments
    ---------
    cube : HyperCube
        A hypercube object on which arrays will be created.

    Keyword Arguments
    -----------------
    reified_arrays : dictionary
        Dictionary keyed on array name and array definitions.
        If None, obtained from cube.arrays(reify=True)
    array_stitch : function
        A function with signature ``array_stitch(cube, arrays)`` that
        attaches the created array objects to the cube. If None, a
        default descriptor-based stitcher is used.
    array_factory : function
        A function with signature ``array_factory(shape, dtype)`` that
        constructs an array. If None, numpy.empty will be used.

    Returns
    -------
    A dictionary of array objects, keyed on array names
    """
    # Fall back to the default descriptor-based stitching method
    if array_stitch is None:
        array_stitch = generic_stitch

    # Derive the array definitions from the cube when not supplied
    if reified_arrays is None:
        reified_arrays = cube.arrays(reify=True)

    local_arrays = create_local_arrays(reified_arrays,
                                       array_factory=array_factory)
    array_stitch(cube, local_arrays)

    return local_arrays
[ "def", "create_local_arrays_on_cube", "(", "cube", ",", "reified_arrays", "=", "None", ",", "array_stitch", "=", "None", ",", "array_factory", "=", "None", ")", ":", "# Create a default array stitching method", "if", "array_stitch", "is", "None", ":", "array_stitch", ...
37.630435
23.804348
def clean_up(group, identifier, date):
    """Delete all of a groups local mbox, index, and state files.

    Removal is best-effort: files that cannot be deleted (or that vanish
    between globbing and removal) are silently skipped.

    :type group: str
    :param group: group name
    :type identifier: str
    :param identifier: the identifier for the given group.
    :type date: str
    :param date: date component of the mbox filenames to remove.
    :rtype: bool
    :returns: True
    """
    #log.error('exception raised, cleaning up files.')
    def _remove_matching(pattern):
        # Remove every file matching the glob pattern, ignoring failures.
        for path in glob(pattern):
            #log.error('removing {f}'.format(f=path))
            try:
                os.remove(path)
            except OSError:
                continue

    _remove_matching('{g}.{d}.mbox*'.format(g=group, d=date))
    _remove_matching('{id}_state.json'.format(id=identifier))
    return True
[ "def", "clean_up", "(", "group", ",", "identifier", ",", "date", ")", ":", "#log.error('exception raised, cleaning up files.')", "glob_pat", "=", "'{g}.{d}.mbox*'", ".", "format", "(", "g", "=", "group", ",", "d", "=", "date", ")", "for", "f", "in", "glob", ...
26.137931
18.551724
def plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff, filename=None):
    ''' Fit spline to the profile histogramed data, differentiate, determine MPV and plot.

    Parameters
    ----------
    x_p, y_p : array like
        data points (x,y)
    y_p_e : array like
        error bars in y
    smoothed_data : array like
        spline-smoothed version of y_p, evaluated at x_p
    smoothed_data_diff : array like
        derivative of the smoothed spline, evaluated at x_p
    filename : matplotlib PdfPages-like object or None
        if None the plot is shown interactively, otherwise the figure is
        saved via ``filename.savefig``

    Returns
    -------
    smoothed_data_diff : array like
        the (unchanged) differentiated data, for further processing
    '''
    logging.info('Plot results')
    plt.close()
    # x values are scaled from threshold units to electrons throughout
    p1 = plt.errorbar(x_p * analysis_configuration['vcal_calibration'], y_p, yerr=y_p_e, fmt='o')  # plot data with error bars
    p2, = plt.plot(x_p * analysis_configuration['vcal_calibration'], smoothed_data, '-r')  # plot smoothed data
    # scale the derivative so it is visible on the same axis as the data
    factor = np.amax(y_p) / np.amin(smoothed_data_diff) * 1.1
    p3, = plt.plot(x_p * analysis_configuration['vcal_calibration'], factor * smoothed_data_diff, '-', lw=2)  # plot differentiated data
    # MPV = position of the steepest negative slope of the smoothed data
    mpv_index = np.argmax(-analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1))
    # vertical marker line at the MPV position
    p4, = plt.plot([x_p[mpv_index] * analysis_configuration['vcal_calibration'], x_p[mpv_index] * analysis_configuration['vcal_calibration']], [0, factor * smoothed_data_diff[mpv_index]], 'k-', lw=2)
    text = 'MPV ' + str(int(x_p[mpv_index] * analysis_configuration['vcal_calibration'])) + ' e'
    plt.text(1.01 * x_p[mpv_index] * analysis_configuration['vcal_calibration'], -10. * smoothed_data_diff[mpv_index], text, ha='left')
    plt.legend([p1, p2, p3, p4], ['data', 'smoothed spline', 'spline differentiation', text], prop={'size': 12}, loc=0)
    plt.title('\'Single hit cluster\'-occupancy for different pixel thresholds')
    plt.xlabel('Pixel threshold [e]')
    plt.ylabel('Single hit cluster occupancy [a.u.]')
    plt.ylim(0, np.amax(y_p) * 1.15)
    if filename is None:
        plt.show()
    else:
        filename.savefig(plt.gcf())
    return smoothed_data_diff
[ "def", "plot_result", "(", "x_p", ",", "y_p", ",", "y_p_e", ",", "smoothed_data", ",", "smoothed_data_diff", ",", "filename", "=", "None", ")", ":", "logging", ".", "info", "(", "'Plot results'", ")", "plt", ".", "close", "(", ")", "p1", "=", "plt", "....
61.233333
41.1
def convert_coordinates(self):
    """
    Convert coordinate string to objects

    Parses ``self.coord_str`` with the per-region-type parser sequence in
    ``self.language_spec`` and stores the resulting coordinate objects in
    ``self.coord``. Also resets the (consumed) parser iterators for
    'ellipse'/'annulus' region types so the instance can be reused.
    """
    coord_list = []
    # strip out "null" elements, i.e. ''. It might be possible to eliminate
    # these some other way, i.e. with regex directly, but I don't know how.
    # We need to copy in order not to burn up the iterators
    elements = [x for x in regex_splitter.split(self.coord_str) if x]
    element_parsers = self.language_spec[self.region_type]
    for ii, (element, element_parser) in enumerate(zip(elements,
                                                       element_parsers)):
        if element_parser is coordinate:
            # coordinates alternate between the two axis units
            unit = self.coordinate_units[self.coordsys][ii % 2]
            coord_list.append(element_parser(element, unit))
        elif self.coordinate_units[self.coordsys][0] is u.dimensionless_unscaled:
            coord_list.append(element_parser(element,
                                             unit=u.dimensionless_unscaled))
        else:
            coord_list.append(element_parser(element))

    # an odd number of parsed values for ellipse/box means the trailing
    # element is a rotation/length quantity rather than a coordinate
    if self.region_type in ['ellipse', 'box'] and len(coord_list) % 2 == 1:
        coord_list[-1] = CoordinateParser.parse_angular_length_quantity(elements[len(coord_list)-1])

    # Reset iterator for ellipse and annulus
    # Note that this cannot be done with copy.deepcopy on python2
    if self.region_type in ['ellipse', 'annulus']:
        self.language_spec[self.region_type] = itertools.chain(
            (coordinate, coordinate), itertools.cycle((radius,)))

    self.coord = coord_list
[ "def", "convert_coordinates", "(", "self", ")", ":", "coord_list", "=", "[", "]", "# strip out \"null\" elements, i.e. ''. It might be possible to eliminate", "# these some other way, i.e. with regex directly, but I don't know how.", "# We need to copy in order not to burn up the iterators"...
52.466667
26.333333
def nativestring(val, encodings=None):
    """
    Converts the inputted value to a native python string-type format.

    :param      val       | <variant>
                encodings | (<str>, ..) || None

    :sa         decoded

    :return     <unicode> || <str>
    """
    # exact type check on purpose: values already of a native string
    # type are passed through untouched
    if type(val) in (bytes_type, unicode_type):
        return val

    # coerce to unicode first, then to bytes, and finally fall back on
    # the decoding helper if both conversions raise
    try:
        return unicode_type(val)
    except StandardError:
        pass

    try:
        return bytes_type(val)
    except StandardError:
        return decoded(val)
[ "def", "nativestring", "(", "val", ",", "encodings", "=", "None", ")", ":", "# if it is already a native python string, don't do anything", "if", "type", "(", "val", ")", "in", "(", "bytes_type", ",", "unicode_type", ")", ":", "return", "val", "# otherwise, attempt ...
25
18.76