code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def atq(tag=None):
    '''
    List all queued and running jobs or only those with an optional 'tag'.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atq
        salt '*' at.atq [tag]
        salt '*' at.atq [job number]
    '''
    jobs = []

    # Shim to produce output similar to what __virtual__() should do
    # but __salt__ isn't available in __virtual__()
    # Tested on CentOS 5.8
    if __grains__['os_family'] == 'RedHat':
        output = _cmd('at', '-l')
    else:
        output = _cmd('atq')

    # _cmd returns None when the binary is missing from the system.
    if output is None:
        return '\'at.atq\' is not available.'

    # No jobs so return
    if output == '':
        return {'jobs': jobs}

    # Jobs created with at.at() will use the following
    # comment to denote a tagged job.
    job_kw_regex = re.compile(r'^### SALT: (\w+)')

    # Split each job into a dictionary and handle
    # pulling out tags or only listing jobs with a certain
    # tag
    for line in output.splitlines():
        job_tag = ''

        # Redhat/CentOS: atq output is '<job>\t<date> <time> <queue> <user>'
        if __grains__['os_family'] == 'RedHat':
            job, spec = line.split('\t')
            specs = spec.split()
        elif __grains__['os'] == 'OpenBSD':
            # Skip the header row; otherwise parse the timestamp from
            # whitespace-split fields and normalise via ISO format so that
            # specs becomes [date, time, queue, user] like the RedHat branch.
            if line.startswith(' Rank'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:5])
                job = tmp[6]
                specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y '
                                            '%H:%M')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[5])
        elif __grains__['os'] == 'FreeBSD':
            # Same idea as OpenBSD but different header marker and field
            # positions (FreeBSD includes seconds and a timezone).
            if line.startswith('Date'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:6])
                job = tmp[8]
                specs = datetime.datetime(*(time.strptime(
                    timestr, '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[6])
        else:
            # Generic atq layout: strptime with its default format.
            job, spec = line.split('\t')
            tmp = spec.split()
            timestr = ' '.join(tmp[0:5])
            specs = datetime.datetime(*(time.strptime(timestr)
                                        [0:5])).isoformat().split('T')
            specs.append(tmp[5])
            specs.append(tmp[6])

        # Search for any tags in the job's script body (at -c prints it);
        # the last matching '### SALT: <tag>' comment wins.
        atc_out = _cmd('at', '-c', job)
        for line in atc_out.splitlines():
            tmp = job_kw_regex.match(line)
            if tmp:
                job_tag = tmp.groups()[0]

        # BSD job ids are alphanumeric strings; elsewhere they are ints.
        if __grains__['os'] in BSD:
            job = six.text_type(job)
        else:
            job = int(job)

        # If a tag is supplied, only list jobs with that tag
        if tag:
            # TODO: Looks like there is a difference between salt and salt-call
            # If I don't wrap job in an int(), it fails on salt but works on
            # salt-call. With the int(), it fails with salt-call but not salt.
            if tag == job_tag or tag == job:
                jobs.append({'job': job,
                             'date': specs[0],
                             'time': specs[1],
                             'queue': specs[2],
                             'user': specs[3],
                             'tag': job_tag})
        else:
            jobs.append({'job': job,
                         'date': specs[0],
                         'time': specs[1],
                         'queue': specs[2],
                         'user': specs[3],
                         'tag': job_tag})
    return {'jobs': jobs}
def function[atq, parameter[tag]]: constant[ List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number] ] variable[jobs] assign[=] list[[]] if compare[call[name[__grains__]][constant[os_family]] equal[==] constant[RedHat]] begin[:] variable[output] assign[=] call[name[_cmd], parameter[constant[at], constant[-l]]] if compare[name[output] is constant[None]] begin[:] return[constant['at.atq' is not available.]] if compare[name[output] equal[==] constant[]] begin[:] return[dictionary[[<ast.Constant object at 0x7da1b2139090>], [<ast.Name object at 0x7da1b2139600>]]] variable[job_kw_regex] assign[=] call[name[re].compile, parameter[constant[^### SALT: (\w+)]]] for taget[name[line]] in starred[call[name[output].splitlines, parameter[]]] begin[:] variable[job_tag] assign[=] constant[] if compare[call[name[__grains__]][constant[os_family]] equal[==] constant[RedHat]] begin[:] <ast.Tuple object at 0x7da1b213aec0> assign[=] call[name[line].split, parameter[constant[ ]]] variable[specs] assign[=] call[name[spec].split, parameter[]] variable[atc_out] assign[=] call[name[_cmd], parameter[constant[at], constant[-c], name[job]]] for taget[name[line]] in starred[call[name[atc_out].splitlines, parameter[]]] begin[:] variable[tmp] assign[=] call[name[job_kw_regex].match, parameter[name[line]]] if name[tmp] begin[:] variable[job_tag] assign[=] call[call[name[tmp].groups, parameter[]]][constant[0]] if compare[call[name[__grains__]][constant[os]] in name[BSD]] begin[:] variable[job] assign[=] call[name[six].text_type, parameter[name[job]]] if name[tag] begin[:] if <ast.BoolOp object at 0x7da1b1f48f40> begin[:] call[name[jobs].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1f49090>, <ast.Constant object at 0x7da1b1f49150>, <ast.Constant object at 0x7da1b1f48070>, <ast.Constant object at 0x7da1b1f4bf10>, <ast.Constant object at 0x7da1b1f487f0>, <ast.Constant 
object at 0x7da1b1f48d30>], [<ast.Name object at 0x7da1b1f492a0>, <ast.Subscript object at 0x7da1b1f487c0>, <ast.Subscript object at 0x7da1b1f48c70>, <ast.Subscript object at 0x7da1b1f494b0>, <ast.Subscript object at 0x7da1b1f48e80>, <ast.Name object at 0x7da1b1f48b20>]]]] return[dictionary[[<ast.Constant object at 0x7da1b21a1690>], [<ast.Name object at 0x7da1b21a1990>]]]
keyword[def] identifier[atq] ( identifier[tag] = keyword[None] ): literal[string] identifier[jobs] =[] keyword[if] identifier[__grains__] [ literal[string] ]== literal[string] : identifier[output] = identifier[_cmd] ( literal[string] , literal[string] ) keyword[else] : identifier[output] = identifier[_cmd] ( literal[string] ) keyword[if] identifier[output] keyword[is] keyword[None] : keyword[return] literal[string] keyword[if] identifier[output] == literal[string] : keyword[return] { literal[string] : identifier[jobs] } identifier[job_kw_regex] = identifier[re] . identifier[compile] ( literal[string] ) keyword[for] identifier[line] keyword[in] identifier[output] . identifier[splitlines] (): identifier[job_tag] = literal[string] keyword[if] identifier[__grains__] [ literal[string] ]== literal[string] : identifier[job] , identifier[spec] = identifier[line] . identifier[split] ( literal[string] ) identifier[specs] = identifier[spec] . identifier[split] () keyword[elif] identifier[__grains__] [ literal[string] ]== literal[string] : keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[else] : identifier[tmp] = identifier[line] . identifier[split] () identifier[timestr] = literal[string] . identifier[join] ( identifier[tmp] [ literal[int] : literal[int] ]) identifier[job] = identifier[tmp] [ literal[int] ] identifier[specs] = identifier[datetime] . identifier[datetime] (*( identifier[time] . identifier[strptime] ( identifier[timestr] , literal[string] literal[string] )[ literal[int] : literal[int] ])). identifier[isoformat] (). identifier[split] ( literal[string] ) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) keyword[elif] identifier[__grains__] [ literal[string] ]== literal[string] : keyword[if] identifier[line] . 
identifier[startswith] ( literal[string] ): keyword[continue] keyword[else] : identifier[tmp] = identifier[line] . identifier[split] () identifier[timestr] = literal[string] . identifier[join] ( identifier[tmp] [ literal[int] : literal[int] ]) identifier[job] = identifier[tmp] [ literal[int] ] identifier[specs] = identifier[datetime] . identifier[datetime] (*( identifier[time] . identifier[strptime] ( identifier[timestr] , literal[string] )[ literal[int] : literal[int] ])). identifier[isoformat] (). identifier[split] ( literal[string] ) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) keyword[else] : identifier[job] , identifier[spec] = identifier[line] . identifier[split] ( literal[string] ) identifier[tmp] = identifier[spec] . identifier[split] () identifier[timestr] = literal[string] . identifier[join] ( identifier[tmp] [ literal[int] : literal[int] ]) identifier[specs] = identifier[datetime] . identifier[datetime] (*( identifier[time] . identifier[strptime] ( identifier[timestr] ) [ literal[int] : literal[int] ])). identifier[isoformat] (). identifier[split] ( literal[string] ) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) identifier[specs] . identifier[append] ( identifier[tmp] [ literal[int] ]) identifier[atc_out] = identifier[_cmd] ( literal[string] , literal[string] , identifier[job] ) keyword[for] identifier[line] keyword[in] identifier[atc_out] . identifier[splitlines] (): identifier[tmp] = identifier[job_kw_regex] . identifier[match] ( identifier[line] ) keyword[if] identifier[tmp] : identifier[job_tag] = identifier[tmp] . identifier[groups] ()[ literal[int] ] keyword[if] identifier[__grains__] [ literal[string] ] keyword[in] identifier[BSD] : identifier[job] = identifier[six] . 
identifier[text_type] ( identifier[job] ) keyword[else] : identifier[job] = identifier[int] ( identifier[job] ) keyword[if] identifier[tag] : keyword[if] identifier[tag] == identifier[job_tag] keyword[or] identifier[tag] == identifier[job] : identifier[jobs] . identifier[append] ({ literal[string] : identifier[job] , literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[job_tag] }) keyword[else] : identifier[jobs] . identifier[append] ({ literal[string] : identifier[job] , literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[specs] [ literal[int] ], literal[string] : identifier[job_tag] }) keyword[return] { literal[string] : identifier[jobs] }
def atq(tag=None): """ List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number] """ jobs = [] # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() # Tested on CentOS 5.8 if __grains__['os_family'] == 'RedHat': output = _cmd('at', '-l') # depends on [control=['if'], data=[]] else: output = _cmd('atq') if output is None: return "'at.atq' is not available." # depends on [control=['if'], data=[]] # No jobs so return if output == '': return {'jobs': jobs} # depends on [control=['if'], data=[]] # Jobs created with at.at() will use the following # comment to denote a tagged job. job_kw_regex = re.compile('^### SALT: (\\w+)') # Split each job into a dictionary and handle # pulling out tags or only listing jobs with a certain # tag for line in output.splitlines(): job_tag = '' # Redhat/CentOS if __grains__['os_family'] == 'RedHat': (job, spec) = line.split('\t') specs = spec.split() # depends on [control=['if'], data=[]] elif __grains__['os'] == 'OpenBSD': if line.startswith(' Rank'): continue # depends on [control=['if'], data=[]] else: tmp = line.split() timestr = ' '.join(tmp[1:5]) job = tmp[6] specs = datetime.datetime(*time.strptime(timestr, '%b %d, %Y %H:%M')[0:5]).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[5]) # depends on [control=['if'], data=[]] elif __grains__['os'] == 'FreeBSD': if line.startswith('Date'): continue # depends on [control=['if'], data=[]] else: tmp = line.split() timestr = ' '.join(tmp[1:6]) job = tmp[8] specs = datetime.datetime(*time.strptime(timestr, '%b %d %H:%M:%S %Z %Y')[0:5]).isoformat().split('T') specs.append(tmp[7]) specs.append(tmp[6]) # depends on [control=['if'], data=[]] else: (job, spec) = line.split('\t') tmp = spec.split() timestr = ' '.join(tmp[0:5]) specs = datetime.datetime(*time.strptime(timestr)[0:5]).isoformat().split('T') 
specs.append(tmp[5]) specs.append(tmp[6]) # Search for any tags atc_out = _cmd('at', '-c', job) for line in atc_out.splitlines(): tmp = job_kw_regex.match(line) if tmp: job_tag = tmp.groups()[0] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] if __grains__['os'] in BSD: job = six.text_type(job) # depends on [control=['if'], data=[]] else: job = int(job) # If a tag is supplied, only list jobs with that tag if tag: # TODO: Looks like there is a difference between salt and salt-call # If I don't wrap job in an int(), it fails on salt but works on # salt-call. With the int(), it fails with salt-call but not salt. if tag == job_tag or tag == job: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: jobs.append({'job': job, 'date': specs[0], 'time': specs[1], 'queue': specs[2], 'user': specs[3], 'tag': job_tag}) # depends on [control=['for'], data=['line']] return {'jobs': jobs}
def _find_by_id(self, resource, _id, parent=None):
    """Look up a single document by its id.

    Tries a direct GET first. If the transport layer reports a missing
    routing value (and no explicit parent was supplied), retries as a
    term search on ``_id``. Returns the first matching document, or
    ``None`` when nothing is found.
    """
    def _found(hit):
        # Normalise legacy responses that report 'exists' instead of 'found'.
        if 'exists' in hit:
            hit['found'] = hit['exists']
        return hit.get('found', False)

    lookup_kwargs = self._es_args(resource)
    try:
        # Route through the parent document when one is supplied.
        if parent:
            lookup_kwargs['parent'] = parent
        raw = self.elastic(resource).get(id=_id, **lookup_kwargs)
        if not _found(raw):
            return
        wrapped = self._parse_hits({'hits': {'hits': [raw]}}, resource)
        return wrapped.first()
    except elasticsearch.NotFoundError:
        return
    except elasticsearch.TransportError as tex:
        routing_missing = (tex.error == 'routing_missing_exception'
                           or 'RoutingMissingException' in tex.error)
        if not routing_missing:
            return
        # Fall back to searching for the item by its _id.
        search_kwargs = self._es_args(resource)
        query = {'query': {'bool': {'must': [{'term': {'_id': _id}}]}}}
        try:
            search_kwargs['size'] = 1
            hits = self.elastic(resource).search(body=query, **search_kwargs)
            matches = self._parse_hits(hits, resource)
            return matches.first()
        except elasticsearch.NotFoundError:
            return
def function[_find_by_id, parameter[self, resource, _id, parent]]: constant[Find the document by Id. If parent is not provided then on routing exception try to find using search. ] def function[is_found, parameter[hit]]: if compare[constant[exists] in name[hit]] begin[:] call[name[hit]][constant[found]] assign[=] call[name[hit]][constant[exists]] return[call[name[hit].get, parameter[constant[found], constant[False]]]] variable[args] assign[=] call[name[self]._es_args, parameter[name[resource]]] <ast.Try object at 0x7da18ede6f20>
keyword[def] identifier[_find_by_id] ( identifier[self] , identifier[resource] , identifier[_id] , identifier[parent] = keyword[None] ): literal[string] keyword[def] identifier[is_found] ( identifier[hit] ): keyword[if] literal[string] keyword[in] identifier[hit] : identifier[hit] [ literal[string] ]= identifier[hit] [ literal[string] ] keyword[return] identifier[hit] . identifier[get] ( literal[string] , keyword[False] ) identifier[args] = identifier[self] . identifier[_es_args] ( identifier[resource] ) keyword[try] : keyword[if] identifier[parent] : identifier[args] [ literal[string] ]= identifier[parent] identifier[hit] = identifier[self] . identifier[elastic] ( identifier[resource] ). identifier[get] ( identifier[id] = identifier[_id] ,** identifier[args] ) keyword[if] keyword[not] identifier[is_found] ( identifier[hit] ): keyword[return] identifier[docs] = identifier[self] . identifier[_parse_hits] ({ literal[string] :{ literal[string] :[ identifier[hit] ]}}, identifier[resource] ) keyword[return] identifier[docs] . identifier[first] () keyword[except] identifier[elasticsearch] . identifier[NotFoundError] : keyword[return] keyword[except] identifier[elasticsearch] . identifier[TransportError] keyword[as] identifier[tex] : keyword[if] identifier[tex] . identifier[error] == literal[string] keyword[or] literal[string] keyword[in] identifier[tex] . identifier[error] : identifier[args] = identifier[self] . identifier[_es_args] ( identifier[resource] ) identifier[query] ={ literal[string] :{ literal[string] :{ literal[string] :[{ literal[string] :{ literal[string] : identifier[_id] }}]}}} keyword[try] : identifier[args] [ literal[string] ]= literal[int] identifier[hits] = identifier[self] . identifier[elastic] ( identifier[resource] ). identifier[search] ( identifier[body] = identifier[query] ,** identifier[args] ) identifier[docs] = identifier[self] . identifier[_parse_hits] ( identifier[hits] , identifier[resource] ) keyword[return] identifier[docs] . 
identifier[first] () keyword[except] identifier[elasticsearch] . identifier[NotFoundError] : keyword[return]
def _find_by_id(self, resource, _id, parent=None): """Find the document by Id. If parent is not provided then on routing exception try to find using search. """ def is_found(hit): if 'exists' in hit: hit['found'] = hit['exists'] # depends on [control=['if'], data=['hit']] return hit.get('found', False) args = self._es_args(resource) try: # set the parent if available if parent: args['parent'] = parent # depends on [control=['if'], data=[]] hit = self.elastic(resource).get(id=_id, **args) if not is_found(hit): return # depends on [control=['if'], data=[]] docs = self._parse_hits({'hits': {'hits': [hit]}}, resource) return docs.first() # depends on [control=['try'], data=[]] except elasticsearch.NotFoundError: return # depends on [control=['except'], data=[]] except elasticsearch.TransportError as tex: if tex.error == 'routing_missing_exception' or 'RoutingMissingException' in tex.error: # search for the item args = self._es_args(resource) query = {'query': {'bool': {'must': [{'term': {'_id': _id}}]}}} try: args['size'] = 1 hits = self.elastic(resource).search(body=query, **args) docs = self._parse_hits(hits, resource) return docs.first() # depends on [control=['try'], data=[]] except elasticsearch.NotFoundError: return # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['tex']]
def main(args):
    """Annotate query genepred (GPD) transcripts against a reference GPD.

    Sorts both inputs, streams them locus by locus, and emits a genepred
    where each query transcript is named after its best reference match:
    exact junction-chain match first, then best-overlapping single-exon
    reference, then best-overlapping reference gene; transcripts with no
    reference overlap are clustered into novel genes with UUID names.
    """
    # First we need sorted genepreds for both the reference and the query.
    cmd = ['sort', args.reference, '--gpd', '--tempdir', args.tempdir,
           '--threads', str(args.threads),
           '-o', args.tempdir + '/ref.sorted.gpd']
    # FIX: cmd is a list; the original `cmd+"\n"` raised TypeError.
    sys.stderr.write(' '.join(cmd) + "\n")
    gpd_sort(cmd)
    cmd = ['sort', args.gpd, '--gpd', '--tempdir', args.tempdir,
           '--threads', str(args.threads),
           '-o', args.tempdir + '/my.sorted.gpd']
    sys.stderr.write(' '.join(cmd) + "\n")
    gpd_sort(cmd)
    rstream = GPDStream(open(args.tempdir + '/ref.sorted.gpd'))
    mstream = GPDStream(open(args.tempdir + '/my.sorted.gpd'))
    stream = MultiLocusStream([rstream, mstream])
    # '-' means stdout; '.gz' suffix selects gzip output.
    of = sys.stdout
    if args.output != '-':
        if args.output[-3:] == '.gz':
            of = gzip.open(args.output, 'w')
        else:
            of = open(args.output, 'w')
    for locus_rng in stream:
        (rgpds, mgpds) = locus_rng.get_payload()
        if len(mgpds) == 0:
            continue
        sys.stderr.write(locus_rng.get_range_string() + " " +
                         str(len(rgpds)) + " " + str(len(mgpds)) + " \r")
        # Index reference transcripts by junction chain for exact matching.
        ref_juncs = {}
        for ref in rgpds:
            ref_juncs[ref.get_junction_string()] = ref
        # Multi-exon queries with an exact junction-chain match take the
        # reference entry directly; the rest still need annotation.
        annotated = [ref_juncs[x.get_junction_string()] for x in mgpds
                     if x.get_exon_count() > 1
                     and x.get_junction_string() in ref_juncs]
        unannotated = [x for x in mgpds
                       if x.get_exon_count() > 1
                       and x.get_junction_string() not in ref_juncs]
        # Single-exon queries: take the single-exon reference with the
        # greatest average mutual coverage, when any overlaps.
        my_unannotated = [x for x in mgpds if x.get_exon_count() == 1]
        single_reference = [x for x in rgpds if x.get_exon_count() == 1]
        single_annotated = []
        single_unannotated = []
        for gpd in my_unannotated:
            overs = sorted([x for x in single_reference
                            if x.overlap_size(gpd) > 0],
                           key=lambda y: y.avg_mutual_coverage(gpd),
                           reverse=True)
            if len(overs) > 0:
                single_annotated.append(overs[0])
            else:
                single_unannotated.append(gpd)
        # Now annotated and single_annotated are done.
        unannotated += single_unannotated
        # Gene-level annotation: rename leftovers after the best-overlapping
        # reference gene, with a fresh transcript UUID.
        gene_annotated = []
        no_annotation = []
        for m in unannotated:
            overs = sorted([x for x in rgpds if x.overlap_size(m) > 0],
                           key=lambda y: y.avg_mutual_coverage(m),
                           reverse=True)
            if len(overs) > 0:
                gname = overs[0].value('gene_name')
                f = overs[0].get_gpd_line().rstrip().split("\t")
                f[0] = gname
                f[1] = str(uuid.uuid4())
                g = GPD("\t".join(f))
                gene_annotated.append(g)
            else:
                no_annotation.append(m)
        # Cluster completely novel transcripts into genes by overlap.
        finished = []
        while len(no_annotation) > 0:
            m = no_annotation.pop(0)
            matched = False
            for i in range(0, len(finished)):
                if len([x for x in finished[i] if x.overlap_size(m) > 0]) > 0:
                    finished[i].append(m)
                    matched = True
                    break
            if not matched:
                finished.append([m])
        # One UUID gene name per novel cluster; fresh transcript ids.
        original = []
        for group in finished:
            gname = str(uuid.uuid4())
            for member in group:
                tname = str(uuid.uuid4())
                f = member.get_gpd_line().rstrip().split("\t")
                f[0] = gname
                f[1] = tname
                g = GPD("\t".join(f))
                original.append(g)
        for gpd in original + annotated + single_annotated + gene_annotated:
            of.write(gpd.get_gpd_line() + "\n")
    # FIX: don't close sys.stdout when writing to '-'.
    if of is not sys.stdout:
        of.close()
    sys.stderr.write("\n")
    # Temporary working directory step 3 of 3 - Cleanup
    if not args.specific_tempdir:
        rmtree(args.tempdir)
def function[main, parameter[args]]: constant[first we need sorted genepreds] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18dc05f30>, <ast.Attribute object at 0x7da18dc05db0>, <ast.Constant object at 0x7da18dc07be0>, <ast.Constant object at 0x7da18dc07370>, <ast.Attribute object at 0x7da18dc04bb0>, <ast.Constant object at 0x7da18dc06d70>, <ast.Call object at 0x7da18dc064a0>, <ast.Constant object at 0x7da18dc073d0>, <ast.BinOp object at 0x7da18dc04e80>]] call[name[sys].stderr.write, parameter[binary_operation[name[cmd] + constant[ ]]]] call[name[gpd_sort], parameter[name[cmd]]] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18dc050f0>, <ast.Attribute object at 0x7da18dc07f40>, <ast.Constant object at 0x7da18dc04070>, <ast.Constant object at 0x7da18dc07460>, <ast.Attribute object at 0x7da18dc05b40>, <ast.Constant object at 0x7da18dc047c0>, <ast.Call object at 0x7da18dc041c0>, <ast.Constant object at 0x7da18dc07190>, <ast.BinOp object at 0x7da18dc06ce0>]] call[name[sys].stderr.write, parameter[binary_operation[name[cmd] + constant[ ]]]] call[name[gpd_sort], parameter[name[cmd]]] variable[rstream] assign[=] call[name[GPDStream], parameter[call[name[open], parameter[binary_operation[name[args].tempdir + constant[/ref.sorted.gpd]]]]]] variable[mstream] assign[=] call[name[GPDStream], parameter[call[name[open], parameter[binary_operation[name[args].tempdir + constant[/my.sorted.gpd]]]]]] variable[stream] assign[=] call[name[MultiLocusStream], parameter[list[[<ast.Name object at 0x7da18dc06410>, <ast.Name object at 0x7da18dc04310>]]]] variable[of] assign[=] name[sys].stdout if compare[name[args].output not_equal[!=] constant[-]] begin[:] if compare[call[name[args].output][<ast.Slice object at 0x7da18dc054b0>] equal[==] constant[.gz]] begin[:] variable[of] assign[=] call[name[gzip].open, parameter[name[args].output, constant[w]]] for taget[name[locus_rng]] in starred[name[stream]] begin[:] <ast.Tuple object at 0x7da18dc05810> assign[=] 
call[name[locus_rng].get_payload, parameter[]] if compare[call[name[len], parameter[name[mgpds]]] equal[==] constant[0]] begin[:] continue call[name[sys].stderr.write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[locus_rng].get_range_string, parameter[]] + constant[ ]] + call[name[str], parameter[call[name[len], parameter[name[rgpds]]]]]] + constant[ ]] + call[name[str], parameter[call[name[len], parameter[name[mgpds]]]]]] + constant[ ]]]] variable[ref_juncs] assign[=] dictionary[[], []] for taget[name[ref]] in starred[name[rgpds]] begin[:] call[name[ref_juncs]][call[name[ref].get_junction_string, parameter[]]] assign[=] name[ref] variable[annotated] assign[=] list[[]] variable[unannotated] assign[=] list[[]] variable[annotated] assign[=] <ast.ListComp object at 0x7da18dc05060> variable[unannotated] assign[=] <ast.ListComp object at 0x7da18dc05330> variable[my_unannotated] assign[=] <ast.ListComp object at 0x7da18dc071f0> variable[single_reference] assign[=] <ast.ListComp object at 0x7da18dc077f0> variable[single_annotated] assign[=] list[[]] variable[single_unannotated] assign[=] list[[]] for taget[name[gpd]] in starred[name[my_unannotated]] begin[:] variable[overs] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18ede6bc0>]] if compare[call[name[len], parameter[name[overs]]] greater[>] constant[0]] begin[:] call[name[single_annotated].append, parameter[call[name[overs]][constant[0]]]] <ast.AugAssign object at 0x7da18ede6440> variable[gene_annotated] assign[=] list[[]] variable[no_annotation] assign[=] list[[]] for taget[name[m]] in starred[name[unannotated]] begin[:] variable[overs] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18ede5540>]] if compare[call[name[len], parameter[name[overs]]] greater[>] constant[0]] begin[:] variable[gname] assign[=] call[call[name[overs]][constant[0]].value, parameter[constant[gene_name]]] variable[f] assign[=] 
call[call[call[call[name[overs]][constant[0]].get_gpd_line, parameter[]].rstrip, parameter[]].split, parameter[constant[ ]]] call[name[f]][constant[0]] assign[=] name[gname] call[name[f]][constant[1]] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]] variable[g] assign[=] call[name[GPD], parameter[call[constant[ ].join, parameter[name[f]]]]] call[name[gene_annotated].append, parameter[name[g]]] variable[finished] assign[=] list[[]] while compare[call[name[len], parameter[name[no_annotation]]] greater[>] constant[0]] begin[:] variable[m] assign[=] call[name[no_annotation].pop, parameter[constant[0]]] variable[matched] assign[=] constant[False] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[finished]]]]]] begin[:] if compare[call[name[len], parameter[<ast.ListComp object at 0x7da18ede4d30>]] greater[>] constant[0]] begin[:] call[call[name[finished]][name[i]].append, parameter[name[m]]] variable[matched] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da2054a6c80> begin[:] call[name[finished].append, parameter[list[[<ast.Name object at 0x7da2054a75e0>]]]] variable[original] assign[=] list[[]] for taget[name[group]] in starred[name[finished]] begin[:] variable[gname] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]] for taget[name[member]] in starred[name[group]] begin[:] variable[tname] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]] variable[f] assign[=] call[call[call[name[member].get_gpd_line, parameter[]].rstrip, parameter[]].split, parameter[constant[ ]]] call[name[f]][constant[0]] assign[=] name[gname] call[name[f]][constant[1]] assign[=] name[tname] variable[g] assign[=] call[name[GPD], parameter[call[constant[ ].join, parameter[name[f]]]]] call[name[original].append, parameter[name[g]]] for taget[name[gpd]] in starred[binary_operation[binary_operation[binary_operation[name[original] + name[annotated]] + 
name[single_annotated]] + name[gene_annotated]]] begin[:] call[name[of].write, parameter[binary_operation[call[name[gpd].get_gpd_line, parameter[]] + constant[ ]]]] call[name[of].close, parameter[]] call[name[sys].stderr.write, parameter[constant[ ]]] if <ast.UnaryOp object at 0x7da2054a7820> begin[:] call[name[rmtree], parameter[name[args].tempdir]]
keyword[def] identifier[main] ( identifier[args] ): literal[string] identifier[cmd] =[ literal[string] , identifier[args] . identifier[reference] , literal[string] , literal[string] , identifier[args] . identifier[tempdir] , literal[string] , identifier[str] ( identifier[args] . identifier[threads] ), literal[string] , identifier[args] . identifier[tempdir] + literal[string] ] identifier[sys] . identifier[stderr] . identifier[write] ( identifier[cmd] + literal[string] ) identifier[gpd_sort] ( identifier[cmd] ) identifier[cmd] =[ literal[string] , identifier[args] . identifier[gpd] , literal[string] , literal[string] , identifier[args] . identifier[tempdir] , literal[string] , identifier[str] ( identifier[args] . identifier[threads] ), literal[string] , identifier[args] . identifier[tempdir] + literal[string] ] identifier[sys] . identifier[stderr] . identifier[write] ( identifier[cmd] + literal[string] ) identifier[gpd_sort] ( identifier[cmd] ) identifier[rstream] = identifier[GPDStream] ( identifier[open] ( identifier[args] . identifier[tempdir] + literal[string] )) identifier[mstream] = identifier[GPDStream] ( identifier[open] ( identifier[args] . identifier[tempdir] + literal[string] )) identifier[stream] = identifier[MultiLocusStream] ([ identifier[rstream] , identifier[mstream] ]) identifier[of] = identifier[sys] . identifier[stdout] keyword[if] identifier[args] . identifier[output] != literal[string] : keyword[if] identifier[args] . identifier[output] [- literal[int] :]== literal[string] : identifier[of] = identifier[gzip] . identifier[open] ( identifier[args] . identifier[output] , literal[string] ) keyword[else] : identifier[of] = identifier[open] ( identifier[args] . identifier[output] , literal[string] ) keyword[for] identifier[locus_rng] keyword[in] identifier[stream] : ( identifier[rgpds] , identifier[mgpds] )= identifier[locus_rng] . 
identifier[get_payload] () keyword[if] identifier[len] ( identifier[mgpds] )== literal[int] : keyword[continue] identifier[sys] . identifier[stderr] . identifier[write] ( identifier[locus_rng] . identifier[get_range_string] ()+ literal[string] + identifier[str] ( identifier[len] ( identifier[rgpds] ))+ literal[string] + identifier[str] ( identifier[len] ( identifier[mgpds] ))+ literal[string] ) identifier[ref_juncs] ={} keyword[for] identifier[ref] keyword[in] identifier[rgpds] : identifier[ref_juncs] [ identifier[ref] . identifier[get_junction_string] ()]= identifier[ref] identifier[annotated] =[] identifier[unannotated] =[] identifier[annotated] =[ identifier[ref_juncs] [ identifier[x] . identifier[get_junction_string] ()] keyword[for] identifier[x] keyword[in] identifier[mgpds] keyword[if] identifier[x] . identifier[get_exon_count] ()> literal[int] keyword[and] identifier[x] . identifier[get_junction_string] () keyword[in] identifier[ref_juncs] ] identifier[unannotated] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[mgpds] keyword[if] identifier[x] . identifier[get_exon_count] ()> literal[int] keyword[and] identifier[x] . identifier[get_junction_string] () keyword[not] keyword[in] identifier[ref_juncs] ] identifier[my_unannotated] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[mgpds] keyword[if] identifier[x] . identifier[get_exon_count] ()== literal[int] ] identifier[single_reference] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[rgpds] keyword[if] identifier[x] . identifier[get_exon_count] ()== literal[int] ] identifier[single_annotated] =[] identifier[single_unannotated] =[] keyword[for] identifier[gpd] keyword[in] identifier[my_unannotated] : identifier[overs] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[single_reference] keyword[if] identifier[x] . 
identifier[overlap_size] ( identifier[gpd] )> literal[int] ], identifier[key] = keyword[lambda] identifier[y] : identifier[y] . identifier[avg_mutual_coverage] ( identifier[gpd] ), identifier[reverse] = keyword[True] ) keyword[if] identifier[len] ( identifier[overs] )> literal[int] : identifier[single_annotated] . identifier[append] ( identifier[overs] [ literal[int] ]) keyword[else] : identifier[single_unannotated] . identifier[append] ( identifier[gpd] ) identifier[unannotated] += identifier[single_unannotated] identifier[gene_annotated] =[] identifier[no_annotation] =[] keyword[for] identifier[m] keyword[in] identifier[unannotated] : identifier[overs] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[rgpds] keyword[if] identifier[x] . identifier[overlap_size] ( identifier[m] )> literal[int] ], identifier[key] = keyword[lambda] identifier[y] : identifier[y] . identifier[avg_mutual_coverage] ( identifier[m] ), identifier[reverse] = keyword[True] ) keyword[if] identifier[len] ( identifier[overs] )> literal[int] : identifier[gname] = identifier[overs] [ literal[int] ]. identifier[value] ( literal[string] ) identifier[f] = identifier[overs] [ literal[int] ]. identifier[get_gpd_line] (). identifier[rstrip] (). identifier[split] ( literal[string] ) identifier[f] [ literal[int] ]= identifier[gname] identifier[f] [ literal[int] ]= identifier[str] ( identifier[uuid] . identifier[uuid4] ()) identifier[g] = identifier[GPD] ( literal[string] . identifier[join] ( identifier[f] )) identifier[gene_annotated] . identifier[append] ( identifier[g] ) keyword[else] : identifier[no_annotation] . identifier[append] ( identifier[m] ) identifier[finished] =[] keyword[while] identifier[len] ( identifier[no_annotation] )> literal[int] : identifier[m] = identifier[no_annotation] . 
identifier[pop] ( literal[int] ) identifier[matched] = keyword[False] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[finished] )): keyword[if] identifier[len] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[finished] [ identifier[i] ] keyword[if] identifier[x] . identifier[overlap_size] ( identifier[m] )> literal[int] ])> literal[int] : identifier[finished] [ identifier[i] ]. identifier[append] ( identifier[m] ) identifier[matched] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[matched] : identifier[finished] . identifier[append] ([ identifier[m] ]) identifier[original] =[] keyword[for] identifier[group] keyword[in] identifier[finished] : identifier[gname] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()) keyword[for] identifier[member] keyword[in] identifier[group] : identifier[tname] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()) identifier[f] = identifier[member] . identifier[get_gpd_line] (). identifier[rstrip] (). identifier[split] ( literal[string] ) identifier[f] [ literal[int] ]= identifier[gname] identifier[f] [ literal[int] ]= identifier[tname] identifier[g] = identifier[GPD] ( literal[string] . identifier[join] ( identifier[f] )) identifier[original] . identifier[append] ( identifier[g] ) keyword[for] identifier[gpd] keyword[in] identifier[original] + identifier[annotated] + identifier[single_annotated] + identifier[gene_annotated] : identifier[of] . identifier[write] ( identifier[gpd] . identifier[get_gpd_line] ()+ literal[string] ) identifier[of] . identifier[close] () identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) keyword[if] keyword[not] identifier[args] . identifier[specific_tempdir] : identifier[rmtree] ( identifier[args] . identifier[tempdir] )
def main(args): """first we need sorted genepreds""" cmd = ['sort', args.reference, '--gpd', '--tempdir', args.tempdir, '--threads', str(args.threads), '-o', args.tempdir + '/ref.sorted.gpd'] sys.stderr.write(cmd + '\n') gpd_sort(cmd) cmd = ['sort', args.gpd, '--gpd', '--tempdir', args.tempdir, '--threads', str(args.threads), '-o', args.tempdir + '/my.sorted.gpd'] sys.stderr.write(cmd + '\n') gpd_sort(cmd) rstream = GPDStream(open(args.tempdir + '/ref.sorted.gpd')) mstream = GPDStream(open(args.tempdir + '/my.sorted.gpd')) stream = MultiLocusStream([rstream, mstream]) of = sys.stdout if args.output != '-': if args.output[-3:] == '.gz': of = gzip.open(args.output, 'w') # depends on [control=['if'], data=[]] else: of = open(args.output, 'w') # depends on [control=['if'], data=[]] for locus_rng in stream: (rgpds, mgpds) = locus_rng.get_payload() if len(mgpds) == 0: continue # depends on [control=['if'], data=[]] sys.stderr.write(locus_rng.get_range_string() + ' ' + str(len(rgpds)) + ' ' + str(len(mgpds)) + ' \r') ref_juncs = {} for ref in rgpds: ref_juncs[ref.get_junction_string()] = ref # depends on [control=['for'], data=['ref']] annotated = [] unannotated = [] annotated = [ref_juncs[x.get_junction_string()] for x in mgpds if x.get_exon_count() > 1 and x.get_junction_string() in ref_juncs] unannotated = [x for x in mgpds if x.get_exon_count() > 1 and x.get_junction_string() not in ref_juncs] # now unannotated needs an annotation. 
my_unannotated = [x for x in mgpds if x.get_exon_count() == 1] single_reference = [x for x in rgpds if x.get_exon_count() == 1] single_annotated = [] single_unannotated = [] #print len(single_reference) #print len(single_unannotated) for gpd in my_unannotated: overs = sorted([x for x in single_reference if x.overlap_size(gpd) > 0], key=lambda y: y.avg_mutual_coverage(gpd), reverse=True) if len(overs) > 0: single_annotated.append(overs[0]) # depends on [control=['if'], data=[]] else: single_unannotated.append(gpd) # depends on [control=['for'], data=['gpd']] # now annotated and single_annotated are done unannotated += single_unannotated # now single or multi we need to annotated unanotated gene_annotated = [] no_annotation = [] for m in unannotated: overs = sorted([x for x in rgpds if x.overlap_size(m) > 0], key=lambda y: y.avg_mutual_coverage(m), reverse=True) if len(overs) > 0: gname = overs[0].value('gene_name') f = overs[0].get_gpd_line().rstrip().split('\t') f[0] = gname f[1] = str(uuid.uuid4()) g = GPD('\t'.join(f)) gene_annotated.append(g) # depends on [control=['if'], data=[]] else: no_annotation.append(m) # depends on [control=['for'], data=['m']] finished = [] # now we need to annotate no_annotation while len(no_annotation) > 0: m = no_annotation.pop(0) matched = False for i in range(0, len(finished)): if len([x for x in finished[i] if x.overlap_size(m) > 0]) > 0: finished[i].append(m) matched = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if not matched: finished.append([m]) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # now finished has gene groups original = [] for group in finished: gname = str(uuid.uuid4()) for member in group: tname = str(uuid.uuid4()) f = member.get_gpd_line().rstrip().split('\t') f[0] = gname f[1] = tname g = GPD('\t'.join(f)) original.append(g) # depends on [control=['for'], data=['member']] # depends on [control=['for'], data=['group']] for gpd in 
original + annotated + single_annotated + gene_annotated: of.write(gpd.get_gpd_line() + '\n') # depends on [control=['for'], data=['gpd']] # depends on [control=['for'], data=['locus_rng']] of.close() sys.stderr.write('\n') # Temporary working directory step 3 of 3 - Cleanup if not args.specific_tempdir: rmtree(args.tempdir) # depends on [control=['if'], data=[]]
def read(self, response): """ Reads the current state of the entity from the server. """ results = self._load_state(response) # In lower layers of the SDK, we end up trying to URL encode # text to be dispatched via HTTP. However, these links are already # URL encoded when they arrive, and we need to mark them as such. unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True)) for k,v in results['links'].items()]) results['links'] = unquoted_links return results
def function[read, parameter[self, response]]: constant[ Reads the current state of the entity from the server. ] variable[results] assign[=] call[name[self]._load_state, parameter[name[response]]] variable[unquoted_links] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b179ce50>]] call[name[results]][constant[links]] assign[=] name[unquoted_links] return[name[results]]
keyword[def] identifier[read] ( identifier[self] , identifier[response] ): literal[string] identifier[results] = identifier[self] . identifier[_load_state] ( identifier[response] ) identifier[unquoted_links] = identifier[dict] ([( identifier[k] , identifier[UrlEncoded] ( identifier[v] , identifier[skip_encode] = keyword[True] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[results] [ literal[string] ]. identifier[items] ()]) identifier[results] [ literal[string] ]= identifier[unquoted_links] keyword[return] identifier[results]
def read(self, response): """ Reads the current state of the entity from the server. """ results = self._load_state(response) # In lower layers of the SDK, we end up trying to URL encode # text to be dispatched via HTTP. However, these links are already # URL encoded when they arrive, and we need to mark them as such. unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True)) for (k, v) in results['links'].items()]) results['links'] = unquoted_links return results
def create_public_ip(access_token, subscription_id, resource_group, public_ip_name, dns_label, location): '''Create a public ip address. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. public_ip_name (str): Name of the new public ip address resource. dns_label (str): DNS label to apply to the IP address. location (str): Azure data center location. E.g. westus. Returns: HTTP response. Public IP address JSON body. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name, '?api-version=', NETWORK_API]) ip_body = {'location': location} properties = {'publicIPAllocationMethod': 'Dynamic'} properties['dnsSettings'] = {'domainNameLabel': dns_label} ip_body['properties'] = properties body = json.dumps(ip_body) return do_put(endpoint, body, access_token)
def function[create_public_ip, parameter[access_token, subscription_id, resource_group, public_ip_name, dns_label, location]]: constant[Create a public ip address. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. public_ip_name (str): Name of the new public ip address resource. dns_label (str): DNS label to apply to the IP address. location (str): Azure data center location. E.g. westus. Returns: HTTP response. Public IP address JSON body. ] variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b05697b0>, <ast.Constant object at 0x7da1b056aaa0>, <ast.Name object at 0x7da1b0569690>, <ast.Constant object at 0x7da1b056b3a0>, <ast.Name object at 0x7da1b056afe0>, <ast.Constant object at 0x7da1b0568970>, <ast.Name object at 0x7da1b0569000>, <ast.Constant object at 0x7da1b0568be0>, <ast.Name object at 0x7da1b0568460>]]]] variable[ip_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b056b5b0>], [<ast.Name object at 0x7da1b056b3d0>]] variable[properties] assign[=] dictionary[[<ast.Constant object at 0x7da1b056b7c0>], [<ast.Constant object at 0x7da1b056b310>]] call[name[properties]][constant[dnsSettings]] assign[=] dictionary[[<ast.Constant object at 0x7da1b056ba90>], [<ast.Name object at 0x7da1b056a380>]] call[name[ip_body]][constant[properties]] assign[=] name[properties] variable[body] assign[=] call[name[json].dumps, parameter[name[ip_body]]] return[call[name[do_put], parameter[name[endpoint], name[body], name[access_token]]]]
keyword[def] identifier[create_public_ip] ( identifier[access_token] , identifier[subscription_id] , identifier[resource_group] , identifier[public_ip_name] , identifier[dns_label] , identifier[location] ): literal[string] identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (), literal[string] , identifier[subscription_id] , literal[string] , identifier[resource_group] , literal[string] , identifier[public_ip_name] , literal[string] , identifier[NETWORK_API] ]) identifier[ip_body] ={ literal[string] : identifier[location] } identifier[properties] ={ literal[string] : literal[string] } identifier[properties] [ literal[string] ]={ literal[string] : identifier[dns_label] } identifier[ip_body] [ literal[string] ]= identifier[properties] identifier[body] = identifier[json] . identifier[dumps] ( identifier[ip_body] ) keyword[return] identifier[do_put] ( identifier[endpoint] , identifier[body] , identifier[access_token] )
def create_public_ip(access_token, subscription_id, resource_group, public_ip_name, dns_label, location): """Create a public ip address. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. public_ip_name (str): Name of the new public ip address resource. dns_label (str): DNS label to apply to the IP address. location (str): Azure data center location. E.g. westus. Returns: HTTP response. Public IP address JSON body. """ endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name, '?api-version=', NETWORK_API]) ip_body = {'location': location} properties = {'publicIPAllocationMethod': 'Dynamic'} properties['dnsSettings'] = {'domainNameLabel': dns_label} ip_body['properties'] = properties body = json.dumps(ip_body) return do_put(endpoint, body, access_token)
def replaceable(decorator): """A descriptor modifier which allows a setattr to succeed even if __get__ is defined. Formally, this turns a data descriptor into a non-data descriptor. Discards the implementation of __set__ and __delete__ if it exists, so be wary when not using with ``property``. """ def decorate(fn, *args, **kwargs): parent = decorator(fn, *args, **kwargs) #doc = _add_msg(getattr(parent, '__doc__', None), '*@replaceable*') doc = getattr(parent, '__doc__', None) return _replaceable(parent, doc) return decorate
def function[replaceable, parameter[decorator]]: constant[A descriptor modifier which allows a setattr to succeed even if __get__ is defined. Formally, this turns a data descriptor into a non-data descriptor. Discards the implementation of __set__ and __delete__ if it exists, so be wary when not using with ``property``. ] def function[decorate, parameter[fn]]: variable[parent] assign[=] call[name[decorator], parameter[name[fn], <ast.Starred object at 0x7da20e9b3130>]] variable[doc] assign[=] call[name[getattr], parameter[name[parent], constant[__doc__], constant[None]]] return[call[name[_replaceable], parameter[name[parent], name[doc]]]] return[name[decorate]]
keyword[def] identifier[replaceable] ( identifier[decorator] ): literal[string] keyword[def] identifier[decorate] ( identifier[fn] ,* identifier[args] ,** identifier[kwargs] ): identifier[parent] = identifier[decorator] ( identifier[fn] ,* identifier[args] ,** identifier[kwargs] ) identifier[doc] = identifier[getattr] ( identifier[parent] , literal[string] , keyword[None] ) keyword[return] identifier[_replaceable] ( identifier[parent] , identifier[doc] ) keyword[return] identifier[decorate]
def replaceable(decorator): """A descriptor modifier which allows a setattr to succeed even if __get__ is defined. Formally, this turns a data descriptor into a non-data descriptor. Discards the implementation of __set__ and __delete__ if it exists, so be wary when not using with ``property``. """ def decorate(fn, *args, **kwargs): parent = decorator(fn, *args, **kwargs) #doc = _add_msg(getattr(parent, '__doc__', None), '*@replaceable*') doc = getattr(parent, '__doc__', None) return _replaceable(parent, doc) return decorate
def query_closest(self, query_item): """ closest = store.query_closest({"type": "...", "item_1": "...", "timestamp": "..."}) # returns a list of the closest items to a given thing """ if not isinstance(query_item, dict): raise TypeError("The query query_item isn't a dictionary") _type = query_item.get("type") _timestamp = query_item.get("timestamp") if _type is None: raise AttributeError("Please make sure to add a type to the query_item dict") if _timestamp is None: raise AttributeError("Timestamp doesn't exist. It's necessary to provide closest query") query_less = deepcopy(query_item) query_more = deepcopy(query_item) query_less["timestamp"] = {"$lte": _timestamp} query_more["timestamp"] = {"$gt": _timestamp} closestBelow = self._collection.find(query_less).sort( "timestamp", pymongo.DESCENDING).limit(1) closestAbove = self._collection.find(query_more).sort("timestamp", pymongo.ASCENDING).limit(1) combined = list(closestAbove) + list(closestBelow) for x in combined: del x['_id'] # abs() if len(combined) >= 2: if abs(combined[0]["timestamp"] - _timestamp) > abs(combined[1]["timestamp"] - _timestamp): return combined[1] else: return combined[0] elif combined == 1: return combined[0] else: return None
def function[query_closest, parameter[self, query_item]]: constant[ closest = store.query_closest({"type": "...", "item_1": "...", "timestamp": "..."}) # returns a list of the closest items to a given thing ] if <ast.UnaryOp object at 0x7da1b09bdfc0> begin[:] <ast.Raise object at 0x7da1b09bf070> variable[_type] assign[=] call[name[query_item].get, parameter[constant[type]]] variable[_timestamp] assign[=] call[name[query_item].get, parameter[constant[timestamp]]] if compare[name[_type] is constant[None]] begin[:] <ast.Raise object at 0x7da1b09bc100> if compare[name[_timestamp] is constant[None]] begin[:] <ast.Raise object at 0x7da1b09bc790> variable[query_less] assign[=] call[name[deepcopy], parameter[name[query_item]]] variable[query_more] assign[=] call[name[deepcopy], parameter[name[query_item]]] call[name[query_less]][constant[timestamp]] assign[=] dictionary[[<ast.Constant object at 0x7da1b09be830>], [<ast.Name object at 0x7da1b09be4d0>]] call[name[query_more]][constant[timestamp]] assign[=] dictionary[[<ast.Constant object at 0x7da1b09bcee0>], [<ast.Name object at 0x7da1b09be230>]] variable[closestBelow] assign[=] call[call[call[name[self]._collection.find, parameter[name[query_less]]].sort, parameter[constant[timestamp], name[pymongo].DESCENDING]].limit, parameter[constant[1]]] variable[closestAbove] assign[=] call[call[call[name[self]._collection.find, parameter[name[query_more]]].sort, parameter[constant[timestamp], name[pymongo].ASCENDING]].limit, parameter[constant[1]]] variable[combined] assign[=] binary_operation[call[name[list], parameter[name[closestAbove]]] + call[name[list], parameter[name[closestBelow]]]] for taget[name[x]] in starred[name[combined]] begin[:] <ast.Delete object at 0x7da1b09be950> if compare[call[name[len], parameter[name[combined]]] greater_or_equal[>=] constant[2]] begin[:] if compare[call[name[abs], parameter[binary_operation[call[call[name[combined]][constant[0]]][constant[timestamp]] - name[_timestamp]]]] greater[>] 
call[name[abs], parameter[binary_operation[call[call[name[combined]][constant[1]]][constant[timestamp]] - name[_timestamp]]]]] begin[:] return[call[name[combined]][constant[1]]]
keyword[def] identifier[query_closest] ( identifier[self] , identifier[query_item] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[query_item] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[_type] = identifier[query_item] . identifier[get] ( literal[string] ) identifier[_timestamp] = identifier[query_item] . identifier[get] ( literal[string] ) keyword[if] identifier[_type] keyword[is] keyword[None] : keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] identifier[_timestamp] keyword[is] keyword[None] : keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[query_less] = identifier[deepcopy] ( identifier[query_item] ) identifier[query_more] = identifier[deepcopy] ( identifier[query_item] ) identifier[query_less] [ literal[string] ]={ literal[string] : identifier[_timestamp] } identifier[query_more] [ literal[string] ]={ literal[string] : identifier[_timestamp] } identifier[closestBelow] = identifier[self] . identifier[_collection] . identifier[find] ( identifier[query_less] ). identifier[sort] ( literal[string] , identifier[pymongo] . identifier[DESCENDING] ). identifier[limit] ( literal[int] ) identifier[closestAbove] = identifier[self] . identifier[_collection] . identifier[find] ( identifier[query_more] ). identifier[sort] ( literal[string] , identifier[pymongo] . identifier[ASCENDING] ). 
identifier[limit] ( literal[int] ) identifier[combined] = identifier[list] ( identifier[closestAbove] )+ identifier[list] ( identifier[closestBelow] ) keyword[for] identifier[x] keyword[in] identifier[combined] : keyword[del] identifier[x] [ literal[string] ] keyword[if] identifier[len] ( identifier[combined] )>= literal[int] : keyword[if] identifier[abs] ( identifier[combined] [ literal[int] ][ literal[string] ]- identifier[_timestamp] )> identifier[abs] ( identifier[combined] [ literal[int] ][ literal[string] ]- identifier[_timestamp] ): keyword[return] identifier[combined] [ literal[int] ] keyword[else] : keyword[return] identifier[combined] [ literal[int] ] keyword[elif] identifier[combined] == literal[int] : keyword[return] identifier[combined] [ literal[int] ] keyword[else] : keyword[return] keyword[None]
def query_closest(self, query_item): """ closest = store.query_closest({"type": "...", "item_1": "...", "timestamp": "..."}) # returns a list of the closest items to a given thing """ if not isinstance(query_item, dict): raise TypeError("The query query_item isn't a dictionary") # depends on [control=['if'], data=[]] _type = query_item.get('type') _timestamp = query_item.get('timestamp') if _type is None: raise AttributeError('Please make sure to add a type to the query_item dict') # depends on [control=['if'], data=[]] if _timestamp is None: raise AttributeError("Timestamp doesn't exist. It's necessary to provide closest query") # depends on [control=['if'], data=[]] query_less = deepcopy(query_item) query_more = deepcopy(query_item) query_less['timestamp'] = {'$lte': _timestamp} query_more['timestamp'] = {'$gt': _timestamp} closestBelow = self._collection.find(query_less).sort('timestamp', pymongo.DESCENDING).limit(1) closestAbove = self._collection.find(query_more).sort('timestamp', pymongo.ASCENDING).limit(1) combined = list(closestAbove) + list(closestBelow) for x in combined: del x['_id'] # depends on [control=['for'], data=['x']] # abs() if len(combined) >= 2: if abs(combined[0]['timestamp'] - _timestamp) > abs(combined[1]['timestamp'] - _timestamp): return combined[1] # depends on [control=['if'], data=[]] else: return combined[0] # depends on [control=['if'], data=[]] elif combined == 1: return combined[0] # depends on [control=['if'], data=['combined']] else: return None
def validate_known_curve(): """Validate on a sin function.""" plt.figure() N = 100 x = numpy.linspace(-1, 1, N) y = numpy.sin(4 * x) smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmootherSlowUpdate smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother) plt.plot(x, smooth.smooth_result, label='Slow') smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmoother smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother) plt.plot(x, smooth.smooth_result, label='Fast') plt.plot(x, y, '.', label='data') plt.legend() plt.show()
def function[validate_known_curve, parameter[]]: constant[Validate on a sin function.] call[name[plt].figure, parameter[]] variable[N] assign[=] constant[100] variable[x] assign[=] call[name[numpy].linspace, parameter[<ast.UnaryOp object at 0x7da1b19cb7f0>, constant[1], name[N]]] variable[y] assign[=] call[name[numpy].sin, parameter[binary_operation[constant[4] * name[x]]]] name[smoother].DEFAULT_BASIC_SMOOTHER assign[=] name[smoother].BasicFixedSpanSmootherSlowUpdate variable[smooth] assign[=] call[name[smoother].perform_smooth, parameter[name[x], name[y]]] call[name[plt].plot, parameter[name[x], name[smooth].smooth_result]] name[smoother].DEFAULT_BASIC_SMOOTHER assign[=] name[smoother].BasicFixedSpanSmoother variable[smooth] assign[=] call[name[smoother].perform_smooth, parameter[name[x], name[y]]] call[name[plt].plot, parameter[name[x], name[smooth].smooth_result]] call[name[plt].plot, parameter[name[x], name[y], constant[.]]] call[name[plt].legend, parameter[]] call[name[plt].show, parameter[]]
keyword[def] identifier[validate_known_curve] (): literal[string] identifier[plt] . identifier[figure] () identifier[N] = literal[int] identifier[x] = identifier[numpy] . identifier[linspace] (- literal[int] , literal[int] , identifier[N] ) identifier[y] = identifier[numpy] . identifier[sin] ( literal[int] * identifier[x] ) identifier[smoother] . identifier[DEFAULT_BASIC_SMOOTHER] = identifier[smoother] . identifier[BasicFixedSpanSmootherSlowUpdate] identifier[smooth] = identifier[smoother] . identifier[perform_smooth] ( identifier[x] , identifier[y] , identifier[smoother_cls] = identifier[supersmoother] . identifier[SuperSmoother] ) identifier[plt] . identifier[plot] ( identifier[x] , identifier[smooth] . identifier[smooth_result] , identifier[label] = literal[string] ) identifier[smoother] . identifier[DEFAULT_BASIC_SMOOTHER] = identifier[smoother] . identifier[BasicFixedSpanSmoother] identifier[smooth] = identifier[smoother] . identifier[perform_smooth] ( identifier[x] , identifier[y] , identifier[smoother_cls] = identifier[supersmoother] . identifier[SuperSmoother] ) identifier[plt] . identifier[plot] ( identifier[x] , identifier[smooth] . identifier[smooth_result] , identifier[label] = literal[string] ) identifier[plt] . identifier[plot] ( identifier[x] , identifier[y] , literal[string] , identifier[label] = literal[string] ) identifier[plt] . identifier[legend] () identifier[plt] . identifier[show] ()
def validate_known_curve(): """Validate on a sin function.""" plt.figure() N = 100 x = numpy.linspace(-1, 1, N) y = numpy.sin(4 * x) smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmootherSlowUpdate smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother) plt.plot(x, smooth.smooth_result, label='Slow') smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmoother smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother) plt.plot(x, smooth.smooth_result, label='Fast') plt.plot(x, y, '.', label='data') plt.legend() plt.show()
def drdgeo(lon, lat, alt, re, f): """ This routine computes the Jacobian of the transformation from geodetic to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdgeo_c.html :param lon: Geodetic longitude of point (radians). :type lon: float :param lat: Geodetic latitude of point (radians). :type lat: float :param alt: Altitude of point above the reference spheroid. :type alt: float :param re: Equatorial radius of the reference spheroid. :type re: float :param f: Flattening coefficient. :type f: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats """ lon = ctypes.c_double(lon) lat = ctypes.c_double(lat) alt = ctypes.c_double(alt) re = ctypes.c_double(re) f = ctypes.c_double(f) jacobi = stypes.emptyDoubleMatrix() libspice.drdgeo_c(lon, lat, alt, re, f, jacobi) return stypes.cMatrixToNumpy(jacobi)
def function[drdgeo, parameter[lon, lat, alt, re, f]]: constant[ This routine computes the Jacobian of the transformation from geodetic to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdgeo_c.html :param lon: Geodetic longitude of point (radians). :type lon: float :param lat: Geodetic latitude of point (radians). :type lat: float :param alt: Altitude of point above the reference spheroid. :type alt: float :param re: Equatorial radius of the reference spheroid. :type re: float :param f: Flattening coefficient. :type f: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats ] variable[lon] assign[=] call[name[ctypes].c_double, parameter[name[lon]]] variable[lat] assign[=] call[name[ctypes].c_double, parameter[name[lat]]] variable[alt] assign[=] call[name[ctypes].c_double, parameter[name[alt]]] variable[re] assign[=] call[name[ctypes].c_double, parameter[name[re]]] variable[f] assign[=] call[name[ctypes].c_double, parameter[name[f]]] variable[jacobi] assign[=] call[name[stypes].emptyDoubleMatrix, parameter[]] call[name[libspice].drdgeo_c, parameter[name[lon], name[lat], name[alt], name[re], name[f], name[jacobi]]] return[call[name[stypes].cMatrixToNumpy, parameter[name[jacobi]]]]
keyword[def] identifier[drdgeo] ( identifier[lon] , identifier[lat] , identifier[alt] , identifier[re] , identifier[f] ): literal[string] identifier[lon] = identifier[ctypes] . identifier[c_double] ( identifier[lon] ) identifier[lat] = identifier[ctypes] . identifier[c_double] ( identifier[lat] ) identifier[alt] = identifier[ctypes] . identifier[c_double] ( identifier[alt] ) identifier[re] = identifier[ctypes] . identifier[c_double] ( identifier[re] ) identifier[f] = identifier[ctypes] . identifier[c_double] ( identifier[f] ) identifier[jacobi] = identifier[stypes] . identifier[emptyDoubleMatrix] () identifier[libspice] . identifier[drdgeo_c] ( identifier[lon] , identifier[lat] , identifier[alt] , identifier[re] , identifier[f] , identifier[jacobi] ) keyword[return] identifier[stypes] . identifier[cMatrixToNumpy] ( identifier[jacobi] )
def drdgeo(lon, lat, alt, re, f): """ This routine computes the Jacobian of the transformation from geodetic to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdgeo_c.html :param lon: Geodetic longitude of point (radians). :type lon: float :param lat: Geodetic latitude of point (radians). :type lat: float :param alt: Altitude of point above the reference spheroid. :type alt: float :param re: Equatorial radius of the reference spheroid. :type re: float :param f: Flattening coefficient. :type f: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats """ lon = ctypes.c_double(lon) lat = ctypes.c_double(lat) alt = ctypes.c_double(alt) re = ctypes.c_double(re) f = ctypes.c_double(f) jacobi = stypes.emptyDoubleMatrix() libspice.drdgeo_c(lon, lat, alt, re, f, jacobi) return stypes.cMatrixToNumpy(jacobi)
def get_namespace(fn: Callable, namespace: Optional[str]) -> str: """ Returns a representation of a function's name (perhaps within a namespace), like .. code-block:: none mymodule:MyClass.myclassfunc # with no namespace mymodule:MyClass.myclassfunc|somenamespace # with a namespace Args: fn: a function namespace: an optional namespace, which can be of any type but is normally a ``str``; if not ``None``, ``str(namespace)`` will be added to the result. See https://dogpilecache.readthedocs.io/en/latest/api.html#dogpile.cache.region.CacheRegion.cache_on_arguments """ # noqa # See hidden attributes with dir(fn) # noinspection PyUnresolvedReferences return "{module}:{name}{extra}".format( module=fn.__module__, name=fn.__qualname__, # __qualname__ includes class name, if present extra="|{}".format(namespace) if namespace is not None else "", )
def function[get_namespace, parameter[fn, namespace]]: constant[ Returns a representation of a function's name (perhaps within a namespace), like .. code-block:: none mymodule:MyClass.myclassfunc # with no namespace mymodule:MyClass.myclassfunc|somenamespace # with a namespace Args: fn: a function namespace: an optional namespace, which can be of any type but is normally a ``str``; if not ``None``, ``str(namespace)`` will be added to the result. See https://dogpilecache.readthedocs.io/en/latest/api.html#dogpile.cache.region.CacheRegion.cache_on_arguments ] return[call[constant[{module}:{name}{extra}].format, parameter[]]]
keyword[def] identifier[get_namespace] ( identifier[fn] : identifier[Callable] , identifier[namespace] : identifier[Optional] [ identifier[str] ])-> identifier[str] : literal[string] keyword[return] literal[string] . identifier[format] ( identifier[module] = identifier[fn] . identifier[__module__] , identifier[name] = identifier[fn] . identifier[__qualname__] , identifier[extra] = literal[string] . identifier[format] ( identifier[namespace] ) keyword[if] identifier[namespace] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] , )
def get_namespace(fn: Callable, namespace: Optional[str]) -> str: """ Returns a representation of a function's name (perhaps within a namespace), like .. code-block:: none mymodule:MyClass.myclassfunc # with no namespace mymodule:MyClass.myclassfunc|somenamespace # with a namespace Args: fn: a function namespace: an optional namespace, which can be of any type but is normally a ``str``; if not ``None``, ``str(namespace)`` will be added to the result. See https://dogpilecache.readthedocs.io/en/latest/api.html#dogpile.cache.region.CacheRegion.cache_on_arguments """ # noqa # See hidden attributes with dir(fn) # noinspection PyUnresolvedReferences # __qualname__ includes class name, if present return '{module}:{name}{extra}'.format(module=fn.__module__, name=fn.__qualname__, extra='|{}'.format(namespace) if namespace is not None else '')
def median(values): """Return median value for the list of values. @param values: list of values for processing. @return: median value. """ values.sort() n = int(len(values) / 2) return values[n]
def function[median, parameter[values]]: constant[Return median value for the list of values. @param values: list of values for processing. @return: median value. ] call[name[values].sort, parameter[]] variable[n] assign[=] call[name[int], parameter[binary_operation[call[name[len], parameter[name[values]]] / constant[2]]]] return[call[name[values]][name[n]]]
keyword[def] identifier[median] ( identifier[values] ): literal[string] identifier[values] . identifier[sort] () identifier[n] = identifier[int] ( identifier[len] ( identifier[values] )/ literal[int] ) keyword[return] identifier[values] [ identifier[n] ]
def median(values): """Return median value for the list of values. @param values: list of values for processing. @return: median value. """ values.sort() n = int(len(values) / 2) return values[n]
def remove_interference_line(img): """ 去除干扰线 :param img: :return: """ pixdata = img.load() w,h = img.size for y in range(1,h-1): for x in range(1,w-1): count = 0 if pixdata[x,y-1] > 245: count = count + 1 if pixdata[x,y+1] > 245: count = count + 1 if pixdata[x-1,y] > 245: count = count + 1 if pixdata[x+1,y] > 245: count = count + 1 if count > 2: pixdata[x,y] = 255 return img
def function[remove_interference_line, parameter[img]]: constant[ 去除干扰线 :param img: :return: ] variable[pixdata] assign[=] call[name[img].load, parameter[]] <ast.Tuple object at 0x7da1b162b910> assign[=] name[img].size for taget[name[y]] in starred[call[name[range], parameter[constant[1], binary_operation[name[h] - constant[1]]]]] begin[:] for taget[name[x]] in starred[call[name[range], parameter[constant[1], binary_operation[name[w] - constant[1]]]]] begin[:] variable[count] assign[=] constant[0] if compare[call[name[pixdata]][tuple[[<ast.Name object at 0x7da1b1629b10>, <ast.BinOp object at 0x7da1b162ad10>]]] greater[>] constant[245]] begin[:] variable[count] assign[=] binary_operation[name[count] + constant[1]] if compare[call[name[pixdata]][tuple[[<ast.Name object at 0x7da1b143c250>, <ast.BinOp object at 0x7da1b168ce80>]]] greater[>] constant[245]] begin[:] variable[count] assign[=] binary_operation[name[count] + constant[1]] if compare[call[name[pixdata]][tuple[[<ast.BinOp object at 0x7da1b168e6b0>, <ast.Name object at 0x7da1b168e920>]]] greater[>] constant[245]] begin[:] variable[count] assign[=] binary_operation[name[count] + constant[1]] if compare[call[name[pixdata]][tuple[[<ast.BinOp object at 0x7da1b168ccd0>, <ast.Name object at 0x7da1b168edd0>]]] greater[>] constant[245]] begin[:] variable[count] assign[=] binary_operation[name[count] + constant[1]] if compare[name[count] greater[>] constant[2]] begin[:] call[name[pixdata]][tuple[[<ast.Name object at 0x7da1b168e740>, <ast.Name object at 0x7da1b168f9a0>]]] assign[=] constant[255] return[name[img]]
keyword[def] identifier[remove_interference_line] ( identifier[img] ): literal[string] identifier[pixdata] = identifier[img] . identifier[load] () identifier[w] , identifier[h] = identifier[img] . identifier[size] keyword[for] identifier[y] keyword[in] identifier[range] ( literal[int] , identifier[h] - literal[int] ): keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , identifier[w] - literal[int] ): identifier[count] = literal[int] keyword[if] identifier[pixdata] [ identifier[x] , identifier[y] - literal[int] ]> literal[int] : identifier[count] = identifier[count] + literal[int] keyword[if] identifier[pixdata] [ identifier[x] , identifier[y] + literal[int] ]> literal[int] : identifier[count] = identifier[count] + literal[int] keyword[if] identifier[pixdata] [ identifier[x] - literal[int] , identifier[y] ]> literal[int] : identifier[count] = identifier[count] + literal[int] keyword[if] identifier[pixdata] [ identifier[x] + literal[int] , identifier[y] ]> literal[int] : identifier[count] = identifier[count] + literal[int] keyword[if] identifier[count] > literal[int] : identifier[pixdata] [ identifier[x] , identifier[y] ]= literal[int] keyword[return] identifier[img]
def remove_interference_line(img): """ 去除干扰线 :param img: :return: """ pixdata = img.load() (w, h) = img.size for y in range(1, h - 1): for x in range(1, w - 1): count = 0 if pixdata[x, y - 1] > 245: count = count + 1 # depends on [control=['if'], data=[]] if pixdata[x, y + 1] > 245: count = count + 1 # depends on [control=['if'], data=[]] if pixdata[x - 1, y] > 245: count = count + 1 # depends on [control=['if'], data=[]] if pixdata[x + 1, y] > 245: count = count + 1 # depends on [control=['if'], data=[]] if count > 2: pixdata[x, y] = 255 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] return img
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derived from the os module

    Returns:
        a list of the keys
    """
    # 'path' instead of 'file' — the old local name shadowed the builtin.
    path = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(path)
    try:
        # Iterating a Shelf yields its keys; list() replaces the manual loop.
        return list(shelve_store)
    finally:
        # Guarantee the shelve is closed even if iteration raises; the old
        # code leaked the handle on any exception between open() and close().
        shelve_store.close()
def function[get_keys_from_shelve, parameter[file_name, file_location]]: constant[ Function to retreive all keys in a shelve Args: file_name: Shelve storage file name file_location: The location of the file, derive from the os module Returns: a list of the keys ] variable[temp_list] assign[=] call[name[list], parameter[]] variable[file] assign[=] call[name[__os].path.join, parameter[name[file_location], name[file_name]]] variable[shelve_store] assign[=] call[name[__shelve].open, parameter[name[file]]] for taget[name[key]] in starred[name[shelve_store]] begin[:] call[name[temp_list].append, parameter[name[key]]] call[name[shelve_store].close, parameter[]] return[name[temp_list]]
keyword[def] identifier[get_keys_from_shelve] ( identifier[file_name] , identifier[file_location] ): literal[string] identifier[temp_list] = identifier[list] () identifier[file] = identifier[__os] . identifier[path] . identifier[join] ( identifier[file_location] , identifier[file_name] ) identifier[shelve_store] = identifier[__shelve] . identifier[open] ( identifier[file] ) keyword[for] identifier[key] keyword[in] identifier[shelve_store] : identifier[temp_list] . identifier[append] ( identifier[key] ) identifier[shelve_store] . identifier[close] () keyword[return] identifier[temp_list]
def get_keys_from_shelve(file_name, file_location): """ Function to retreive all keys in a shelve Args: file_name: Shelve storage file name file_location: The location of the file, derive from the os module Returns: a list of the keys """ temp_list = list() file = __os.path.join(file_location, file_name) shelve_store = __shelve.open(file) for key in shelve_store: temp_list.append(key) # depends on [control=['for'], data=['key']] shelve_store.close() return temp_list
def _make_txn_selector(self):
    """Helper for :meth:`read`: build the ``TransactionSelector`` protobuf."""
    # An already-begun transaction is referenced directly by its ID.
    if self._transaction_id is not None:
        return TransactionSelector(id=self._transaction_id)

    # Exactly one staleness bound applies; 'strong' is the fallback.
    if self._read_timestamp:
        bound = {"read_timestamp": _datetime_to_pb_timestamp(self._read_timestamp)}
    elif self._min_read_timestamp:
        bound = {"min_read_timestamp": _datetime_to_pb_timestamp(self._min_read_timestamp)}
    elif self._max_staleness:
        bound = {"max_staleness": _timedelta_to_duration_pb(self._max_staleness)}
    elif self._exact_staleness:
        bound = {"exact_staleness": _timedelta_to_duration_pb(self._exact_staleness)}
    else:
        bound = {"strong": True}

    options = TransactionOptions(read_only=TransactionOptions.ReadOnly(**bound))

    # Multi-use snapshots must be begun; single-use ones are inlined.
    if self._multi_use:
        return TransactionSelector(begin=options)
    return TransactionSelector(single_use=options)
def function[_make_txn_selector, parameter[self]]: constant[Helper for :meth:`read`.] if compare[name[self]._transaction_id is_not constant[None]] begin[:] return[call[name[TransactionSelector], parameter[]]] if name[self]._read_timestamp begin[:] variable[key] assign[=] constant[read_timestamp] variable[value] assign[=] call[name[_datetime_to_pb_timestamp], parameter[name[self]._read_timestamp]] variable[options] assign[=] call[name[TransactionOptions], parameter[]] if name[self]._multi_use begin[:] return[call[name[TransactionSelector], parameter[]]]
keyword[def] identifier[_make_txn_selector] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_transaction_id] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[TransactionSelector] ( identifier[id] = identifier[self] . identifier[_transaction_id] ) keyword[if] identifier[self] . identifier[_read_timestamp] : identifier[key] = literal[string] identifier[value] = identifier[_datetime_to_pb_timestamp] ( identifier[self] . identifier[_read_timestamp] ) keyword[elif] identifier[self] . identifier[_min_read_timestamp] : identifier[key] = literal[string] identifier[value] = identifier[_datetime_to_pb_timestamp] ( identifier[self] . identifier[_min_read_timestamp] ) keyword[elif] identifier[self] . identifier[_max_staleness] : identifier[key] = literal[string] identifier[value] = identifier[_timedelta_to_duration_pb] ( identifier[self] . identifier[_max_staleness] ) keyword[elif] identifier[self] . identifier[_exact_staleness] : identifier[key] = literal[string] identifier[value] = identifier[_timedelta_to_duration_pb] ( identifier[self] . identifier[_exact_staleness] ) keyword[else] : identifier[key] = literal[string] identifier[value] = keyword[True] identifier[options] = identifier[TransactionOptions] ( identifier[read_only] = identifier[TransactionOptions] . identifier[ReadOnly] (**{ identifier[key] : identifier[value] }) ) keyword[if] identifier[self] . identifier[_multi_use] : keyword[return] identifier[TransactionSelector] ( identifier[begin] = identifier[options] ) keyword[else] : keyword[return] identifier[TransactionSelector] ( identifier[single_use] = identifier[options] )
def _make_txn_selector(self): """Helper for :meth:`read`.""" if self._transaction_id is not None: return TransactionSelector(id=self._transaction_id) # depends on [control=['if'], data=[]] if self._read_timestamp: key = 'read_timestamp' value = _datetime_to_pb_timestamp(self._read_timestamp) # depends on [control=['if'], data=[]] elif self._min_read_timestamp: key = 'min_read_timestamp' value = _datetime_to_pb_timestamp(self._min_read_timestamp) # depends on [control=['if'], data=[]] elif self._max_staleness: key = 'max_staleness' value = _timedelta_to_duration_pb(self._max_staleness) # depends on [control=['if'], data=[]] elif self._exact_staleness: key = 'exact_staleness' value = _timedelta_to_duration_pb(self._exact_staleness) # depends on [control=['if'], data=[]] else: key = 'strong' value = True options = TransactionOptions(read_only=TransactionOptions.ReadOnly(**{key: value})) if self._multi_use: return TransactionSelector(begin=options) # depends on [control=['if'], data=[]] else: return TransactionSelector(single_use=options)
def calldataload(computation: BaseComputation) -> None:
    """
    Load a 32-byte word of call data onto the stack.

    Reads 32 bytes of message data starting at the popped offset,
    right-pads with zero bytes past the end of the data, then strips
    leading zero bytes before pushing the result.
    """
    offset = computation.stack_pop(type_hint=constants.UINT256)
    raw_word = computation.msg.data_as_bytes[offset:offset + 32]
    # Pad short reads out to 32 bytes, then normalize away leading zeros.
    normalized = raw_word.ljust(32, b'\x00').lstrip(b'\x00')
    computation.stack_push(normalized)
def function[calldataload, parameter[computation]]: constant[ Load call data into memory. ] variable[start_position] assign[=] call[name[computation].stack_pop, parameter[]] variable[value] assign[=] call[name[computation].msg.data_as_bytes][<ast.Slice object at 0x7da1b1647760>] variable[padded_value] assign[=] call[name[value].ljust, parameter[constant[32], constant[b'\x00']]] variable[normalized_value] assign[=] call[name[padded_value].lstrip, parameter[constant[b'\x00']]] call[name[computation].stack_push, parameter[name[normalized_value]]]
keyword[def] identifier[calldataload] ( identifier[computation] : identifier[BaseComputation] )-> keyword[None] : literal[string] identifier[start_position] = identifier[computation] . identifier[stack_pop] ( identifier[type_hint] = identifier[constants] . identifier[UINT256] ) identifier[value] = identifier[computation] . identifier[msg] . identifier[data_as_bytes] [ identifier[start_position] : identifier[start_position] + literal[int] ] identifier[padded_value] = identifier[value] . identifier[ljust] ( literal[int] , literal[string] ) identifier[normalized_value] = identifier[padded_value] . identifier[lstrip] ( literal[string] ) identifier[computation] . identifier[stack_push] ( identifier[normalized_value] )
def calldataload(computation: BaseComputation) -> None: """ Load call data into memory. """ start_position = computation.stack_pop(type_hint=constants.UINT256) value = computation.msg.data_as_bytes[start_position:start_position + 32] padded_value = value.ljust(32, b'\x00') normalized_value = padded_value.lstrip(b'\x00') computation.stack_push(normalized_value)
def pole_from_endpoints(coord1, coord2):
    """Compute the pole of the great circle connecting two coordinates.

    This assumes a right-handed rule from coord1 to coord2: the pole is
    the north pole under that assumption.

    Parameters
    ----------
    coord1 : `~astropy.coordinates.SkyCoord`
        Coordinate of one point on a great circle.
    coord2 : `~astropy.coordinates.SkyCoord`
        Coordinate of the other point on a great circle.

    Returns
    -------
    pole : `~astropy.coordinates.SkyCoord`
        The coordinates of the pole.
    """
    # Unit vector of the first point in its own frame.
    unit1 = coord1.cartesian / coord1.cartesian.norm()
    # Bring the second point into the same frame before taking its unit vector.
    cart2 = coord2.transform_to(coord1.frame).cartesian
    unit2 = cart2 / cart2.norm()
    # The (right-handed) cross product points at the pole; normalize it.
    raw_pole = unit1.cross(unit2)
    return coord1.frame.realize_frame(raw_pole / raw_pole.norm())
def function[pole_from_endpoints, parameter[coord1, coord2]]: constant[Compute the pole from a great circle that connects the two specified coordinates. This assumes a right-handed rule from coord1 to coord2: the pole is the north pole under that assumption. Parameters ---------- coord1 : `~astropy.coordinates.SkyCoord` Coordinate of one point on a great circle. coord2 : `~astropy.coordinates.SkyCoord` Coordinate of the other point on a great circle. Returns ------- pole : `~astropy.coordinates.SkyCoord` The coordinates of the pole. ] variable[c1] assign[=] binary_operation[name[coord1].cartesian / call[name[coord1].cartesian.norm, parameter[]]] variable[coord2] assign[=] call[name[coord2].transform_to, parameter[name[coord1].frame]] variable[c2] assign[=] binary_operation[name[coord2].cartesian / call[name[coord2].cartesian.norm, parameter[]]] variable[pole] assign[=] call[name[c1].cross, parameter[name[c2]]] variable[pole] assign[=] binary_operation[name[pole] / call[name[pole].norm, parameter[]]] return[call[name[coord1].frame.realize_frame, parameter[name[pole]]]]
keyword[def] identifier[pole_from_endpoints] ( identifier[coord1] , identifier[coord2] ): literal[string] identifier[c1] = identifier[coord1] . identifier[cartesian] / identifier[coord1] . identifier[cartesian] . identifier[norm] () identifier[coord2] = identifier[coord2] . identifier[transform_to] ( identifier[coord1] . identifier[frame] ) identifier[c2] = identifier[coord2] . identifier[cartesian] / identifier[coord2] . identifier[cartesian] . identifier[norm] () identifier[pole] = identifier[c1] . identifier[cross] ( identifier[c2] ) identifier[pole] = identifier[pole] / identifier[pole] . identifier[norm] () keyword[return] identifier[coord1] . identifier[frame] . identifier[realize_frame] ( identifier[pole] )
def pole_from_endpoints(coord1, coord2): """Compute the pole from a great circle that connects the two specified coordinates. This assumes a right-handed rule from coord1 to coord2: the pole is the north pole under that assumption. Parameters ---------- coord1 : `~astropy.coordinates.SkyCoord` Coordinate of one point on a great circle. coord2 : `~astropy.coordinates.SkyCoord` Coordinate of the other point on a great circle. Returns ------- pole : `~astropy.coordinates.SkyCoord` The coordinates of the pole. """ c1 = coord1.cartesian / coord1.cartesian.norm() coord2 = coord2.transform_to(coord1.frame) c2 = coord2.cartesian / coord2.cartesian.norm() pole = c1.cross(c2) pole = pole / pole.norm() return coord1.frame.realize_frame(pole)
def _query(self, action, qobj):
    """
    Return a WPToolsQuery string built from the configured REST endpoint.

    Note: ``action`` is accepted for interface parity but is not used here.
    """
    endpoint = self.params['rest_endpoint']
    title = self.params.get('title')
    return qobj.restbase(endpoint, title)
def function[_query, parameter[self, action, qobj]]: constant[ returns WPToolsQuery string from action ] return[call[name[qobj].restbase, parameter[call[name[self].params][constant[rest_endpoint]], call[name[self].params.get, parameter[constant[title]]]]]]
keyword[def] identifier[_query] ( identifier[self] , identifier[action] , identifier[qobj] ): literal[string] keyword[return] identifier[qobj] . identifier[restbase] ( identifier[self] . identifier[params] [ literal[string] ], identifier[self] . identifier[params] . identifier[get] ( literal[string] ))
def _query(self, action, qobj): """ returns WPToolsQuery string from action """ return qobj.restbase(self.params['rest_endpoint'], self.params.get('title'))
def meters(self):
    """Return (x, y) in Spherical Mercator EPSG:900913 meters.

    Converts this object's WGS84 lat/lon to projected meters.
    """
    lat, lon = self.latitude_longitude
    # Longitude maps linearly onto the Mercator x axis.
    meter_x = lon * ORIGIN_SHIFT / 180.0
    # Latitude goes through the Mercator log/tan projection.
    meter_y = math.log(math.tan((90.0 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
    meter_y = meter_y * ORIGIN_SHIFT / 180.0
    return meter_x, meter_y
def function[meters, parameter[self]]: constant[Gets the XY meters in Spherical Mercator EPSG:900913, converted from lat/lon in WGS84] <ast.Tuple object at 0x7da2044c0370> assign[=] name[self].latitude_longitude variable[meter_x] assign[=] binary_operation[binary_operation[name[longitude] * name[ORIGIN_SHIFT]] / constant[180.0]] variable[meter_y] assign[=] binary_operation[call[name[math].log, parameter[call[name[math].tan, parameter[binary_operation[binary_operation[binary_operation[constant[90.0] + name[latitude]] * name[math].pi] / constant[360.0]]]]]] / binary_operation[name[math].pi / constant[180.0]]] variable[meter_y] assign[=] binary_operation[binary_operation[name[meter_y] * name[ORIGIN_SHIFT]] / constant[180.0]] return[tuple[[<ast.Name object at 0x7da2044c1870>, <ast.Name object at 0x7da2044c3760>]]]
keyword[def] identifier[meters] ( identifier[self] ): literal[string] identifier[latitude] , identifier[longitude] = identifier[self] . identifier[latitude_longitude] identifier[meter_x] = identifier[longitude] * identifier[ORIGIN_SHIFT] / literal[int] identifier[meter_y] = identifier[math] . identifier[log] ( identifier[math] . identifier[tan] (( literal[int] + identifier[latitude] )* identifier[math] . identifier[pi] / literal[int] ))/( identifier[math] . identifier[pi] / literal[int] ) identifier[meter_y] = identifier[meter_y] * identifier[ORIGIN_SHIFT] / literal[int] keyword[return] identifier[meter_x] , identifier[meter_y]
def meters(self): """Gets the XY meters in Spherical Mercator EPSG:900913, converted from lat/lon in WGS84""" (latitude, longitude) = self.latitude_longitude meter_x = longitude * ORIGIN_SHIFT / 180.0 meter_y = math.log(math.tan((90.0 + latitude) * math.pi / 360.0)) / (math.pi / 180.0) meter_y = meter_y * ORIGIN_SHIFT / 180.0 return (meter_x, meter_y)
def cp(src, dst, overwrite=False):
    """
    Copy files to a new location.

    :param src: list (or string) of paths of files to copy
    :param dst: file or folder to copy item(s) to
    :param overwrite: IF the file already exists, should I overwrite it?
    """
    sources = src if isinstance(src, list) else [src]
    dst = os.path.expanduser(dst)
    dst_is_dir = os.path.isdir(dst)
    # Multiple sources only make sense when the destination is a folder.
    if len(sources) > 1 and not dst_is_dir:
        raise OSError("Cannot copy multiple item to same file")
    for entry in sources:
        source = os.path.expanduser(entry)
        if dst_is_dir:
            destination = os.path.join(dst, os.path.basename(source))
        else:
            destination = dst
        if os.path.exists(destination) and not overwrite:
            _logger.warning("Not replacing {0} with {1}, overwrite not enabled"
                            "".format(destination, source))
            continue
        shutil.copy(source, destination)
def function[cp, parameter[src, dst, overwrite]]: constant[ Copy files to a new location. :param src: list (or string) of paths of files to copy :param dst: file or folder to copy item(s) to :param overwrite: IF the file already exists, should I overwrite it? ] if <ast.UnaryOp object at 0x7da18f09f820> begin[:] variable[src] assign[=] list[[<ast.Name object at 0x7da18f09c5e0>]] variable[dst] assign[=] call[name[os].path.expanduser, parameter[name[dst]]] variable[dst_folder] assign[=] call[name[os].path.isdir, parameter[name[dst]]] if <ast.BoolOp object at 0x7da18f09e620> begin[:] <ast.Raise object at 0x7da18f09e500> for taget[name[item]] in starred[name[src]] begin[:] variable[source] assign[=] call[name[os].path.expanduser, parameter[name[item]]] variable[destination] assign[=] <ast.IfExp object at 0x7da18f09e980> if <ast.BoolOp object at 0x7da18f09c220> begin[:] call[name[_logger].warning, parameter[call[constant[Not replacing {0} with {1}, overwrite not enabled].format, parameter[name[destination], name[source]]]]] continue call[name[shutil].copy, parameter[name[source], name[destination]]]
keyword[def] identifier[cp] ( identifier[src] , identifier[dst] , identifier[overwrite] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[src] , identifier[list] ): identifier[src] =[ identifier[src] ] identifier[dst] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[dst] ) identifier[dst_folder] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[dst] ) keyword[if] identifier[len] ( identifier[src] )> literal[int] keyword[and] keyword[not] identifier[dst_folder] : keyword[raise] identifier[OSError] ( literal[string] ) keyword[for] identifier[item] keyword[in] identifier[src] : identifier[source] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[item] ) identifier[destination] =( identifier[dst] keyword[if] keyword[not] identifier[dst_folder] keyword[else] identifier[os] . identifier[path] . identifier[join] ( identifier[dst] , identifier[os] . identifier[path] . identifier[basename] ( identifier[source] ))) keyword[if] keyword[not] identifier[overwrite] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[destination] ): identifier[_logger] . identifier[warning] ( literal[string] literal[string] . identifier[format] ( identifier[destination] , identifier[source] )) keyword[continue] identifier[shutil] . identifier[copy] ( identifier[source] , identifier[destination] )
def cp(src, dst, overwrite=False): """ Copy files to a new location. :param src: list (or string) of paths of files to copy :param dst: file or folder to copy item(s) to :param overwrite: IF the file already exists, should I overwrite it? """ if not isinstance(src, list): src = [src] # depends on [control=['if'], data=[]] dst = os.path.expanduser(dst) dst_folder = os.path.isdir(dst) if len(src) > 1 and (not dst_folder): raise OSError('Cannot copy multiple item to same file') # depends on [control=['if'], data=[]] for item in src: source = os.path.expanduser(item) destination = dst if not dst_folder else os.path.join(dst, os.path.basename(source)) if not overwrite and os.path.exists(destination): _logger.warning('Not replacing {0} with {1}, overwrite not enabled'.format(destination, source)) continue # depends on [control=['if'], data=[]] shutil.copy(source, destination) # depends on [control=['for'], data=['item']]
def _random_mutation_operator(self, individual, allow_shrink=True):
    """Perform a replacement, insertion, or shrink mutation on an individual.

    Parameters
    ----------
    individual: DEAP individual
        A list of pipeline operators and model parameters that can be
        compiled by DEAP into a callable function
    allow_shrink: bool (True)
        If True the `mutShrink` operator, which randomly shrinks the pipeline,
        is allowed to be chosen as one of the random mutation operators.
        If False, `mutShrink` will never be chosen as a mutation operator.

    Returns
    -------
    mut_ind: DEAP individual
        Returns the individual with one of the mutations applied to it
    """
    # Assemble the pool of candidate mutators; shrink only applies when the
    # tree has more than one primitive to remove.
    if self.tree_structure:
        mutation_techniques = [
            partial(gp.mutInsert, pset=self._pset),
            partial(mutNodeReplacement, pset=self._pset)
        ]
        # We can't shrink pipelines with only one primitive, so we only add it if we find more primitives.
        number_of_primitives = sum([isinstance(node, deap.gp.Primitive) for node in individual])
        if number_of_primitives > 1 and allow_shrink:
            mutation_techniques.append(partial(gp.mutShrink))
    else:
        mutation_techniques = [partial(mutNodeReplacement, pset=self._pset)]
    # Pick one mutator uniformly at random for this call.
    mutator = np.random.choice(mutation_techniques)

    unsuccesful_mutations = 0
    # Retry until a never-before-evaluated offspring appears or the loop
    # budget runs out.
    for _ in range(self._max_mut_loops):
        # We have to clone the individual because mutator operators work in-place.
        ind = self._toolbox.clone(individual)
        offspring, = mutator(ind)
        if str(offspring) not in self.evaluated_individuals_:
            # Update statistics
            # crossover_count is kept the same as for the predecessor
            # mutation count is increased by 1
            # predecessor is set to the string representation of the individual before mutation
            # generation is set to 'INVALID' such that we can recognize that it should be updated accordingly
            offspring.statistics['crossover_count'] = individual.statistics['crossover_count']
            offspring.statistics['mutation_count'] = individual.statistics['mutation_count'] + 1
            offspring.statistics['predecessor'] = (str(individual),)
            offspring.statistics['generation'] = 'INVALID'
            break
        else:
            unsuccesful_mutations += 1

    # Sometimes you have pipelines for which every shrunk version has already been explored too.
    # To still mutate the individual, one of the two other mutators should be applied instead.
    # NOTE(review): 50 is hard-coded here; presumably it should equal
    # self._max_mut_loops — confirm against the attribute's definition.
    if ((unsuccesful_mutations == 50) and
            (type(mutator) is partial and mutator.func is gp.mutShrink)):
        offspring, = self._random_mutation_operator(individual, allow_shrink=False)

    # Returned as a 1-tuple, matching the DEAP mutation-operator convention.
    return offspring,
def function[_random_mutation_operator, parameter[self, individual, allow_shrink]]: constant[Perform a replacement, insertion, or shrink mutation on an individual. Parameters ---------- individual: DEAP individual A list of pipeline operators and model parameters that can be compiled by DEAP into a callable function allow_shrink: bool (True) If True the `mutShrink` operator, which randomly shrinks the pipeline, is allowed to be chosen as one of the random mutation operators. If False, `mutShrink` will never be chosen as a mutation operator. Returns ------- mut_ind: DEAP individual Returns the individual with one of the mutations applied to it ] if name[self].tree_structure begin[:] variable[mutation_techniques] assign[=] list[[<ast.Call object at 0x7da20e956b30>, <ast.Call object at 0x7da20e956e00>]] variable[number_of_primitives] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20e954850>]] if <ast.BoolOp object at 0x7da20e9577f0> begin[:] call[name[mutation_techniques].append, parameter[call[name[partial], parameter[name[gp].mutShrink]]]] variable[mutator] assign[=] call[name[np].random.choice, parameter[name[mutation_techniques]]] variable[unsuccesful_mutations] assign[=] constant[0] for taget[name[_]] in starred[call[name[range], parameter[name[self]._max_mut_loops]]] begin[:] variable[ind] assign[=] call[name[self]._toolbox.clone, parameter[name[individual]]] <ast.Tuple object at 0x7da20e954700> assign[=] call[name[mutator], parameter[name[ind]]] if compare[call[name[str], parameter[name[offspring]]] <ast.NotIn object at 0x7da2590d7190> name[self].evaluated_individuals_] begin[:] call[name[offspring].statistics][constant[crossover_count]] assign[=] call[name[individual].statistics][constant[crossover_count]] call[name[offspring].statistics][constant[mutation_count]] assign[=] binary_operation[call[name[individual].statistics][constant[mutation_count]] + constant[1]] call[name[offspring].statistics][constant[predecessor]] assign[=] 
tuple[[<ast.Call object at 0x7da20e956350>]] call[name[offspring].statistics][constant[generation]] assign[=] constant[INVALID] break if <ast.BoolOp object at 0x7da20e9557b0> begin[:] <ast.Tuple object at 0x7da20e954b80> assign[=] call[name[self]._random_mutation_operator, parameter[name[individual]]] return[tuple[[<ast.Name object at 0x7da20e955f00>]]]
keyword[def] identifier[_random_mutation_operator] ( identifier[self] , identifier[individual] , identifier[allow_shrink] = keyword[True] ): literal[string] keyword[if] identifier[self] . identifier[tree_structure] : identifier[mutation_techniques] =[ identifier[partial] ( identifier[gp] . identifier[mutInsert] , identifier[pset] = identifier[self] . identifier[_pset] ), identifier[partial] ( identifier[mutNodeReplacement] , identifier[pset] = identifier[self] . identifier[_pset] ) ] identifier[number_of_primitives] = identifier[sum] ([ identifier[isinstance] ( identifier[node] , identifier[deap] . identifier[gp] . identifier[Primitive] ) keyword[for] identifier[node] keyword[in] identifier[individual] ]) keyword[if] identifier[number_of_primitives] > literal[int] keyword[and] identifier[allow_shrink] : identifier[mutation_techniques] . identifier[append] ( identifier[partial] ( identifier[gp] . identifier[mutShrink] )) keyword[else] : identifier[mutation_techniques] =[ identifier[partial] ( identifier[mutNodeReplacement] , identifier[pset] = identifier[self] . identifier[_pset] )] identifier[mutator] = identifier[np] . identifier[random] . identifier[choice] ( identifier[mutation_techniques] ) identifier[unsuccesful_mutations] = literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[_max_mut_loops] ): identifier[ind] = identifier[self] . identifier[_toolbox] . identifier[clone] ( identifier[individual] ) identifier[offspring] ,= identifier[mutator] ( identifier[ind] ) keyword[if] identifier[str] ( identifier[offspring] ) keyword[not] keyword[in] identifier[self] . identifier[evaluated_individuals_] : identifier[offspring] . identifier[statistics] [ literal[string] ]= identifier[individual] . identifier[statistics] [ literal[string] ] identifier[offspring] . identifier[statistics] [ literal[string] ]= identifier[individual] . identifier[statistics] [ literal[string] ]+ literal[int] identifier[offspring] . 
identifier[statistics] [ literal[string] ]=( identifier[str] ( identifier[individual] ),) identifier[offspring] . identifier[statistics] [ literal[string] ]= literal[string] keyword[break] keyword[else] : identifier[unsuccesful_mutations] += literal[int] keyword[if] (( identifier[unsuccesful_mutations] == literal[int] ) keyword[and] ( identifier[type] ( identifier[mutator] ) keyword[is] identifier[partial] keyword[and] identifier[mutator] . identifier[func] keyword[is] identifier[gp] . identifier[mutShrink] )): identifier[offspring] ,= identifier[self] . identifier[_random_mutation_operator] ( identifier[individual] , identifier[allow_shrink] = keyword[False] ) keyword[return] identifier[offspring] ,
def _random_mutation_operator(self, individual, allow_shrink=True): """Perform a replacement, insertion, or shrink mutation on an individual. Parameters ---------- individual: DEAP individual A list of pipeline operators and model parameters that can be compiled by DEAP into a callable function allow_shrink: bool (True) If True the `mutShrink` operator, which randomly shrinks the pipeline, is allowed to be chosen as one of the random mutation operators. If False, `mutShrink` will never be chosen as a mutation operator. Returns ------- mut_ind: DEAP individual Returns the individual with one of the mutations applied to it """ if self.tree_structure: mutation_techniques = [partial(gp.mutInsert, pset=self._pset), partial(mutNodeReplacement, pset=self._pset)] # We can't shrink pipelines with only one primitive, so we only add it if we find more primitives. number_of_primitives = sum([isinstance(node, deap.gp.Primitive) for node in individual]) if number_of_primitives > 1 and allow_shrink: mutation_techniques.append(partial(gp.mutShrink)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: mutation_techniques = [partial(mutNodeReplacement, pset=self._pset)] mutator = np.random.choice(mutation_techniques) unsuccesful_mutations = 0 for _ in range(self._max_mut_loops): # We have to clone the individual because mutator operators work in-place. 
ind = self._toolbox.clone(individual) (offspring,) = mutator(ind) if str(offspring) not in self.evaluated_individuals_: # Update statistics # crossover_count is kept the same as for the predecessor # mutation count is increased by 1 # predecessor is set to the string representation of the individual before mutation # generation is set to 'INVALID' such that we can recognize that it should be updated accordingly offspring.statistics['crossover_count'] = individual.statistics['crossover_count'] offspring.statistics['mutation_count'] = individual.statistics['mutation_count'] + 1 offspring.statistics['predecessor'] = (str(individual),) offspring.statistics['generation'] = 'INVALID' break # depends on [control=['if'], data=[]] else: unsuccesful_mutations += 1 # depends on [control=['for'], data=[]] # Sometimes you have pipelines for which every shrunk version has already been explored too. # To still mutate the individual, one of the two other mutators should be applied instead. if unsuccesful_mutations == 50 and (type(mutator) is partial and mutator.func is gp.mutShrink): (offspring,) = self._random_mutation_operator(individual, allow_shrink=False) # depends on [control=['if'], data=[]] return (offspring,)
def _ctypes_indices(parameter): """Returns code for parameter variable declarations specifying the size of each dimension in the specified parameter. """ if (parameter.dimension is not None and ":" in parameter.dimension): splice = _ctypes_splice(parameter) if "out" in parameter.direction: #Even for pure out variables, the ctypes pointer is passed in and will #have a value already. return ("integer, intent(inout) :: {}".format(splice), False) else: return ("integer, intent(in) :: {}".format(splice), False)
def function[_ctypes_indices, parameter[parameter]]: constant[Returns code for parameter variable declarations specifying the size of each dimension in the specified parameter. ] if <ast.BoolOp object at 0x7da20e9572e0> begin[:] variable[splice] assign[=] call[name[_ctypes_splice], parameter[name[parameter]]] if compare[constant[out] in name[parameter].direction] begin[:] return[tuple[[<ast.Call object at 0x7da20e954ee0>, <ast.Constant object at 0x7da20e956050>]]]
keyword[def] identifier[_ctypes_indices] ( identifier[parameter] ): literal[string] keyword[if] ( identifier[parameter] . identifier[dimension] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] keyword[in] identifier[parameter] . identifier[dimension] ): identifier[splice] = identifier[_ctypes_splice] ( identifier[parameter] ) keyword[if] literal[string] keyword[in] identifier[parameter] . identifier[direction] : keyword[return] ( literal[string] . identifier[format] ( identifier[splice] ), keyword[False] ) keyword[else] : keyword[return] ( literal[string] . identifier[format] ( identifier[splice] ), keyword[False] )
def _ctypes_indices(parameter): """Returns code for parameter variable declarations specifying the size of each dimension in the specified parameter. """ if parameter.dimension is not None and ':' in parameter.dimension: splice = _ctypes_splice(parameter) if 'out' in parameter.direction: #Even for pure out variables, the ctypes pointer is passed in and will #have a value already. return ('integer, intent(inout) :: {}'.format(splice), False) # depends on [control=['if'], data=[]] else: return ('integer, intent(in) :: {}'.format(splice), False) # depends on [control=['if'], data=[]]
def to_abivars(self): """Returns a dictionary with the abinit variables.""" abivars = dict( bs_calctype=1, bs_loband=self.bs_loband, #nband=self.nband, mbpt_sciss=self.mbpt_sciss, ecuteps=self.ecuteps, bs_algorithm=self._ALGO2VAR[self.algo], bs_coulomb_term=21, mdf_epsinf=self.mdf_epsinf, bs_exchange_term=1 if self.with_lf else 0, inclvkb=self.inclvkb, zcut=self.zcut, bs_freq_mesh=self.bs_freq_mesh, bs_coupling=self._EXC_TYPES[self.exc_type], optdriver=self.optdriver, ) if self.use_haydock: # FIXME abivars.update( bs_haydock_niter=100, # No. of iterations for Haydock bs_hayd_term=0, # No terminator bs_haydock_tol=[0.05, 0], # Stopping criteria ) elif self.use_direct_diago: raise NotImplementedError("") elif self.use_cg: raise NotImplementedError("") else: raise ValueError("Unknown algorithm for EXC: %s" % self.algo) # Add extra kwargs abivars.update(self.kwargs) return abivars
def function[to_abivars, parameter[self]]: constant[Returns a dictionary with the abinit variables.] variable[abivars] assign[=] call[name[dict], parameter[]] if name[self].use_haydock begin[:] call[name[abivars].update, parameter[]] call[name[abivars].update, parameter[name[self].kwargs]] return[name[abivars]]
keyword[def] identifier[to_abivars] ( identifier[self] ): literal[string] identifier[abivars] = identifier[dict] ( identifier[bs_calctype] = literal[int] , identifier[bs_loband] = identifier[self] . identifier[bs_loband] , identifier[mbpt_sciss] = identifier[self] . identifier[mbpt_sciss] , identifier[ecuteps] = identifier[self] . identifier[ecuteps] , identifier[bs_algorithm] = identifier[self] . identifier[_ALGO2VAR] [ identifier[self] . identifier[algo] ], identifier[bs_coulomb_term] = literal[int] , identifier[mdf_epsinf] = identifier[self] . identifier[mdf_epsinf] , identifier[bs_exchange_term] = literal[int] keyword[if] identifier[self] . identifier[with_lf] keyword[else] literal[int] , identifier[inclvkb] = identifier[self] . identifier[inclvkb] , identifier[zcut] = identifier[self] . identifier[zcut] , identifier[bs_freq_mesh] = identifier[self] . identifier[bs_freq_mesh] , identifier[bs_coupling] = identifier[self] . identifier[_EXC_TYPES] [ identifier[self] . identifier[exc_type] ], identifier[optdriver] = identifier[self] . identifier[optdriver] , ) keyword[if] identifier[self] . identifier[use_haydock] : identifier[abivars] . identifier[update] ( identifier[bs_haydock_niter] = literal[int] , identifier[bs_hayd_term] = literal[int] , identifier[bs_haydock_tol] =[ literal[int] , literal[int] ], ) keyword[elif] identifier[self] . identifier[use_direct_diago] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[elif] identifier[self] . identifier[use_cg] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[algo] ) identifier[abivars] . identifier[update] ( identifier[self] . identifier[kwargs] ) keyword[return] identifier[abivars]
def to_abivars(self): """Returns a dictionary with the abinit variables.""" #nband=self.nband, abivars = dict(bs_calctype=1, bs_loband=self.bs_loband, mbpt_sciss=self.mbpt_sciss, ecuteps=self.ecuteps, bs_algorithm=self._ALGO2VAR[self.algo], bs_coulomb_term=21, mdf_epsinf=self.mdf_epsinf, bs_exchange_term=1 if self.with_lf else 0, inclvkb=self.inclvkb, zcut=self.zcut, bs_freq_mesh=self.bs_freq_mesh, bs_coupling=self._EXC_TYPES[self.exc_type], optdriver=self.optdriver) if self.use_haydock: # FIXME # No. of iterations for Haydock # No terminator # Stopping criteria abivars.update(bs_haydock_niter=100, bs_hayd_term=0, bs_haydock_tol=[0.05, 0]) # depends on [control=['if'], data=[]] elif self.use_direct_diago: raise NotImplementedError('') # depends on [control=['if'], data=[]] elif self.use_cg: raise NotImplementedError('') # depends on [control=['if'], data=[]] else: raise ValueError('Unknown algorithm for EXC: %s' % self.algo) # Add extra kwargs abivars.update(self.kwargs) return abivars
def iterate_nodes(self, string_key, distinct=True): """Given a string key it returns the nodes as a generator that can hold the key. The generator iterates one time through the ring starting at the correct position. if `distinct` is set, then the nodes returned will be unique, i.e. no virtual copies will be returned. """ if not self.ring: yield None, None returned_values = set() def distinct_filter(value): if str(value) not in returned_values: returned_values.add(str(value)) return value pos = self.get_node_pos(string_key) for key in self._sorted_keys[pos:]: val = distinct_filter(self.ring[key]) if val: yield val for i, key in enumerate(self._sorted_keys): if i < pos: val = distinct_filter(self.ring[key]) if val: yield val
def function[iterate_nodes, parameter[self, string_key, distinct]]: constant[Given a string key it returns the nodes as a generator that can hold the key. The generator iterates one time through the ring starting at the correct position. if `distinct` is set, then the nodes returned will be unique, i.e. no virtual copies will be returned. ] if <ast.UnaryOp object at 0x7da20e9b1060> begin[:] <ast.Yield object at 0x7da20e9b2a10> variable[returned_values] assign[=] call[name[set], parameter[]] def function[distinct_filter, parameter[value]]: if compare[call[name[str], parameter[name[value]]] <ast.NotIn object at 0x7da2590d7190> name[returned_values]] begin[:] call[name[returned_values].add, parameter[call[name[str], parameter[name[value]]]]] return[name[value]] variable[pos] assign[=] call[name[self].get_node_pos, parameter[name[string_key]]] for taget[name[key]] in starred[call[name[self]._sorted_keys][<ast.Slice object at 0x7da20e9b2b30>]] begin[:] variable[val] assign[=] call[name[distinct_filter], parameter[call[name[self].ring][name[key]]]] if name[val] begin[:] <ast.Yield object at 0x7da20e9b32e0> for taget[tuple[[<ast.Name object at 0x7da20e9b0ac0>, <ast.Name object at 0x7da20e9b2ce0>]]] in starred[call[name[enumerate], parameter[name[self]._sorted_keys]]] begin[:] if compare[name[i] less[<] name[pos]] begin[:] variable[val] assign[=] call[name[distinct_filter], parameter[call[name[self].ring][name[key]]]] if name[val] begin[:] <ast.Yield object at 0x7da20e9b35b0>
keyword[def] identifier[iterate_nodes] ( identifier[self] , identifier[string_key] , identifier[distinct] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[ring] : keyword[yield] keyword[None] , keyword[None] identifier[returned_values] = identifier[set] () keyword[def] identifier[distinct_filter] ( identifier[value] ): keyword[if] identifier[str] ( identifier[value] ) keyword[not] keyword[in] identifier[returned_values] : identifier[returned_values] . identifier[add] ( identifier[str] ( identifier[value] )) keyword[return] identifier[value] identifier[pos] = identifier[self] . identifier[get_node_pos] ( identifier[string_key] ) keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_sorted_keys] [ identifier[pos] :]: identifier[val] = identifier[distinct_filter] ( identifier[self] . identifier[ring] [ identifier[key] ]) keyword[if] identifier[val] : keyword[yield] identifier[val] keyword[for] identifier[i] , identifier[key] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_sorted_keys] ): keyword[if] identifier[i] < identifier[pos] : identifier[val] = identifier[distinct_filter] ( identifier[self] . identifier[ring] [ identifier[key] ]) keyword[if] identifier[val] : keyword[yield] identifier[val]
def iterate_nodes(self, string_key, distinct=True): """Given a string key it returns the nodes as a generator that can hold the key. The generator iterates one time through the ring starting at the correct position. if `distinct` is set, then the nodes returned will be unique, i.e. no virtual copies will be returned. """ if not self.ring: yield (None, None) # depends on [control=['if'], data=[]] returned_values = set() def distinct_filter(value): if str(value) not in returned_values: returned_values.add(str(value)) return value # depends on [control=['if'], data=['returned_values']] pos = self.get_node_pos(string_key) for key in self._sorted_keys[pos:]: val = distinct_filter(self.ring[key]) if val: yield val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] for (i, key) in enumerate(self._sorted_keys): if i < pos: val = distinct_filter(self.ring[key]) if val: yield val # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def prep_system(run_info_yaml, bcbio_system=None): """Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'. """ work_dir = os.getcwd() config, config_file = config_utils.load_system_config(bcbio_system, work_dir) dirs = setup_directories(work_dir, os.path.normpath(os.path.dirname(os.path.dirname(run_info_yaml))), config, config_file) return [dirs, config, run_info_yaml]
def function[prep_system, parameter[run_info_yaml, bcbio_system]]: constant[Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'. ] variable[work_dir] assign[=] call[name[os].getcwd, parameter[]] <ast.Tuple object at 0x7da1b17b4340> assign[=] call[name[config_utils].load_system_config, parameter[name[bcbio_system], name[work_dir]]] variable[dirs] assign[=] call[name[setup_directories], parameter[name[work_dir], call[name[os].path.normpath, parameter[call[name[os].path.dirname, parameter[call[name[os].path.dirname, parameter[name[run_info_yaml]]]]]]], name[config], name[config_file]]] return[list[[<ast.Name object at 0x7da1b17b45e0>, <ast.Name object at 0x7da1b17b4610>, <ast.Name object at 0x7da1b17b4640>]]]
keyword[def] identifier[prep_system] ( identifier[run_info_yaml] , identifier[bcbio_system] = keyword[None] ): literal[string] identifier[work_dir] = identifier[os] . identifier[getcwd] () identifier[config] , identifier[config_file] = identifier[config_utils] . identifier[load_system_config] ( identifier[bcbio_system] , identifier[work_dir] ) identifier[dirs] = identifier[setup_directories] ( identifier[work_dir] , identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[run_info_yaml] ))), identifier[config] , identifier[config_file] ) keyword[return] [ identifier[dirs] , identifier[config] , identifier[run_info_yaml] ]
def prep_system(run_info_yaml, bcbio_system=None): """Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'. """ work_dir = os.getcwd() (config, config_file) = config_utils.load_system_config(bcbio_system, work_dir) dirs = setup_directories(work_dir, os.path.normpath(os.path.dirname(os.path.dirname(run_info_yaml))), config, config_file) return [dirs, config, run_info_yaml]
def normalize(self): """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ if self.tz is None or timezones.is_utc(self.tz): not_null = ~self.isna() DAY_NS = ccalendar.DAY_SECONDS * 1000000000 new_values = self.asi8.copy() adjustment = (new_values[not_null] % DAY_NS) new_values[not_null] = new_values[not_null] - adjustment else: new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
def function[normalize, parameter[self]]: constant[ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) ] if <ast.BoolOp object at 0x7da18ede4a30> begin[:] variable[not_null] assign[=] <ast.UnaryOp object at 0x7da18ede4640> variable[DAY_NS] assign[=] binary_operation[name[ccalendar].DAY_SECONDS * constant[1000000000]] variable[new_values] assign[=] call[name[self].asi8.copy, parameter[]] variable[adjustment] assign[=] binary_operation[call[name[new_values]][name[not_null]] <ast.Mod object at 0x7da2590d6920> name[DAY_NS]] call[name[new_values]][name[not_null]] assign[=] binary_operation[call[name[new_values]][name[not_null]] - name[adjustment]] return[call[call[call[name[type], parameter[name[self]]]._from_sequence, parameter[name[new_values]]].tz_localize, parameter[name[self].tz]]]
keyword[def] identifier[normalize] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[tz] keyword[is] keyword[None] keyword[or] identifier[timezones] . identifier[is_utc] ( identifier[self] . identifier[tz] ): identifier[not_null] =~ identifier[self] . identifier[isna] () identifier[DAY_NS] = identifier[ccalendar] . identifier[DAY_SECONDS] * literal[int] identifier[new_values] = identifier[self] . identifier[asi8] . identifier[copy] () identifier[adjustment] =( identifier[new_values] [ identifier[not_null] ]% identifier[DAY_NS] ) identifier[new_values] [ identifier[not_null] ]= identifier[new_values] [ identifier[not_null] ]- identifier[adjustment] keyword[else] : identifier[new_values] = identifier[conversion] . identifier[normalize_i8_timestamps] ( identifier[self] . identifier[asi8] , identifier[self] . identifier[tz] ) keyword[return] identifier[type] ( identifier[self] ). identifier[_from_sequence] ( identifier[new_values] , identifier[freq] = literal[string] ). identifier[tz_localize] ( identifier[self] . identifier[tz] )
def normalize(self): """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ if self.tz is None or timezones.is_utc(self.tz): not_null = ~self.isna() DAY_NS = ccalendar.DAY_SECONDS * 1000000000 new_values = self.asi8.copy() adjustment = new_values[not_null] % DAY_NS new_values[not_null] = new_values[not_null] - adjustment # depends on [control=['if'], data=[]] else: new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
def env(self): """ Dict of all environment variables that will be run with this command. """ env_vars = os.environ.copy() env_vars.update(self._env) new_path = ":".join( self._paths + [env_vars["PATH"]] if "PATH" in env_vars else [] + self._paths ) env_vars["PATH"] = new_path for env_var in self._env_drop: if env_var in env_vars: del env_vars[env_var] return env_vars
def function[env, parameter[self]]: constant[ Dict of all environment variables that will be run with this command. ] variable[env_vars] assign[=] call[name[os].environ.copy, parameter[]] call[name[env_vars].update, parameter[name[self]._env]] variable[new_path] assign[=] call[constant[:].join, parameter[<ast.IfExp object at 0x7da18f00c700>]] call[name[env_vars]][constant[PATH]] assign[=] name[new_path] for taget[name[env_var]] in starred[name[self]._env_drop] begin[:] if compare[name[env_var] in name[env_vars]] begin[:] <ast.Delete object at 0x7da20e957b50> return[name[env_vars]]
keyword[def] identifier[env] ( identifier[self] ): literal[string] identifier[env_vars] = identifier[os] . identifier[environ] . identifier[copy] () identifier[env_vars] . identifier[update] ( identifier[self] . identifier[_env] ) identifier[new_path] = literal[string] . identifier[join] ( identifier[self] . identifier[_paths] +[ identifier[env_vars] [ literal[string] ]] keyword[if] literal[string] keyword[in] identifier[env_vars] keyword[else] []+ identifier[self] . identifier[_paths] ) identifier[env_vars] [ literal[string] ]= identifier[new_path] keyword[for] identifier[env_var] keyword[in] identifier[self] . identifier[_env_drop] : keyword[if] identifier[env_var] keyword[in] identifier[env_vars] : keyword[del] identifier[env_vars] [ identifier[env_var] ] keyword[return] identifier[env_vars]
def env(self): """ Dict of all environment variables that will be run with this command. """ env_vars = os.environ.copy() env_vars.update(self._env) new_path = ':'.join(self._paths + [env_vars['PATH']] if 'PATH' in env_vars else [] + self._paths) env_vars['PATH'] = new_path for env_var in self._env_drop: if env_var in env_vars: del env_vars[env_var] # depends on [control=['if'], data=['env_var', 'env_vars']] # depends on [control=['for'], data=['env_var']] return env_vars
def get_stats(self): ''' Get kitty stats as a dictionary ''' resp = requests.get('%s/api/stats.json' % self.url) assert(resp.status_code == 200) return resp.json()
def function[get_stats, parameter[self]]: constant[ Get kitty stats as a dictionary ] variable[resp] assign[=] call[name[requests].get, parameter[binary_operation[constant[%s/api/stats.json] <ast.Mod object at 0x7da2590d6920> name[self].url]]] assert[compare[name[resp].status_code equal[==] constant[200]]] return[call[name[resp].json, parameter[]]]
keyword[def] identifier[get_stats] ( identifier[self] ): literal[string] identifier[resp] = identifier[requests] . identifier[get] ( literal[string] % identifier[self] . identifier[url] ) keyword[assert] ( identifier[resp] . identifier[status_code] == literal[int] ) keyword[return] identifier[resp] . identifier[json] ()
def get_stats(self): """ Get kitty stats as a dictionary """ resp = requests.get('%s/api/stats.json' % self.url) assert resp.status_code == 200 return resp.json()
def obj_to_str(self, file_path=None, deliminator=None, tab=None, quote_numbers=True, quote_empty_str=False): """ This will return a simple str table. :param file_path: str of the path to the file :param keys: list of str of the order of keys to use :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: str of the converted markdown tables """ deliminator = self.deliminator if deliminator is None \ else deliminator tab = self.tab if tab is None else tab list_of_list, column_widths = self.get_data_and_shared_column_widths( data_kwargs=dict(quote_numbers=quote_numbers, quote_empty_str=quote_empty_str), width_kwargs = dict(padding=0)) ret = [[cell.ljust(column_widths[i]) for i, cell in enumerate(row)] for row in list_of_list] ret = [deliminator.join(row) for row in ret] ret = tab + (u'\n' + tab).join(ret) self._save_file(file_path, ret) return ret
def function[obj_to_str, parameter[self, file_path, deliminator, tab, quote_numbers, quote_empty_str]]: constant[ This will return a simple str table. :param file_path: str of the path to the file :param keys: list of str of the order of keys to use :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: str of the converted markdown tables ] variable[deliminator] assign[=] <ast.IfExp object at 0x7da20c7ca080> variable[tab] assign[=] <ast.IfExp object at 0x7da20c7cbd90> <ast.Tuple object at 0x7da20c7c8bb0> assign[=] call[name[self].get_data_and_shared_column_widths, parameter[]] variable[ret] assign[=] <ast.ListComp object at 0x7da20c7cbf10> variable[ret] assign[=] <ast.ListComp object at 0x7da20c7c81f0> variable[ret] assign[=] binary_operation[name[tab] + call[binary_operation[constant[ ] + name[tab]].join, parameter[name[ret]]]] call[name[self]._save_file, parameter[name[file_path], name[ret]]] return[name[ret]]
keyword[def] identifier[obj_to_str] ( identifier[self] , identifier[file_path] = keyword[None] , identifier[deliminator] = keyword[None] , identifier[tab] = keyword[None] , identifier[quote_numbers] = keyword[True] , identifier[quote_empty_str] = keyword[False] ): literal[string] identifier[deliminator] = identifier[self] . identifier[deliminator] keyword[if] identifier[deliminator] keyword[is] keyword[None] keyword[else] identifier[deliminator] identifier[tab] = identifier[self] . identifier[tab] keyword[if] identifier[tab] keyword[is] keyword[None] keyword[else] identifier[tab] identifier[list_of_list] , identifier[column_widths] = identifier[self] . identifier[get_data_and_shared_column_widths] ( identifier[data_kwargs] = identifier[dict] ( identifier[quote_numbers] = identifier[quote_numbers] , identifier[quote_empty_str] = identifier[quote_empty_str] ), identifier[width_kwargs] = identifier[dict] ( identifier[padding] = literal[int] )) identifier[ret] =[[ identifier[cell] . identifier[ljust] ( identifier[column_widths] [ identifier[i] ]) keyword[for] identifier[i] , identifier[cell] keyword[in] identifier[enumerate] ( identifier[row] )] keyword[for] identifier[row] keyword[in] identifier[list_of_list] ] identifier[ret] =[ identifier[deliminator] . identifier[join] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[ret] ] identifier[ret] = identifier[tab] +( literal[string] + identifier[tab] ). identifier[join] ( identifier[ret] ) identifier[self] . identifier[_save_file] ( identifier[file_path] , identifier[ret] ) keyword[return] identifier[ret]
def obj_to_str(self, file_path=None, deliminator=None, tab=None, quote_numbers=True, quote_empty_str=False): """ This will return a simple str table. :param file_path: str of the path to the file :param keys: list of str of the order of keys to use :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: str of the converted markdown tables """ deliminator = self.deliminator if deliminator is None else deliminator tab = self.tab if tab is None else tab (list_of_list, column_widths) = self.get_data_and_shared_column_widths(data_kwargs=dict(quote_numbers=quote_numbers, quote_empty_str=quote_empty_str), width_kwargs=dict(padding=0)) ret = [[cell.ljust(column_widths[i]) for (i, cell) in enumerate(row)] for row in list_of_list] ret = [deliminator.join(row) for row in ret] ret = tab + (u'\n' + tab).join(ret) self._save_file(file_path, ret) return ret
def set_api_from_config_path(self): """Set the RPC mode based on a given config file.""" config = ConfigParser(allow_no_value=False) config.optionxform = str config.read(self.config_path, "utf-8") if config.has_option("defaults", "dynamic_loading"): dynamic_loading = config.get("defaults", "dynamic_loading") else: dynamic_loading = "infura" if dynamic_loading == "infura": self.set_api_rpc_infura() elif dynamic_loading == "localhost": self.set_api_rpc_localhost() else: self.set_api_rpc(dynamic_loading)
def function[set_api_from_config_path, parameter[self]]: constant[Set the RPC mode based on a given config file.] variable[config] assign[=] call[name[ConfigParser], parameter[]] name[config].optionxform assign[=] name[str] call[name[config].read, parameter[name[self].config_path, constant[utf-8]]] if call[name[config].has_option, parameter[constant[defaults], constant[dynamic_loading]]] begin[:] variable[dynamic_loading] assign[=] call[name[config].get, parameter[constant[defaults], constant[dynamic_loading]]] if compare[name[dynamic_loading] equal[==] constant[infura]] begin[:] call[name[self].set_api_rpc_infura, parameter[]]
keyword[def] identifier[set_api_from_config_path] ( identifier[self] ): literal[string] identifier[config] = identifier[ConfigParser] ( identifier[allow_no_value] = keyword[False] ) identifier[config] . identifier[optionxform] = identifier[str] identifier[config] . identifier[read] ( identifier[self] . identifier[config_path] , literal[string] ) keyword[if] identifier[config] . identifier[has_option] ( literal[string] , literal[string] ): identifier[dynamic_loading] = identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[else] : identifier[dynamic_loading] = literal[string] keyword[if] identifier[dynamic_loading] == literal[string] : identifier[self] . identifier[set_api_rpc_infura] () keyword[elif] identifier[dynamic_loading] == literal[string] : identifier[self] . identifier[set_api_rpc_localhost] () keyword[else] : identifier[self] . identifier[set_api_rpc] ( identifier[dynamic_loading] )
def set_api_from_config_path(self): """Set the RPC mode based on a given config file.""" config = ConfigParser(allow_no_value=False) config.optionxform = str config.read(self.config_path, 'utf-8') if config.has_option('defaults', 'dynamic_loading'): dynamic_loading = config.get('defaults', 'dynamic_loading') # depends on [control=['if'], data=[]] else: dynamic_loading = 'infura' if dynamic_loading == 'infura': self.set_api_rpc_infura() # depends on [control=['if'], data=[]] elif dynamic_loading == 'localhost': self.set_api_rpc_localhost() # depends on [control=['if'], data=[]] else: self.set_api_rpc(dynamic_loading)
def split(self, delimiter=None): """Same as string.split(), but retains literal/expandable structure. Returns: List of `EscapedString`. """ result = [] strings = self.strings[:] current = None while strings: is_literal, value = strings[0] parts = value.split(delimiter, 1) if len(parts) > 1: value1, value2 = parts strings[0] = (is_literal, value2) out = EscapedString(value1, is_literal) push = True else: strings = strings[1:] out = EscapedString(value, is_literal) push = False if current is None: current = out else: current = current + out if push: result.append(current) current = None if current: result.append(current) return result
def function[split, parameter[self, delimiter]]: constant[Same as string.split(), but retains literal/expandable structure. Returns: List of `EscapedString`. ] variable[result] assign[=] list[[]] variable[strings] assign[=] call[name[self].strings][<ast.Slice object at 0x7da207f9a830>] variable[current] assign[=] constant[None] while name[strings] begin[:] <ast.Tuple object at 0x7da207f9b8b0> assign[=] call[name[strings]][constant[0]] variable[parts] assign[=] call[name[value].split, parameter[name[delimiter], constant[1]]] if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:] <ast.Tuple object at 0x7da207f98e20> assign[=] name[parts] call[name[strings]][constant[0]] assign[=] tuple[[<ast.Name object at 0x7da207f99de0>, <ast.Name object at 0x7da207f9bb20>]] variable[out] assign[=] call[name[EscapedString], parameter[name[value1], name[is_literal]]] variable[push] assign[=] constant[True] if compare[name[current] is constant[None]] begin[:] variable[current] assign[=] name[out] if name[push] begin[:] call[name[result].append, parameter[name[current]]] variable[current] assign[=] constant[None] if name[current] begin[:] call[name[result].append, parameter[name[current]]] return[name[result]]
keyword[def] identifier[split] ( identifier[self] , identifier[delimiter] = keyword[None] ): literal[string] identifier[result] =[] identifier[strings] = identifier[self] . identifier[strings] [:] identifier[current] = keyword[None] keyword[while] identifier[strings] : identifier[is_literal] , identifier[value] = identifier[strings] [ literal[int] ] identifier[parts] = identifier[value] . identifier[split] ( identifier[delimiter] , literal[int] ) keyword[if] identifier[len] ( identifier[parts] )> literal[int] : identifier[value1] , identifier[value2] = identifier[parts] identifier[strings] [ literal[int] ]=( identifier[is_literal] , identifier[value2] ) identifier[out] = identifier[EscapedString] ( identifier[value1] , identifier[is_literal] ) identifier[push] = keyword[True] keyword[else] : identifier[strings] = identifier[strings] [ literal[int] :] identifier[out] = identifier[EscapedString] ( identifier[value] , identifier[is_literal] ) identifier[push] = keyword[False] keyword[if] identifier[current] keyword[is] keyword[None] : identifier[current] = identifier[out] keyword[else] : identifier[current] = identifier[current] + identifier[out] keyword[if] identifier[push] : identifier[result] . identifier[append] ( identifier[current] ) identifier[current] = keyword[None] keyword[if] identifier[current] : identifier[result] . identifier[append] ( identifier[current] ) keyword[return] identifier[result]
def split(self, delimiter=None): """Same as string.split(), but retains literal/expandable structure. Returns: List of `EscapedString`. """ result = [] strings = self.strings[:] current = None while strings: (is_literal, value) = strings[0] parts = value.split(delimiter, 1) if len(parts) > 1: (value1, value2) = parts strings[0] = (is_literal, value2) out = EscapedString(value1, is_literal) push = True # depends on [control=['if'], data=[]] else: strings = strings[1:] out = EscapedString(value, is_literal) push = False if current is None: current = out # depends on [control=['if'], data=['current']] else: current = current + out if push: result.append(current) current = None # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] if current: result.append(current) # depends on [control=['if'], data=[]] return result
def _del_controller(self, uid): """ Remove controller from internal list and tell the game. :param uid: Unique id of the controller :type uid: str """ try: self.controllers.pop(uid) e = Event(uid, E_DISCONNECT) self.queue.put_nowait(e) except KeyError: # There is no such controller, ignore the command pass
def function[_del_controller, parameter[self, uid]]: constant[ Remove controller from internal list and tell the game. :param uid: Unique id of the controller :type uid: str ] <ast.Try object at 0x7da18bc705b0>
keyword[def] identifier[_del_controller] ( identifier[self] , identifier[uid] ): literal[string] keyword[try] : identifier[self] . identifier[controllers] . identifier[pop] ( identifier[uid] ) identifier[e] = identifier[Event] ( identifier[uid] , identifier[E_DISCONNECT] ) identifier[self] . identifier[queue] . identifier[put_nowait] ( identifier[e] ) keyword[except] identifier[KeyError] : keyword[pass]
def _del_controller(self, uid): """ Remove controller from internal list and tell the game. :param uid: Unique id of the controller :type uid: str """ try: self.controllers.pop(uid) e = Event(uid, E_DISCONNECT) self.queue.put_nowait(e) # depends on [control=['try'], data=[]] except KeyError: # There is no such controller, ignore the command pass # depends on [control=['except'], data=[]]
def get_scaffold_objective_ids(self): """Assumes that a scaffold objective id is available""" section = self._assessment_section item_id = self.get_my_item_id_from_section(section) return section.get_confused_learning_objective_ids(item_id)
def function[get_scaffold_objective_ids, parameter[self]]: constant[Assumes that a scaffold objective id is available] variable[section] assign[=] name[self]._assessment_section variable[item_id] assign[=] call[name[self].get_my_item_id_from_section, parameter[name[section]]] return[call[name[section].get_confused_learning_objective_ids, parameter[name[item_id]]]]
keyword[def] identifier[get_scaffold_objective_ids] ( identifier[self] ): literal[string] identifier[section] = identifier[self] . identifier[_assessment_section] identifier[item_id] = identifier[self] . identifier[get_my_item_id_from_section] ( identifier[section] ) keyword[return] identifier[section] . identifier[get_confused_learning_objective_ids] ( identifier[item_id] )
def get_scaffold_objective_ids(self): """Assumes that a scaffold objective id is available""" section = self._assessment_section item_id = self.get_my_item_id_from_section(section) return section.get_confused_learning_objective_ids(item_id)
def create(self): """ Creates the directory and all its parent directories if it does not exist yet """ if self.dirname and not os.path.exists(self.dirname): os.makedirs(self.dirname)
def function[create, parameter[self]]: constant[ Creates the directory and all its parent directories if it does not exist yet ] if <ast.BoolOp object at 0x7da18f721d50> begin[:] call[name[os].makedirs, parameter[name[self].dirname]]
keyword[def] identifier[create] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[dirname] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[dirname] ): identifier[os] . identifier[makedirs] ( identifier[self] . identifier[dirname] )
def create(self): """ Creates the directory and all its parent directories if it does not exist yet """ if self.dirname and (not os.path.exists(self.dirname)): os.makedirs(self.dirname) # depends on [control=['if'], data=[]]
def _render_dataframe(dataframe): """Helper to render a dataframe as an HTML table.""" data = dataframe.to_dict(orient='records') fields = dataframe.columns.tolist() return IPython.core.display.HTML( datalab.utils.commands.HtmlBuilder.render_table(data, fields))
def function[_render_dataframe, parameter[dataframe]]: constant[Helper to render a dataframe as an HTML table.] variable[data] assign[=] call[name[dataframe].to_dict, parameter[]] variable[fields] assign[=] call[name[dataframe].columns.tolist, parameter[]] return[call[name[IPython].core.display.HTML, parameter[call[name[datalab].utils.commands.HtmlBuilder.render_table, parameter[name[data], name[fields]]]]]]
keyword[def] identifier[_render_dataframe] ( identifier[dataframe] ): literal[string] identifier[data] = identifier[dataframe] . identifier[to_dict] ( identifier[orient] = literal[string] ) identifier[fields] = identifier[dataframe] . identifier[columns] . identifier[tolist] () keyword[return] identifier[IPython] . identifier[core] . identifier[display] . identifier[HTML] ( identifier[datalab] . identifier[utils] . identifier[commands] . identifier[HtmlBuilder] . identifier[render_table] ( identifier[data] , identifier[fields] ))
def _render_dataframe(dataframe): """Helper to render a dataframe as an HTML table.""" data = dataframe.to_dict(orient='records') fields = dataframe.columns.tolist() return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_table(data, fields))
def _linux_stp(br, state): ''' Internal, sets STP state ''' brctl = _tool_path('brctl') return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state), python_shell=False)
def function[_linux_stp, parameter[br, state]]: constant[ Internal, sets STP state ] variable[brctl] assign[=] call[name[_tool_path], parameter[constant[brctl]]] return[call[call[name[__salt__]][constant[cmd.run]], parameter[call[constant[{0} stp {1} {2}].format, parameter[name[brctl], name[br], name[state]]]]]]
keyword[def] identifier[_linux_stp] ( identifier[br] , identifier[state] ): literal[string] identifier[brctl] = identifier[_tool_path] ( literal[string] ) keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] ( identifier[brctl] , identifier[br] , identifier[state] ), identifier[python_shell] = keyword[False] )
def _linux_stp(br, state): """ Internal, sets STP state """ brctl = _tool_path('brctl') return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state), python_shell=False)
def make_list(obj, cast=True): """ Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list. """ if isinstance(obj, list): return list(obj) elif is_lazy_iterable(obj): return list(obj) elif isinstance(obj, (tuple, set)) and cast: return list(obj) else: return [obj]
def function[make_list, parameter[obj, cast]]: constant[ Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list. ] if call[name[isinstance], parameter[name[obj], name[list]]] begin[:] return[call[name[list], parameter[name[obj]]]]
keyword[def] identifier[make_list] ( identifier[obj] , identifier[cast] = keyword[True] ): literal[string] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[list] ): keyword[return] identifier[list] ( identifier[obj] ) keyword[elif] identifier[is_lazy_iterable] ( identifier[obj] ): keyword[return] identifier[list] ( identifier[obj] ) keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[tuple] , identifier[set] )) keyword[and] identifier[cast] : keyword[return] identifier[list] ( identifier[obj] ) keyword[else] : keyword[return] [ identifier[obj] ]
def make_list(obj, cast=True): """ Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list. """ if isinstance(obj, list): return list(obj) # depends on [control=['if'], data=[]] elif is_lazy_iterable(obj): return list(obj) # depends on [control=['if'], data=[]] elif isinstance(obj, (tuple, set)) and cast: return list(obj) # depends on [control=['if'], data=[]] else: return [obj]
def getHeader(filename, handle=None): """ Return a copy of the PRIMARY header, along with any group/extension header for this filename specification. """ _fname, _extn = parseFilename(filename) # Allow the user to provide an already opened PyFITS object # to derive the header from... # if not handle: # Open image whether it is FITS or GEIS _fimg = openImage(_fname, mode='readonly') else: # Use what the user provides, after insuring # that it is a proper PyFITS object. if isinstance(handle, fits.HDUList): _fimg = handle else: raise ValueError('Handle must be a %r object!' % fits.HDUList) _hdr = _fimg['PRIMARY'].header.copy() # if the data is not in the primary array delete NAXIS # so that the correct value is read from the extension header if _hdr['NAXIS'] == 0: del _hdr['NAXIS'] if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)): # Append correct extension/chip/group header to PRIMARY... #for _card in getExtn(_fimg,_extn).header.ascard: #_hdr.ascard.append(_card) for _card in getExtn(_fimg, _extn).header.cards: _hdr.append(_card) if not handle: # Close file handle now... _fimg.close() del _fimg return _hdr
def function[getHeader, parameter[filename, handle]]: constant[ Return a copy of the PRIMARY header, along with any group/extension header for this filename specification. ] <ast.Tuple object at 0x7da1b26aeb60> assign[=] call[name[parseFilename], parameter[name[filename]]] if <ast.UnaryOp object at 0x7da1b0e3d060> begin[:] variable[_fimg] assign[=] call[name[openImage], parameter[name[_fname]]] variable[_hdr] assign[=] call[call[name[_fimg]][constant[PRIMARY]].header.copy, parameter[]] if compare[call[name[_hdr]][constant[NAXIS]] equal[==] constant[0]] begin[:] <ast.Delete object at 0x7da1b0e3ce20> if <ast.UnaryOp object at 0x7da1b0e3d630> begin[:] for taget[name[_card]] in starred[call[name[getExtn], parameter[name[_fimg], name[_extn]]].header.cards] begin[:] call[name[_hdr].append, parameter[name[_card]]] if <ast.UnaryOp object at 0x7da1b0e33460> begin[:] call[name[_fimg].close, parameter[]] <ast.Delete object at 0x7da1b0e30fd0> return[name[_hdr]]
keyword[def] identifier[getHeader] ( identifier[filename] , identifier[handle] = keyword[None] ): literal[string] identifier[_fname] , identifier[_extn] = identifier[parseFilename] ( identifier[filename] ) keyword[if] keyword[not] identifier[handle] : identifier[_fimg] = identifier[openImage] ( identifier[_fname] , identifier[mode] = literal[string] ) keyword[else] : keyword[if] identifier[isinstance] ( identifier[handle] , identifier[fits] . identifier[HDUList] ): identifier[_fimg] = identifier[handle] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[fits] . identifier[HDUList] ) identifier[_hdr] = identifier[_fimg] [ literal[string] ]. identifier[header] . identifier[copy] () keyword[if] identifier[_hdr] [ literal[string] ]== literal[int] : keyword[del] identifier[_hdr] [ literal[string] ] keyword[if] keyword[not] ( identifier[_extn] keyword[is] keyword[None] keyword[or] ( identifier[_extn] . identifier[isdigit] () keyword[and] identifier[int] ( identifier[_extn] )== literal[int] )): keyword[for] identifier[_card] keyword[in] identifier[getExtn] ( identifier[_fimg] , identifier[_extn] ). identifier[header] . identifier[cards] : identifier[_hdr] . identifier[append] ( identifier[_card] ) keyword[if] keyword[not] identifier[handle] : identifier[_fimg] . identifier[close] () keyword[del] identifier[_fimg] keyword[return] identifier[_hdr]
def getHeader(filename, handle=None): """ Return a copy of the PRIMARY header, along with any group/extension header for this filename specification. """ (_fname, _extn) = parseFilename(filename) # Allow the user to provide an already opened PyFITS object # to derive the header from... # if not handle: # Open image whether it is FITS or GEIS _fimg = openImage(_fname, mode='readonly') # depends on [control=['if'], data=[]] # Use what the user provides, after insuring # that it is a proper PyFITS object. elif isinstance(handle, fits.HDUList): _fimg = handle # depends on [control=['if'], data=[]] else: raise ValueError('Handle must be a %r object!' % fits.HDUList) _hdr = _fimg['PRIMARY'].header.copy() # if the data is not in the primary array delete NAXIS # so that the correct value is read from the extension header if _hdr['NAXIS'] == 0: del _hdr['NAXIS'] # depends on [control=['if'], data=[]] if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)): # Append correct extension/chip/group header to PRIMARY... #for _card in getExtn(_fimg,_extn).header.ascard: #_hdr.ascard.append(_card) for _card in getExtn(_fimg, _extn).header.cards: _hdr.append(_card) # depends on [control=['for'], data=['_card']] # depends on [control=['if'], data=[]] if not handle: # Close file handle now... _fimg.close() del _fimg # depends on [control=['if'], data=[]] return _hdr
def download_fundflow(self, bill_date, account_type='Basic', tar_type=None): """ 下载资金账单 https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_18&index=7 :param bill_date: 下载对账单的日期 :param account_type: 账单的资金来源账户 Basic 基本账户 Operation 运营账户 Fees 手续费账户 :param tar_type: 非必传参数,固定值:GZIP,返回格式为.gzip的压缩包账单。 不传则默认为数据流形式。 """ if isinstance(bill_date, (datetime, date)): bill_date = bill_date.strftime('%Y%m%d') data = { 'appid': self.appid, 'bill_date': bill_date, 'account_type': account_type, 'sign_type': 'HMAC-SHA256' } if tar_type is not None: data['tar_type'] = tar_type return self._post('pay/downloadfundflow', data=data)
def function[download_fundflow, parameter[self, bill_date, account_type, tar_type]]: constant[ 下载资金账单 https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_18&index=7 :param bill_date: 下载对账单的日期 :param account_type: 账单的资金来源账户 Basic 基本账户 Operation 运营账户 Fees 手续费账户 :param tar_type: 非必传参数,固定值:GZIP,返回格式为.gzip的压缩包账单。 不传则默认为数据流形式。 ] if call[name[isinstance], parameter[name[bill_date], tuple[[<ast.Name object at 0x7da1b21ed390>, <ast.Name object at 0x7da1b21eff40>]]]] begin[:] variable[bill_date] assign[=] call[name[bill_date].strftime, parameter[constant[%Y%m%d]]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ec9a0>, <ast.Constant object at 0x7da1b21ece20>, <ast.Constant object at 0x7da1b21ec9d0>, <ast.Constant object at 0x7da1b21efa60>], [<ast.Attribute object at 0x7da1b21eda50>, <ast.Name object at 0x7da1b21eddb0>, <ast.Name object at 0x7da1b21edd20>, <ast.Constant object at 0x7da1b21ee0b0>]] if compare[name[tar_type] is_not constant[None]] begin[:] call[name[data]][constant[tar_type]] assign[=] name[tar_type] return[call[name[self]._post, parameter[constant[pay/downloadfundflow]]]]
keyword[def] identifier[download_fundflow] ( identifier[self] , identifier[bill_date] , identifier[account_type] = literal[string] , identifier[tar_type] = keyword[None] ): literal[string] keyword[if] identifier[isinstance] ( identifier[bill_date] ,( identifier[datetime] , identifier[date] )): identifier[bill_date] = identifier[bill_date] . identifier[strftime] ( literal[string] ) identifier[data] ={ literal[string] : identifier[self] . identifier[appid] , literal[string] : identifier[bill_date] , literal[string] : identifier[account_type] , literal[string] : literal[string] } keyword[if] identifier[tar_type] keyword[is] keyword[not] keyword[None] : identifier[data] [ literal[string] ]= identifier[tar_type] keyword[return] identifier[self] . identifier[_post] ( literal[string] , identifier[data] = identifier[data] )
def download_fundflow(self, bill_date, account_type='Basic', tar_type=None): """ 下载资金账单 https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_18&index=7 :param bill_date: 下载对账单的日期 :param account_type: 账单的资金来源账户 Basic 基本账户 Operation 运营账户 Fees 手续费账户 :param tar_type: 非必传参数,固定值:GZIP,返回格式为.gzip的压缩包账单。 不传则默认为数据流形式。 """ if isinstance(bill_date, (datetime, date)): bill_date = bill_date.strftime('%Y%m%d') # depends on [control=['if'], data=[]] data = {'appid': self.appid, 'bill_date': bill_date, 'account_type': account_type, 'sign_type': 'HMAC-SHA256'} if tar_type is not None: data['tar_type'] = tar_type # depends on [control=['if'], data=['tar_type']] return self._post('pay/downloadfundflow', data=data)
def invoke_ssh_shell(cls, *args, **kwargs): """invoke_ssh(arguments..., pty=False, echo=False) Star a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel. """ pty = kwargs.pop('pty', True) echo = kwargs.pop('echo', False) client = cls.connect_ssh(*args, **kwargs) f = client.invoke_shell(pty=pty, echo=echo) f.client = client return f
def function[invoke_ssh_shell, parameter[cls]]: constant[invoke_ssh(arguments..., pty=False, echo=False) Star a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel. ] variable[pty] assign[=] call[name[kwargs].pop, parameter[constant[pty], constant[True]]] variable[echo] assign[=] call[name[kwargs].pop, parameter[constant[echo], constant[False]]] variable[client] assign[=] call[name[cls].connect_ssh, parameter[<ast.Starred object at 0x7da18f09c820>]] variable[f] assign[=] call[name[client].invoke_shell, parameter[]] name[f].client assign[=] name[client] return[name[f]]
keyword[def] identifier[invoke_ssh_shell] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[pty] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ) identifier[echo] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[client] = identifier[cls] . identifier[connect_ssh] (* identifier[args] ,** identifier[kwargs] ) identifier[f] = identifier[client] . identifier[invoke_shell] ( identifier[pty] = identifier[pty] , identifier[echo] = identifier[echo] ) identifier[f] . identifier[client] = identifier[client] keyword[return] identifier[f]
def invoke_ssh_shell(cls, *args, **kwargs): """invoke_ssh(arguments..., pty=False, echo=False) Star a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel. """ pty = kwargs.pop('pty', True) echo = kwargs.pop('echo', False) client = cls.connect_ssh(*args, **kwargs) f = client.invoke_shell(pty=pty, echo=echo) f.client = client return f
def associate(self, queue): """Merge this queue with another. Both queues will use a shared command list and either one can be used to fill or flush the shared queue. """ assert isinstance(queue, GlirQueue) if queue._shared is self._shared: return # merge commands self._shared._commands.extend(queue.clear()) self._shared._verbose |= queue._shared._verbose self._shared._associations[queue] = None # update queue and all related queues to use the same _shared object for ch in queue._shared._associations: ch._shared = self._shared self._shared._associations[ch] = None queue._shared = self._shared
def function[associate, parameter[self, queue]]: constant[Merge this queue with another. Both queues will use a shared command list and either one can be used to fill or flush the shared queue. ] assert[call[name[isinstance], parameter[name[queue], name[GlirQueue]]]] if compare[name[queue]._shared is name[self]._shared] begin[:] return[None] call[name[self]._shared._commands.extend, parameter[call[name[queue].clear, parameter[]]]] <ast.AugAssign object at 0x7da1b0f2b9a0> call[name[self]._shared._associations][name[queue]] assign[=] constant[None] for taget[name[ch]] in starred[name[queue]._shared._associations] begin[:] name[ch]._shared assign[=] name[self]._shared call[name[self]._shared._associations][name[ch]] assign[=] constant[None] name[queue]._shared assign[=] name[self]._shared
keyword[def] identifier[associate] ( identifier[self] , identifier[queue] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[queue] , identifier[GlirQueue] ) keyword[if] identifier[queue] . identifier[_shared] keyword[is] identifier[self] . identifier[_shared] : keyword[return] identifier[self] . identifier[_shared] . identifier[_commands] . identifier[extend] ( identifier[queue] . identifier[clear] ()) identifier[self] . identifier[_shared] . identifier[_verbose] |= identifier[queue] . identifier[_shared] . identifier[_verbose] identifier[self] . identifier[_shared] . identifier[_associations] [ identifier[queue] ]= keyword[None] keyword[for] identifier[ch] keyword[in] identifier[queue] . identifier[_shared] . identifier[_associations] : identifier[ch] . identifier[_shared] = identifier[self] . identifier[_shared] identifier[self] . identifier[_shared] . identifier[_associations] [ identifier[ch] ]= keyword[None] identifier[queue] . identifier[_shared] = identifier[self] . identifier[_shared]
def associate(self, queue): """Merge this queue with another. Both queues will use a shared command list and either one can be used to fill or flush the shared queue. """ assert isinstance(queue, GlirQueue) if queue._shared is self._shared: return # depends on [control=['if'], data=[]] # merge commands self._shared._commands.extend(queue.clear()) self._shared._verbose |= queue._shared._verbose self._shared._associations[queue] = None # update queue and all related queues to use the same _shared object for ch in queue._shared._associations: ch._shared = self._shared self._shared._associations[ch] = None # depends on [control=['for'], data=['ch']] queue._shared = self._shared
def join(chord_root, quality='', extensions=None, bass=''): r"""Join the parts of a chord into a complete chord label. Parameters ---------- chord_root : str Root pitch class of the chord, e.g. 'C', 'Eb' quality : str Quality of the chord, e.g. 'maj', 'hdim7' (Default value = '') extensions : list Any added or absent scaled degrees for this chord, e.g. ['4', '\*3'] (Default value = None) bass : str Scale degree of the bass note, e.g. '5'. (Default value = '') Returns ------- chord_label : str A complete chord label. """ chord_label = chord_root if quality or extensions: chord_label += ":%s" % quality if extensions: chord_label += "(%s)" % ",".join(extensions) if bass and bass != '1': chord_label += "/%s" % bass validate_chord_label(chord_label) return chord_label
def function[join, parameter[chord_root, quality, extensions, bass]]: constant[Join the parts of a chord into a complete chord label. Parameters ---------- chord_root : str Root pitch class of the chord, e.g. 'C', 'Eb' quality : str Quality of the chord, e.g. 'maj', 'hdim7' (Default value = '') extensions : list Any added or absent scaled degrees for this chord, e.g. ['4', '\*3'] (Default value = None) bass : str Scale degree of the bass note, e.g. '5'. (Default value = '') Returns ------- chord_label : str A complete chord label. ] variable[chord_label] assign[=] name[chord_root] if <ast.BoolOp object at 0x7da18f722f50> begin[:] <ast.AugAssign object at 0x7da18f723460> if name[extensions] begin[:] <ast.AugAssign object at 0x7da18f7233a0> if <ast.BoolOp object at 0x7da18f7235b0> begin[:] <ast.AugAssign object at 0x7da18f723b20> call[name[validate_chord_label], parameter[name[chord_label]]] return[name[chord_label]]
keyword[def] identifier[join] ( identifier[chord_root] , identifier[quality] = literal[string] , identifier[extensions] = keyword[None] , identifier[bass] = literal[string] ): literal[string] identifier[chord_label] = identifier[chord_root] keyword[if] identifier[quality] keyword[or] identifier[extensions] : identifier[chord_label] += literal[string] % identifier[quality] keyword[if] identifier[extensions] : identifier[chord_label] += literal[string] % literal[string] . identifier[join] ( identifier[extensions] ) keyword[if] identifier[bass] keyword[and] identifier[bass] != literal[string] : identifier[chord_label] += literal[string] % identifier[bass] identifier[validate_chord_label] ( identifier[chord_label] ) keyword[return] identifier[chord_label]
def join(chord_root, quality='', extensions=None, bass=''): """Join the parts of a chord into a complete chord label. Parameters ---------- chord_root : str Root pitch class of the chord, e.g. 'C', 'Eb' quality : str Quality of the chord, e.g. 'maj', 'hdim7' (Default value = '') extensions : list Any added or absent scaled degrees for this chord, e.g. ['4', '\\*3'] (Default value = None) bass : str Scale degree of the bass note, e.g. '5'. (Default value = '') Returns ------- chord_label : str A complete chord label. """ chord_label = chord_root if quality or extensions: chord_label += ':%s' % quality # depends on [control=['if'], data=[]] if extensions: chord_label += '(%s)' % ','.join(extensions) # depends on [control=['if'], data=[]] if bass and bass != '1': chord_label += '/%s' % bass # depends on [control=['if'], data=[]] validate_chord_label(chord_label) return chord_label
def _add_column(self, column): """ Add a new column to the DataFrame :param column: column name :return: nothing """ self._columns.append(column) if self._blist: self._data.append(blist([None] * len(self._index))) else: self._data.append([None] * len(self._index))
def function[_add_column, parameter[self, column]]: constant[ Add a new column to the DataFrame :param column: column name :return: nothing ] call[name[self]._columns.append, parameter[name[column]]] if name[self]._blist begin[:] call[name[self]._data.append, parameter[call[name[blist], parameter[binary_operation[list[[<ast.Constant object at 0x7da20c6abbb0>]] * call[name[len], parameter[name[self]._index]]]]]]]
keyword[def] identifier[_add_column] ( identifier[self] , identifier[column] ): literal[string] identifier[self] . identifier[_columns] . identifier[append] ( identifier[column] ) keyword[if] identifier[self] . identifier[_blist] : identifier[self] . identifier[_data] . identifier[append] ( identifier[blist] ([ keyword[None] ]* identifier[len] ( identifier[self] . identifier[_index] ))) keyword[else] : identifier[self] . identifier[_data] . identifier[append] ([ keyword[None] ]* identifier[len] ( identifier[self] . identifier[_index] ))
def _add_column(self, column): """ Add a new column to the DataFrame :param column: column name :return: nothing """ self._columns.append(column) if self._blist: self._data.append(blist([None] * len(self._index))) # depends on [control=['if'], data=[]] else: self._data.append([None] * len(self._index))
def export(self, file_obj=None, file_type=None, **kwargs): """ Export the path to a file object or return data. Parameters --------------- file_obj : None, str, or file object File object or string to export to file_type : None or str Type of file: dxf, dict, svg Returns --------------- exported : bytes or str Exported as specified type """ return export_path(self, file_type=file_type, file_obj=file_obj, **kwargs)
def function[export, parameter[self, file_obj, file_type]]: constant[ Export the path to a file object or return data. Parameters --------------- file_obj : None, str, or file object File object or string to export to file_type : None or str Type of file: dxf, dict, svg Returns --------------- exported : bytes or str Exported as specified type ] return[call[name[export_path], parameter[name[self]]]]
keyword[def] identifier[export] ( identifier[self] , identifier[file_obj] = keyword[None] , identifier[file_type] = keyword[None] , ** identifier[kwargs] ): literal[string] keyword[return] identifier[export_path] ( identifier[self] , identifier[file_type] = identifier[file_type] , identifier[file_obj] = identifier[file_obj] , ** identifier[kwargs] )
def export(self, file_obj=None, file_type=None, **kwargs): """ Export the path to a file object or return data. Parameters --------------- file_obj : None, str, or file object File object or string to export to file_type : None or str Type of file: dxf, dict, svg Returns --------------- exported : bytes or str Exported as specified type """ return export_path(self, file_type=file_type, file_obj=file_obj, **kwargs)
def is_sub_to_all(self, *super_entities): """ Given a list of super entities, return the entities that have those as a subset of their super entities. """ if super_entities: if len(super_entities) == 1: # Optimize for the case of just one super entity since this is a much less intensive query has_subset = EntityRelationship.objects.filter( super_entity=super_entities[0]).values_list('sub_entity', flat=True) else: # Get a list of entities that have super entities with all types has_subset = EntityRelationship.objects.filter( super_entity__in=super_entities).values('sub_entity').annotate(Count('super_entity')).filter( super_entity__count=len(set(super_entities))).values_list('sub_entity', flat=True) return self.filter(id__in=has_subset) else: return self
def function[is_sub_to_all, parameter[self]]: constant[ Given a list of super entities, return the entities that have those as a subset of their super entities. ] if name[super_entities] begin[:] if compare[call[name[len], parameter[name[super_entities]]] equal[==] constant[1]] begin[:] variable[has_subset] assign[=] call[call[name[EntityRelationship].objects.filter, parameter[]].values_list, parameter[constant[sub_entity]]] return[call[name[self].filter, parameter[]]]
keyword[def] identifier[is_sub_to_all] ( identifier[self] ,* identifier[super_entities] ): literal[string] keyword[if] identifier[super_entities] : keyword[if] identifier[len] ( identifier[super_entities] )== literal[int] : identifier[has_subset] = identifier[EntityRelationship] . identifier[objects] . identifier[filter] ( identifier[super_entity] = identifier[super_entities] [ literal[int] ]). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) keyword[else] : identifier[has_subset] = identifier[EntityRelationship] . identifier[objects] . identifier[filter] ( identifier[super_entity__in] = identifier[super_entities] ). identifier[values] ( literal[string] ). identifier[annotate] ( identifier[Count] ( literal[string] )). identifier[filter] ( identifier[super_entity__count] = identifier[len] ( identifier[set] ( identifier[super_entities] ))). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) keyword[return] identifier[self] . identifier[filter] ( identifier[id__in] = identifier[has_subset] ) keyword[else] : keyword[return] identifier[self]
def is_sub_to_all(self, *super_entities): """ Given a list of super entities, return the entities that have those as a subset of their super entities. """ if super_entities: if len(super_entities) == 1: # Optimize for the case of just one super entity since this is a much less intensive query has_subset = EntityRelationship.objects.filter(super_entity=super_entities[0]).values_list('sub_entity', flat=True) # depends on [control=['if'], data=[]] else: # Get a list of entities that have super entities with all types has_subset = EntityRelationship.objects.filter(super_entity__in=super_entities).values('sub_entity').annotate(Count('super_entity')).filter(super_entity__count=len(set(super_entities))).values_list('sub_entity', flat=True) return self.filter(id__in=has_subset) # depends on [control=['if'], data=[]] else: return self
def restore_original_dimensions(obs, obs_space, tensorlib=tf): """Unpacks Dict and Tuple space observations into their original form. This is needed since we flatten Dict and Tuple observations in transit. Before sending them to the model though, we should unflatten them into Dicts or Tuples of tensors. Arguments: obs: The flattened observation tensor. obs_space: The flattened obs space. If this has the `original_space` attribute, we will unflatten the tensor to that shape. tensorlib: The library used to unflatten (reshape) the array/tensor. Returns: single tensor or dict / tuple of tensors matching the original observation space. """ if hasattr(obs_space, "original_space"): return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib) else: return obs
def function[restore_original_dimensions, parameter[obs, obs_space, tensorlib]]: constant[Unpacks Dict and Tuple space observations into their original form. This is needed since we flatten Dict and Tuple observations in transit. Before sending them to the model though, we should unflatten them into Dicts or Tuples of tensors. Arguments: obs: The flattened observation tensor. obs_space: The flattened obs space. If this has the `original_space` attribute, we will unflatten the tensor to that shape. tensorlib: The library used to unflatten (reshape) the array/tensor. Returns: single tensor or dict / tuple of tensors matching the original observation space. ] if call[name[hasattr], parameter[name[obs_space], constant[original_space]]] begin[:] return[call[name[_unpack_obs], parameter[name[obs], name[obs_space].original_space]]]
keyword[def] identifier[restore_original_dimensions] ( identifier[obs] , identifier[obs_space] , identifier[tensorlib] = identifier[tf] ): literal[string] keyword[if] identifier[hasattr] ( identifier[obs_space] , literal[string] ): keyword[return] identifier[_unpack_obs] ( identifier[obs] , identifier[obs_space] . identifier[original_space] , identifier[tensorlib] = identifier[tensorlib] ) keyword[else] : keyword[return] identifier[obs]
def restore_original_dimensions(obs, obs_space, tensorlib=tf): """Unpacks Dict and Tuple space observations into their original form. This is needed since we flatten Dict and Tuple observations in transit. Before sending them to the model though, we should unflatten them into Dicts or Tuples of tensors. Arguments: obs: The flattened observation tensor. obs_space: The flattened obs space. If this has the `original_space` attribute, we will unflatten the tensor to that shape. tensorlib: The library used to unflatten (reshape) the array/tensor. Returns: single tensor or dict / tuple of tensors matching the original observation space. """ if hasattr(obs_space, 'original_space'): return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib) # depends on [control=['if'], data=[]] else: return obs
def findall_operations_with_gate_type( self, gate_type: Type[T_DESIRED_GATE_TYPE] ) -> Iterable[Tuple[int, ops.GateOperation, T_DESIRED_GATE_TYPE]]: """Find the locations of all gate operations of a given type. Args: gate_type: The type of gate to find, e.g. XPowGate or MeasurementGate. Returns: An iterator (index, operation, gate)'s for operations with the given gate type. """ result = self.findall_operations(lambda operation: bool( ops.op_gate_of_type(operation, gate_type))) for index, op in result: gate_op = cast(ops.GateOperation, op) yield index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)
def function[findall_operations_with_gate_type, parameter[self, gate_type]]: constant[Find the locations of all gate operations of a given type. Args: gate_type: The type of gate to find, e.g. XPowGate or MeasurementGate. Returns: An iterator (index, operation, gate)'s for operations with the given gate type. ] variable[result] assign[=] call[name[self].findall_operations, parameter[<ast.Lambda object at 0x7da1b1ce9cf0>]] for taget[tuple[[<ast.Name object at 0x7da1b1ce85e0>, <ast.Name object at 0x7da1b1ce91b0>]]] in starred[name[result]] begin[:] variable[gate_op] assign[=] call[name[cast], parameter[name[ops].GateOperation, name[op]]] <ast.Yield object at 0x7da1b21255a0>
keyword[def] identifier[findall_operations_with_gate_type] ( identifier[self] , identifier[gate_type] : identifier[Type] [ identifier[T_DESIRED_GATE_TYPE] ] )-> identifier[Iterable] [ identifier[Tuple] [ identifier[int] , identifier[ops] . identifier[GateOperation] , identifier[T_DESIRED_GATE_TYPE] ]]: literal[string] identifier[result] = identifier[self] . identifier[findall_operations] ( keyword[lambda] identifier[operation] : identifier[bool] ( identifier[ops] . identifier[op_gate_of_type] ( identifier[operation] , identifier[gate_type] ))) keyword[for] identifier[index] , identifier[op] keyword[in] identifier[result] : identifier[gate_op] = identifier[cast] ( identifier[ops] . identifier[GateOperation] , identifier[op] ) keyword[yield] identifier[index] , identifier[gate_op] , identifier[cast] ( identifier[T_DESIRED_GATE_TYPE] , identifier[gate_op] . identifier[gate] )
def findall_operations_with_gate_type(self, gate_type: Type[T_DESIRED_GATE_TYPE]) -> Iterable[Tuple[int, ops.GateOperation, T_DESIRED_GATE_TYPE]]: """Find the locations of all gate operations of a given type. Args: gate_type: The type of gate to find, e.g. XPowGate or MeasurementGate. Returns: An iterator (index, operation, gate)'s for operations with the given gate type. """ result = self.findall_operations(lambda operation: bool(ops.op_gate_of_type(operation, gate_type))) for (index, op) in result: gate_op = cast(ops.GateOperation, op) yield (index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)) # depends on [control=['for'], data=[]]
def add_timeline_callback(self, timeline_events, callback): """Register a callback for a specific timeline event.""" if not timeline_events: return False if not isinstance(timeline_events, (tuple, list)): timeline_events = [timeline_events] for timeline_event in timeline_events: if not isinstance(timeline_event, dict): raise AbodeException((ERROR.EVENT_CODE_MISSING)) event_code = timeline_event.get('event_code') if not event_code: raise AbodeException((ERROR.EVENT_CODE_MISSING)) _LOGGER.debug("Subscribing to timeline event: %s", timeline_event) self._timeline_callbacks[event_code].append((callback)) return True
def function[add_timeline_callback, parameter[self, timeline_events, callback]]: constant[Register a callback for a specific timeline event.] if <ast.UnaryOp object at 0x7da1b0cf69b0> begin[:] return[constant[False]] if <ast.UnaryOp object at 0x7da1b0cf68f0> begin[:] variable[timeline_events] assign[=] list[[<ast.Name object at 0x7da1b0cf6920>]] for taget[name[timeline_event]] in starred[name[timeline_events]] begin[:] if <ast.UnaryOp object at 0x7da1b0cf4b20> begin[:] <ast.Raise object at 0x7da1b0cf48b0> variable[event_code] assign[=] call[name[timeline_event].get, parameter[constant[event_code]]] if <ast.UnaryOp object at 0x7da1b0c3c130> begin[:] <ast.Raise object at 0x7da1b0c3fd60> call[name[_LOGGER].debug, parameter[constant[Subscribing to timeline event: %s], name[timeline_event]]] call[call[name[self]._timeline_callbacks][name[event_code]].append, parameter[name[callback]]] return[constant[True]]
keyword[def] identifier[add_timeline_callback] ( identifier[self] , identifier[timeline_events] , identifier[callback] ): literal[string] keyword[if] keyword[not] identifier[timeline_events] : keyword[return] keyword[False] keyword[if] keyword[not] identifier[isinstance] ( identifier[timeline_events] ,( identifier[tuple] , identifier[list] )): identifier[timeline_events] =[ identifier[timeline_events] ] keyword[for] identifier[timeline_event] keyword[in] identifier[timeline_events] : keyword[if] keyword[not] identifier[isinstance] ( identifier[timeline_event] , identifier[dict] ): keyword[raise] identifier[AbodeException] (( identifier[ERROR] . identifier[EVENT_CODE_MISSING] )) identifier[event_code] = identifier[timeline_event] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[event_code] : keyword[raise] identifier[AbodeException] (( identifier[ERROR] . identifier[EVENT_CODE_MISSING] )) identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[timeline_event] ) identifier[self] . identifier[_timeline_callbacks] [ identifier[event_code] ]. identifier[append] (( identifier[callback] )) keyword[return] keyword[True]
def add_timeline_callback(self, timeline_events, callback): """Register a callback for a specific timeline event.""" if not timeline_events: return False # depends on [control=['if'], data=[]] if not isinstance(timeline_events, (tuple, list)): timeline_events = [timeline_events] # depends on [control=['if'], data=[]] for timeline_event in timeline_events: if not isinstance(timeline_event, dict): raise AbodeException(ERROR.EVENT_CODE_MISSING) # depends on [control=['if'], data=[]] event_code = timeline_event.get('event_code') if not event_code: raise AbodeException(ERROR.EVENT_CODE_MISSING) # depends on [control=['if'], data=[]] _LOGGER.debug('Subscribing to timeline event: %s', timeline_event) self._timeline_callbacks[event_code].append(callback) # depends on [control=['for'], data=['timeline_event']] return True
def progress(self): """\ Report progress to the Java side. This needs to flush the uplink stream, but too many flushes can disrupt performance, so we actually talk to upstream once per second. """ now = time() if now - self.last_progress_t > 1: self.last_progress_t = now if self.status: self.uplink.status(self.status) self.status = None self.__spill_counters() self.uplink.progress(self.progress_value) self.uplink.flush()
def function[progress, parameter[self]]: constant[ Report progress to the Java side. This needs to flush the uplink stream, but too many flushes can disrupt performance, so we actually talk to upstream once per second. ] variable[now] assign[=] call[name[time], parameter[]] if compare[binary_operation[name[now] - name[self].last_progress_t] greater[>] constant[1]] begin[:] name[self].last_progress_t assign[=] name[now] if name[self].status begin[:] call[name[self].uplink.status, parameter[name[self].status]] name[self].status assign[=] constant[None] call[name[self].__spill_counters, parameter[]] call[name[self].uplink.progress, parameter[name[self].progress_value]] call[name[self].uplink.flush, parameter[]]
keyword[def] identifier[progress] ( identifier[self] ): literal[string] identifier[now] = identifier[time] () keyword[if] identifier[now] - identifier[self] . identifier[last_progress_t] > literal[int] : identifier[self] . identifier[last_progress_t] = identifier[now] keyword[if] identifier[self] . identifier[status] : identifier[self] . identifier[uplink] . identifier[status] ( identifier[self] . identifier[status] ) identifier[self] . identifier[status] = keyword[None] identifier[self] . identifier[__spill_counters] () identifier[self] . identifier[uplink] . identifier[progress] ( identifier[self] . identifier[progress_value] ) identifier[self] . identifier[uplink] . identifier[flush] ()
def progress(self): """ Report progress to the Java side. This needs to flush the uplink stream, but too many flushes can disrupt performance, so we actually talk to upstream once per second. """ now = time() if now - self.last_progress_t > 1: self.last_progress_t = now if self.status: self.uplink.status(self.status) self.status = None # depends on [control=['if'], data=[]] self.__spill_counters() self.uplink.progress(self.progress_value) self.uplink.flush() # depends on [control=['if'], data=[]]
def set_hosts_file_entry_for_role(self, role_name, network_name='user-net', fqdn=None, domain_name=None): """Adds an entry to the hosts file for a scenario host given the role name and network name :param role_name: (str) role name of the host to add :param network_name: (str) Name of the network to add to the hosts file :param fqdn: (str) Fully qualified domain name to use in the hosts file entry (trumps domain name) :param domain_name: (str) Domain name to include in the hosts file entries if provided :return: """ log = logging.getLogger(self.cls_logger + '.set_hosts_file_entry_for_role') # Determine the host file entry portion if fqdn: host_file_entry = fqdn else: if domain_name: host_file_entry = '{r}.{d} {r}'.format(r=role_name, d=domain_name) else: host_file_entry = role_name log.info('Using hosts file entry: {e}'.format(e=host_file_entry)) log.info('Scanning scenario hosts for role name [{r}] and network: {n}'.format(r=role_name, n=network_name)) for scenario_host in self.scenario_network_info: if scenario_host['scenario_role_name'] == role_name: for host_network_info in scenario_host['network_info']: if host_network_info['network_name'] == network_name: self.update_hosts_file(ip=host_network_info['internal_ip'], entry=host_file_entry)
def function[set_hosts_file_entry_for_role, parameter[self, role_name, network_name, fqdn, domain_name]]: constant[Adds an entry to the hosts file for a scenario host given the role name and network name :param role_name: (str) role name of the host to add :param network_name: (str) Name of the network to add to the hosts file :param fqdn: (str) Fully qualified domain name to use in the hosts file entry (trumps domain name) :param domain_name: (str) Domain name to include in the hosts file entries if provided :return: ] variable[log] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[self].cls_logger + constant[.set_hosts_file_entry_for_role]]]] if name[fqdn] begin[:] variable[host_file_entry] assign[=] name[fqdn] call[name[log].info, parameter[call[constant[Using hosts file entry: {e}].format, parameter[]]]] call[name[log].info, parameter[call[constant[Scanning scenario hosts for role name [{r}] and network: {n}].format, parameter[]]]] for taget[name[scenario_host]] in starred[name[self].scenario_network_info] begin[:] if compare[call[name[scenario_host]][constant[scenario_role_name]] equal[==] name[role_name]] begin[:] for taget[name[host_network_info]] in starred[call[name[scenario_host]][constant[network_info]]] begin[:] if compare[call[name[host_network_info]][constant[network_name]] equal[==] name[network_name]] begin[:] call[name[self].update_hosts_file, parameter[]]
keyword[def] identifier[set_hosts_file_entry_for_role] ( identifier[self] , identifier[role_name] , identifier[network_name] = literal[string] , identifier[fqdn] = keyword[None] , identifier[domain_name] = keyword[None] ): literal[string] identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[self] . identifier[cls_logger] + literal[string] ) keyword[if] identifier[fqdn] : identifier[host_file_entry] = identifier[fqdn] keyword[else] : keyword[if] identifier[domain_name] : identifier[host_file_entry] = literal[string] . identifier[format] ( identifier[r] = identifier[role_name] , identifier[d] = identifier[domain_name] ) keyword[else] : identifier[host_file_entry] = identifier[role_name] identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[e] = identifier[host_file_entry] )) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[r] = identifier[role_name] , identifier[n] = identifier[network_name] )) keyword[for] identifier[scenario_host] keyword[in] identifier[self] . identifier[scenario_network_info] : keyword[if] identifier[scenario_host] [ literal[string] ]== identifier[role_name] : keyword[for] identifier[host_network_info] keyword[in] identifier[scenario_host] [ literal[string] ]: keyword[if] identifier[host_network_info] [ literal[string] ]== identifier[network_name] : identifier[self] . identifier[update_hosts_file] ( identifier[ip] = identifier[host_network_info] [ literal[string] ], identifier[entry] = identifier[host_file_entry] )
def set_hosts_file_entry_for_role(self, role_name, network_name='user-net', fqdn=None, domain_name=None): """Adds an entry to the hosts file for a scenario host given the role name and network name :param role_name: (str) role name of the host to add :param network_name: (str) Name of the network to add to the hosts file :param fqdn: (str) Fully qualified domain name to use in the hosts file entry (trumps domain name) :param domain_name: (str) Domain name to include in the hosts file entries if provided :return: """ log = logging.getLogger(self.cls_logger + '.set_hosts_file_entry_for_role') # Determine the host file entry portion if fqdn: host_file_entry = fqdn # depends on [control=['if'], data=[]] elif domain_name: host_file_entry = '{r}.{d} {r}'.format(r=role_name, d=domain_name) # depends on [control=['if'], data=[]] else: host_file_entry = role_name log.info('Using hosts file entry: {e}'.format(e=host_file_entry)) log.info('Scanning scenario hosts for role name [{r}] and network: {n}'.format(r=role_name, n=network_name)) for scenario_host in self.scenario_network_info: if scenario_host['scenario_role_name'] == role_name: for host_network_info in scenario_host['network_info']: if host_network_info['network_name'] == network_name: self.update_hosts_file(ip=host_network_info['internal_ip'], entry=host_file_entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['host_network_info']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['scenario_host']]
def reset(self, align=8, clip=80, code=False, derive=False, detail=0, ignored=True, infer=False, limit=100, stats=0, stream=None): '''Reset options, state, etc. The available options and default values are: *align=8* -- size alignment *clip=80* -- clip repr() strings *code=False* -- incl. (byte)code size *derive=False* -- derive from super type *detail=0* -- Asized refs level *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0.0* -- print statistics, see function **asizeof** *stream=None* -- output stream for printing See function **asizeof** for a description of the options. ''' # options self._align_ = align self._clip_ = clip self._code_ = code self._derive_ = derive self._detail_ = detail # for Asized only self._infer_ = infer self._limit_ = limit self._stats_ = stats self._stream = stream if ignored: self._ign_d = _kind_ignored else: self._ign_d = None # clear state self._clear() self.set(align=align, code=code, stats=stats)
def function[reset, parameter[self, align, clip, code, derive, detail, ignored, infer, limit, stats, stream]]: constant[Reset options, state, etc. The available options and default values are: *align=8* -- size alignment *clip=80* -- clip repr() strings *code=False* -- incl. (byte)code size *derive=False* -- derive from super type *detail=0* -- Asized refs level *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0.0* -- print statistics, see function **asizeof** *stream=None* -- output stream for printing See function **asizeof** for a description of the options. ] name[self]._align_ assign[=] name[align] name[self]._clip_ assign[=] name[clip] name[self]._code_ assign[=] name[code] name[self]._derive_ assign[=] name[derive] name[self]._detail_ assign[=] name[detail] name[self]._infer_ assign[=] name[infer] name[self]._limit_ assign[=] name[limit] name[self]._stats_ assign[=] name[stats] name[self]._stream assign[=] name[stream] if name[ignored] begin[:] name[self]._ign_d assign[=] name[_kind_ignored] call[name[self]._clear, parameter[]] call[name[self].set, parameter[]]
keyword[def] identifier[reset] ( identifier[self] , identifier[align] = literal[int] , identifier[clip] = literal[int] , identifier[code] = keyword[False] , identifier[derive] = keyword[False] , identifier[detail] = literal[int] , identifier[ignored] = keyword[True] , identifier[infer] = keyword[False] , identifier[limit] = literal[int] , identifier[stats] = literal[int] , identifier[stream] = keyword[None] ): literal[string] identifier[self] . identifier[_align_] = identifier[align] identifier[self] . identifier[_clip_] = identifier[clip] identifier[self] . identifier[_code_] = identifier[code] identifier[self] . identifier[_derive_] = identifier[derive] identifier[self] . identifier[_detail_] = identifier[detail] identifier[self] . identifier[_infer_] = identifier[infer] identifier[self] . identifier[_limit_] = identifier[limit] identifier[self] . identifier[_stats_] = identifier[stats] identifier[self] . identifier[_stream] = identifier[stream] keyword[if] identifier[ignored] : identifier[self] . identifier[_ign_d] = identifier[_kind_ignored] keyword[else] : identifier[self] . identifier[_ign_d] = keyword[None] identifier[self] . identifier[_clear] () identifier[self] . identifier[set] ( identifier[align] = identifier[align] , identifier[code] = identifier[code] , identifier[stats] = identifier[stats] )
def reset(self, align=8, clip=80, code=False, derive=False, detail=0, ignored=True, infer=False, limit=100, stats=0, stream=None): """Reset options, state, etc. The available options and default values are: *align=8* -- size alignment *clip=80* -- clip repr() strings *code=False* -- incl. (byte)code size *derive=False* -- derive from super type *detail=0* -- Asized refs level *ignored=True* -- ignore certain types *infer=False* -- try to infer types *limit=100* -- recursion limit *stats=0.0* -- print statistics, see function **asizeof** *stream=None* -- output stream for printing See function **asizeof** for a description of the options. """ # options self._align_ = align self._clip_ = clip self._code_ = code self._derive_ = derive self._detail_ = detail # for Asized only self._infer_ = infer self._limit_ = limit self._stats_ = stats self._stream = stream if ignored: self._ign_d = _kind_ignored # depends on [control=['if'], data=[]] else: self._ign_d = None # clear state self._clear() self.set(align=align, code=code, stats=stats)
def get_unread_topics(context, topics, user): """ This will return a list of unread topics for the given user from a given set of topics. Usage:: {% get_unread_topics topics request.user as unread_topics %} """ request = context.get('request', None) return TrackingHandler(request=request).get_unread_topics(topics, user)
def function[get_unread_topics, parameter[context, topics, user]]: constant[ This will return a list of unread topics for the given user from a given set of topics. Usage:: {% get_unread_topics topics request.user as unread_topics %} ] variable[request] assign[=] call[name[context].get, parameter[constant[request], constant[None]]] return[call[call[name[TrackingHandler], parameter[]].get_unread_topics, parameter[name[topics], name[user]]]]
keyword[def] identifier[get_unread_topics] ( identifier[context] , identifier[topics] , identifier[user] ): literal[string] identifier[request] = identifier[context] . identifier[get] ( literal[string] , keyword[None] ) keyword[return] identifier[TrackingHandler] ( identifier[request] = identifier[request] ). identifier[get_unread_topics] ( identifier[topics] , identifier[user] )
def get_unread_topics(context, topics, user): """ This will return a list of unread topics for the given user from a given set of topics. Usage:: {% get_unread_topics topics request.user as unread_topics %} """ request = context.get('request', None) return TrackingHandler(request=request).get_unread_topics(topics, user)
def file_url(self): """ Manage the case that we have to test a file .. note:: 1 URL per line. """ # We get, format, clean the list of URL to test. list_to_test = self._file_list_to_test_filtering() # We initiate a local variable which will save the current state of the list. not_filtered = list_to_test try: # We remove the element which are in the database from the # current list to test. list_to_test = List( list( set( list_to_test[PyFunceble.INTERN["counter"]["number"]["tested"] :] ) - set(PyFunceble.INTERN["flatten_inactive_db"]) ) ).format() _ = list_to_test[-1] except IndexError: # Our list to test is the one with the element from the database. list_to_test = not_filtered[ PyFunceble.INTERN["counter"]["number"]["tested"] : ] # We delete the undesired variable. del not_filtered if PyFunceble.CONFIGURATION["hierarchical_sorting"]: # The hierarchical sorting is desired by the user. # We format the list. list_to_test = List(list(list_to_test)).custom_format(Sort.hierarchical) try: # We test each URL from the list to test. return [self.url(x, list_to_test[-1]) for x in list_to_test if x] except IndexError: # We print a message on screen. print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + "Nothing to test.")
def function[file_url, parameter[self]]: constant[ Manage the case that we have to test a file .. note:: 1 URL per line. ] variable[list_to_test] assign[=] call[name[self]._file_list_to_test_filtering, parameter[]] variable[not_filtered] assign[=] name[list_to_test] <ast.Try object at 0x7da18f58d090> if call[name[PyFunceble].CONFIGURATION][constant[hierarchical_sorting]] begin[:] variable[list_to_test] assign[=] call[call[name[List], parameter[call[name[list], parameter[name[list_to_test]]]]].custom_format, parameter[name[Sort].hierarchical]] <ast.Try object at 0x7da1b0296800>
keyword[def] identifier[file_url] ( identifier[self] ): literal[string] identifier[list_to_test] = identifier[self] . identifier[_file_list_to_test_filtering] () identifier[not_filtered] = identifier[list_to_test] keyword[try] : identifier[list_to_test] = identifier[List] ( identifier[list] ( identifier[set] ( identifier[list_to_test] [ identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][ literal[string] ]:] ) - identifier[set] ( identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]) ) ). identifier[format] () identifier[_] = identifier[list_to_test] [- literal[int] ] keyword[except] identifier[IndexError] : identifier[list_to_test] = identifier[not_filtered] [ identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][ literal[string] ]: ] keyword[del] identifier[not_filtered] keyword[if] identifier[PyFunceble] . identifier[CONFIGURATION] [ literal[string] ]: identifier[list_to_test] = identifier[List] ( identifier[list] ( identifier[list_to_test] )). identifier[custom_format] ( identifier[Sort] . identifier[hierarchical] ) keyword[try] : keyword[return] [ identifier[self] . identifier[url] ( identifier[x] , identifier[list_to_test] [- literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[list_to_test] keyword[if] identifier[x] ] keyword[except] identifier[IndexError] : identifier[print] ( identifier[PyFunceble] . identifier[Fore] . identifier[CYAN] + identifier[PyFunceble] . identifier[Style] . identifier[BRIGHT] + literal[string] )
def file_url(self): """ Manage the case that we have to test a file .. note:: 1 URL per line. """ # We get, format, clean the list of URL to test. list_to_test = self._file_list_to_test_filtering() # We initiate a local variable which will save the current state of the list. not_filtered = list_to_test try: # We remove the element which are in the database from the # current list to test. list_to_test = List(list(set(list_to_test[PyFunceble.INTERN['counter']['number']['tested']:]) - set(PyFunceble.INTERN['flatten_inactive_db']))).format() _ = list_to_test[-1] # depends on [control=['try'], data=[]] except IndexError: # Our list to test is the one with the element from the database. list_to_test = not_filtered[PyFunceble.INTERN['counter']['number']['tested']:] # We delete the undesired variable. del not_filtered # depends on [control=['except'], data=[]] if PyFunceble.CONFIGURATION['hierarchical_sorting']: # The hierarchical sorting is desired by the user. # We format the list. list_to_test = List(list(list_to_test)).custom_format(Sort.hierarchical) # depends on [control=['if'], data=[]] try: # We test each URL from the list to test. return [self.url(x, list_to_test[-1]) for x in list_to_test if x] # depends on [control=['try'], data=[]] except IndexError: # We print a message on screen. print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + 'Nothing to test.') # depends on [control=['except'], data=[]]
def prefix_lines(lines, prefix): """Add the prefix to each of the lines. >>> prefix_lines(['foo', 'bar'], ' ') [' foo', ' bar'] >>> prefix_lines('foo\\nbar', ' ') [' foo', ' bar'] :param list or str lines: A string or a list of strings. If a string is passed, the string is split using splitlines(). :param str prefix: Prefix to add to the lines. Usually an indent. :returns: list """ if isinstance(lines, bytes): lines = lines.decode('utf-8') if isinstance(lines, str): lines = lines.splitlines() return [prefix + line for line in lines]
def function[prefix_lines, parameter[lines, prefix]]: constant[Add the prefix to each of the lines. >>> prefix_lines(['foo', 'bar'], ' ') [' foo', ' bar'] >>> prefix_lines('foo\nbar', ' ') [' foo', ' bar'] :param list or str lines: A string or a list of strings. If a string is passed, the string is split using splitlines(). :param str prefix: Prefix to add to the lines. Usually an indent. :returns: list ] if call[name[isinstance], parameter[name[lines], name[bytes]]] begin[:] variable[lines] assign[=] call[name[lines].decode, parameter[constant[utf-8]]] if call[name[isinstance], parameter[name[lines], name[str]]] begin[:] variable[lines] assign[=] call[name[lines].splitlines, parameter[]] return[<ast.ListComp object at 0x7da1b18027a0>]
keyword[def] identifier[prefix_lines] ( identifier[lines] , identifier[prefix] ): literal[string] keyword[if] identifier[isinstance] ( identifier[lines] , identifier[bytes] ): identifier[lines] = identifier[lines] . identifier[decode] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[lines] , identifier[str] ): identifier[lines] = identifier[lines] . identifier[splitlines] () keyword[return] [ identifier[prefix] + identifier[line] keyword[for] identifier[line] keyword[in] identifier[lines] ]
def prefix_lines(lines, prefix): """Add the prefix to each of the lines. >>> prefix_lines(['foo', 'bar'], ' ') [' foo', ' bar'] >>> prefix_lines('foo\\nbar', ' ') [' foo', ' bar'] :param list or str lines: A string or a list of strings. If a string is passed, the string is split using splitlines(). :param str prefix: Prefix to add to the lines. Usually an indent. :returns: list """ if isinstance(lines, bytes): lines = lines.decode('utf-8') # depends on [control=['if'], data=[]] if isinstance(lines, str): lines = lines.splitlines() # depends on [control=['if'], data=[]] return [prefix + line for line in lines]
def get_turbine_data_from_oedb(turbine_type, fetch_curve, overwrite=False): r""" Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. 
""" # hdf5 filename filename = os.path.join(os.path.dirname(__file__), 'data', 'turbine_data_oedb.h5') if os.path.isfile(filename) and not overwrite: logging.debug("Turbine data is fetched from {}".format(filename)) with pd.HDFStore(filename) as hdf_store: turbine_data = hdf_store.get('turbine_data') else: turbine_data = load_turbine_data_from_oedb() turbine_data.set_index('turbine_type', inplace=True) # Set `curve` depending on `fetch_curve` to match names in oedb curve = ('cp_curve' if fetch_curve == 'power_coefficient_curve' else fetch_curve) # Select curve and nominal power of turbine type try: df = turbine_data.loc[turbine_type] except KeyError: raise KeyError("Turbine type '{}' not in database. ".format( turbine_type) + "Use 'get_turbine_types()' to see a table of " + "possible wind turbine types.") if df[curve] is not None: df = pd.DataFrame(df[curve]) else: sys.exit("{} of {} not available in ".format(curve, turbine_type) + "oedb. Use 'get_turbine_types()' to see for which turbine " + "types power coefficient curves are available.") nominal_power = turbine_data.loc[turbine_type][ 'installed_capacity_kw'] * 1000 df.columns = ['wind_speed', 'value'] if fetch_curve == 'power_curve': # power in W df['value'] = df['value'] * 1000 return df, nominal_power
def function[get_turbine_data_from_oedb, parameter[turbine_type, fetch_curve, overwrite]]: constant[ Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. 
] variable[filename] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[data], constant[turbine_data_oedb.h5]]] if <ast.BoolOp object at 0x7da2041db3a0> begin[:] call[name[logging].debug, parameter[call[constant[Turbine data is fetched from {}].format, parameter[name[filename]]]]] with call[name[pd].HDFStore, parameter[name[filename]]] begin[:] variable[turbine_data] assign[=] call[name[hdf_store].get, parameter[constant[turbine_data]]] call[name[turbine_data].set_index, parameter[constant[turbine_type]]] variable[curve] assign[=] <ast.IfExp object at 0x7da2041d8df0> <ast.Try object at 0x7da1b088ea70> if compare[call[name[df]][name[curve]] is_not constant[None]] begin[:] variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[df]][name[curve]]]] variable[nominal_power] assign[=] binary_operation[call[call[name[turbine_data].loc][name[turbine_type]]][constant[installed_capacity_kw]] * constant[1000]] name[df].columns assign[=] list[[<ast.Constant object at 0x7da2054a7e20>, <ast.Constant object at 0x7da2054a4c10>]] if compare[name[fetch_curve] equal[==] constant[power_curve]] begin[:] call[name[df]][constant[value]] assign[=] binary_operation[call[name[df]][constant[value]] * constant[1000]] return[tuple[[<ast.Name object at 0x7da2054a63e0>, <ast.Name object at 0x7da2054a7070>]]]
keyword[def] identifier[get_turbine_data_from_oedb] ( identifier[turbine_type] , identifier[fetch_curve] , identifier[overwrite] = keyword[False] ): literal[string] identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] , literal[string] ) keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ) keyword[and] keyword[not] identifier[overwrite] : identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filename] )) keyword[with] identifier[pd] . identifier[HDFStore] ( identifier[filename] ) keyword[as] identifier[hdf_store] : identifier[turbine_data] = identifier[hdf_store] . identifier[get] ( literal[string] ) keyword[else] : identifier[turbine_data] = identifier[load_turbine_data_from_oedb] () identifier[turbine_data] . identifier[set_index] ( literal[string] , identifier[inplace] = keyword[True] ) identifier[curve] =( literal[string] keyword[if] identifier[fetch_curve] == literal[string] keyword[else] identifier[fetch_curve] ) keyword[try] : identifier[df] = identifier[turbine_data] . identifier[loc] [ identifier[turbine_type] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[turbine_type] )+ literal[string] + literal[string] ) keyword[if] identifier[df] [ identifier[curve] ] keyword[is] keyword[not] keyword[None] : identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[df] [ identifier[curve] ]) keyword[else] : identifier[sys] . identifier[exit] ( literal[string] . identifier[format] ( identifier[curve] , identifier[turbine_type] )+ literal[string] + literal[string] ) identifier[nominal_power] = identifier[turbine_data] . identifier[loc] [ identifier[turbine_type] ][ literal[string] ]* literal[int] identifier[df] . 
identifier[columns] =[ literal[string] , literal[string] ] keyword[if] identifier[fetch_curve] == literal[string] : identifier[df] [ literal[string] ]= identifier[df] [ literal[string] ]* literal[int] keyword[return] identifier[df] , identifier[nominal_power]
def get_turbine_data_from_oedb(turbine_type, fetch_curve, overwrite=False): """ Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. 
""" # hdf5 filename filename = os.path.join(os.path.dirname(__file__), 'data', 'turbine_data_oedb.h5') if os.path.isfile(filename) and (not overwrite): logging.debug('Turbine data is fetched from {}'.format(filename)) with pd.HDFStore(filename) as hdf_store: turbine_data = hdf_store.get('turbine_data') # depends on [control=['with'], data=['hdf_store']] # depends on [control=['if'], data=[]] else: turbine_data = load_turbine_data_from_oedb() turbine_data.set_index('turbine_type', inplace=True) # Set `curve` depending on `fetch_curve` to match names in oedb curve = 'cp_curve' if fetch_curve == 'power_coefficient_curve' else fetch_curve # Select curve and nominal power of turbine type try: df = turbine_data.loc[turbine_type] # depends on [control=['try'], data=[]] except KeyError: raise KeyError("Turbine type '{}' not in database. ".format(turbine_type) + "Use 'get_turbine_types()' to see a table of " + 'possible wind turbine types.') # depends on [control=['except'], data=[]] if df[curve] is not None: df = pd.DataFrame(df[curve]) # depends on [control=['if'], data=[]] else: sys.exit('{} of {} not available in '.format(curve, turbine_type) + "oedb. Use 'get_turbine_types()' to see for which turbine " + 'types power coefficient curves are available.') nominal_power = turbine_data.loc[turbine_type]['installed_capacity_kw'] * 1000 df.columns = ['wind_speed', 'value'] if fetch_curve == 'power_curve': # power in W df['value'] = df['value'] * 1000 # depends on [control=['if'], data=[]] return (df, nominal_power)
def create_shot_model(self, project, releasetype): """Create and return a new tree model that represents shots til descriptors The tree will include sequences, shots, tasks and descriptors of the given releaetype. :param releasetype: the releasetype for the model :type releasetype: :data:`djadapter.RELEASETYPES` :param project: the project of the shots :type project: :class:`djadapter.models.Project` :returns: the created tree model :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel` :raises: None """ rootdata = treemodel.ListItemData(['Name']) rootitem = treemodel.TreeItem(rootdata) for seq in project.sequence_set.all(): seqdata = djitemdata.SequenceItemData(seq) seqitem = treemodel.TreeItem(seqdata, rootitem) for shot in seq.shot_set.all(): shotdata = djitemdata.ShotItemData(shot) shotitem = treemodel.TreeItem(shotdata, seqitem) for task in shot.tasks.all(): taskdata = djitemdata.TaskItemData(task) taskitem = treemodel.TreeItem(taskdata, shotitem) #get all mayafiles taskfiles = task.taskfile_set.filter(releasetype=releasetype, typ=self._filetype) # get all descriptor values as a list. disctinct eliminates duplicates. for d in taskfiles.order_by('descriptor').values_list('descriptor', flat=True).distinct(): ddata = treemodel.ListItemData([d,]) treemodel.TreeItem(ddata, taskitem) shotmodel = treemodel.TreeModel(rootitem) return shotmodel
def function[create_shot_model, parameter[self, project, releasetype]]: constant[Create and return a new tree model that represents shots til descriptors The tree will include sequences, shots, tasks and descriptors of the given releaetype. :param releasetype: the releasetype for the model :type releasetype: :data:`djadapter.RELEASETYPES` :param project: the project of the shots :type project: :class:`djadapter.models.Project` :returns: the created tree model :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel` :raises: None ] variable[rootdata] assign[=] call[name[treemodel].ListItemData, parameter[list[[<ast.Constant object at 0x7da1b142b850>]]]] variable[rootitem] assign[=] call[name[treemodel].TreeItem, parameter[name[rootdata]]] for taget[name[seq]] in starred[call[name[project].sequence_set.all, parameter[]]] begin[:] variable[seqdata] assign[=] call[name[djitemdata].SequenceItemData, parameter[name[seq]]] variable[seqitem] assign[=] call[name[treemodel].TreeItem, parameter[name[seqdata], name[rootitem]]] for taget[name[shot]] in starred[call[name[seq].shot_set.all, parameter[]]] begin[:] variable[shotdata] assign[=] call[name[djitemdata].ShotItemData, parameter[name[shot]]] variable[shotitem] assign[=] call[name[treemodel].TreeItem, parameter[name[shotdata], name[seqitem]]] for taget[name[task]] in starred[call[name[shot].tasks.all, parameter[]]] begin[:] variable[taskdata] assign[=] call[name[djitemdata].TaskItemData, parameter[name[task]]] variable[taskitem] assign[=] call[name[treemodel].TreeItem, parameter[name[taskdata], name[shotitem]]] variable[taskfiles] assign[=] call[name[task].taskfile_set.filter, parameter[]] for taget[name[d]] in starred[call[call[call[name[taskfiles].order_by, parameter[constant[descriptor]]].values_list, parameter[constant[descriptor]]].distinct, parameter[]]] begin[:] variable[ddata] assign[=] call[name[treemodel].ListItemData, parameter[list[[<ast.Name object at 0x7da1b14280a0>]]]] call[name[treemodel].TreeItem, 
parameter[name[ddata], name[taskitem]]] variable[shotmodel] assign[=] call[name[treemodel].TreeModel, parameter[name[rootitem]]] return[name[shotmodel]]
keyword[def] identifier[create_shot_model] ( identifier[self] , identifier[project] , identifier[releasetype] ): literal[string] identifier[rootdata] = identifier[treemodel] . identifier[ListItemData] ([ literal[string] ]) identifier[rootitem] = identifier[treemodel] . identifier[TreeItem] ( identifier[rootdata] ) keyword[for] identifier[seq] keyword[in] identifier[project] . identifier[sequence_set] . identifier[all] (): identifier[seqdata] = identifier[djitemdata] . identifier[SequenceItemData] ( identifier[seq] ) identifier[seqitem] = identifier[treemodel] . identifier[TreeItem] ( identifier[seqdata] , identifier[rootitem] ) keyword[for] identifier[shot] keyword[in] identifier[seq] . identifier[shot_set] . identifier[all] (): identifier[shotdata] = identifier[djitemdata] . identifier[ShotItemData] ( identifier[shot] ) identifier[shotitem] = identifier[treemodel] . identifier[TreeItem] ( identifier[shotdata] , identifier[seqitem] ) keyword[for] identifier[task] keyword[in] identifier[shot] . identifier[tasks] . identifier[all] (): identifier[taskdata] = identifier[djitemdata] . identifier[TaskItemData] ( identifier[task] ) identifier[taskitem] = identifier[treemodel] . identifier[TreeItem] ( identifier[taskdata] , identifier[shotitem] ) identifier[taskfiles] = identifier[task] . identifier[taskfile_set] . identifier[filter] ( identifier[releasetype] = identifier[releasetype] , identifier[typ] = identifier[self] . identifier[_filetype] ) keyword[for] identifier[d] keyword[in] identifier[taskfiles] . identifier[order_by] ( literal[string] ). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ). identifier[distinct] (): identifier[ddata] = identifier[treemodel] . identifier[ListItemData] ([ identifier[d] ,]) identifier[treemodel] . identifier[TreeItem] ( identifier[ddata] , identifier[taskitem] ) identifier[shotmodel] = identifier[treemodel] . identifier[TreeModel] ( identifier[rootitem] ) keyword[return] identifier[shotmodel]
def create_shot_model(self, project, releasetype): """Create and return a new tree model that represents shots til descriptors The tree will include sequences, shots, tasks and descriptors of the given releaetype. :param releasetype: the releasetype for the model :type releasetype: :data:`djadapter.RELEASETYPES` :param project: the project of the shots :type project: :class:`djadapter.models.Project` :returns: the created tree model :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel` :raises: None """ rootdata = treemodel.ListItemData(['Name']) rootitem = treemodel.TreeItem(rootdata) for seq in project.sequence_set.all(): seqdata = djitemdata.SequenceItemData(seq) seqitem = treemodel.TreeItem(seqdata, rootitem) for shot in seq.shot_set.all(): shotdata = djitemdata.ShotItemData(shot) shotitem = treemodel.TreeItem(shotdata, seqitem) for task in shot.tasks.all(): taskdata = djitemdata.TaskItemData(task) taskitem = treemodel.TreeItem(taskdata, shotitem) #get all mayafiles taskfiles = task.taskfile_set.filter(releasetype=releasetype, typ=self._filetype) # get all descriptor values as a list. disctinct eliminates duplicates. for d in taskfiles.order_by('descriptor').values_list('descriptor', flat=True).distinct(): ddata = treemodel.ListItemData([d]) treemodel.TreeItem(ddata, taskitem) # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=['task']] # depends on [control=['for'], data=['shot']] # depends on [control=['for'], data=['seq']] shotmodel = treemodel.TreeModel(rootitem) return shotmodel
def parse(self, filename): """ Parses a file into an AstromData structure. Args: filename: str The name of the file whose contents will be parsed. Returns: data: AstromData The file contents extracted into a data structure for programmatic access. """ filehandle = storage.open_vos_or_local(filename, "rb") assert filehandle is not None, "Failed to open file {} ".format(filename) filestr = filehandle.read() filehandle.close() assert filestr is not None, "File contents are None" observations = self._parse_observation_list(filestr) self._parse_observation_headers(filestr, observations) sys_header = self._parse_system_header(filestr) sources = self._parse_source_data(filestr, observations) return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)
def function[parse, parameter[self, filename]]: constant[ Parses a file into an AstromData structure. Args: filename: str The name of the file whose contents will be parsed. Returns: data: AstromData The file contents extracted into a data structure for programmatic access. ] variable[filehandle] assign[=] call[name[storage].open_vos_or_local, parameter[name[filename], constant[rb]]] assert[compare[name[filehandle] is_not constant[None]]] variable[filestr] assign[=] call[name[filehandle].read, parameter[]] call[name[filehandle].close, parameter[]] assert[compare[name[filestr] is_not constant[None]]] variable[observations] assign[=] call[name[self]._parse_observation_list, parameter[name[filestr]]] call[name[self]._parse_observation_headers, parameter[name[filestr], name[observations]]] variable[sys_header] assign[=] call[name[self]._parse_system_header, parameter[name[filestr]]] variable[sources] assign[=] call[name[self]._parse_source_data, parameter[name[filestr], name[observations]]] return[call[name[AstromData], parameter[name[observations], name[sys_header], name[sources]]]]
keyword[def] identifier[parse] ( identifier[self] , identifier[filename] ): literal[string] identifier[filehandle] = identifier[storage] . identifier[open_vos_or_local] ( identifier[filename] , literal[string] ) keyword[assert] identifier[filehandle] keyword[is] keyword[not] keyword[None] , literal[string] . identifier[format] ( identifier[filename] ) identifier[filestr] = identifier[filehandle] . identifier[read] () identifier[filehandle] . identifier[close] () keyword[assert] identifier[filestr] keyword[is] keyword[not] keyword[None] , literal[string] identifier[observations] = identifier[self] . identifier[_parse_observation_list] ( identifier[filestr] ) identifier[self] . identifier[_parse_observation_headers] ( identifier[filestr] , identifier[observations] ) identifier[sys_header] = identifier[self] . identifier[_parse_system_header] ( identifier[filestr] ) identifier[sources] = identifier[self] . identifier[_parse_source_data] ( identifier[filestr] , identifier[observations] ) keyword[return] identifier[AstromData] ( identifier[observations] , identifier[sys_header] , identifier[sources] , identifier[discovery_only] = identifier[self] . identifier[discovery_only] )
def parse(self, filename): """ Parses a file into an AstromData structure. Args: filename: str The name of the file whose contents will be parsed. Returns: data: AstromData The file contents extracted into a data structure for programmatic access. """ filehandle = storage.open_vos_or_local(filename, 'rb') assert filehandle is not None, 'Failed to open file {} '.format(filename) filestr = filehandle.read() filehandle.close() assert filestr is not None, 'File contents are None' observations = self._parse_observation_list(filestr) self._parse_observation_headers(filestr, observations) sys_header = self._parse_system_header(filestr) sources = self._parse_source_data(filestr, observations) return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)
def dump_parent(self, obj): """Dump the parent of a PID.""" if not self._is_parent(obj): return self._dump_relative(obj.pid) return None
def function[dump_parent, parameter[self, obj]]: constant[Dump the parent of a PID.] if <ast.UnaryOp object at 0x7da1b229a200> begin[:] return[call[name[self]._dump_relative, parameter[name[obj].pid]]] return[constant[None]]
keyword[def] identifier[dump_parent] ( identifier[self] , identifier[obj] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_is_parent] ( identifier[obj] ): keyword[return] identifier[self] . identifier[_dump_relative] ( identifier[obj] . identifier[pid] ) keyword[return] keyword[None]
def dump_parent(self, obj): """Dump the parent of a PID.""" if not self._is_parent(obj): return self._dump_relative(obj.pid) # depends on [control=['if'], data=[]] return None
def _string_generator(descriptor, max_length=0, limit=0): 'Helper to create a string generator' vals = list(values.get_strings(max_length, limit)) return gen.IterValueGenerator(descriptor.name, vals)
def function[_string_generator, parameter[descriptor, max_length, limit]]: constant[Helper to create a string generator] variable[vals] assign[=] call[name[list], parameter[call[name[values].get_strings, parameter[name[max_length], name[limit]]]]] return[call[name[gen].IterValueGenerator, parameter[name[descriptor].name, name[vals]]]]
keyword[def] identifier[_string_generator] ( identifier[descriptor] , identifier[max_length] = literal[int] , identifier[limit] = literal[int] ): literal[string] identifier[vals] = identifier[list] ( identifier[values] . identifier[get_strings] ( identifier[max_length] , identifier[limit] )) keyword[return] identifier[gen] . identifier[IterValueGenerator] ( identifier[descriptor] . identifier[name] , identifier[vals] )
def _string_generator(descriptor, max_length=0, limit=0): """Helper to create a string generator""" vals = list(values.get_strings(max_length, limit)) return gen.IterValueGenerator(descriptor.name, vals)
def online_help(param=None): """ Open online document in web browser. :param param: input parameter :type param : int or str :return: None """ try: PARAMS_LINK_KEYS = sorted(PARAMS_LINK.keys()) if param in PARAMS_LINK_KEYS: webbrowser.open_new_tab(DOCUMENT_ADR + PARAMS_LINK[param]) elif param in range(1, len(PARAMS_LINK_KEYS) + 1): webbrowser.open_new_tab( DOCUMENT_ADR + PARAMS_LINK[PARAMS_LINK_KEYS[param - 1]]) else: print("Please choose one parameter : \n") print('Example : online_help("J") or online_help(2)\n') for index, item in enumerate(PARAMS_LINK_KEYS): print(str(index + 1) + "-" + item) except Exception: print("Error in online help")
def function[online_help, parameter[param]]: constant[ Open online document in web browser. :param param: input parameter :type param : int or str :return: None ] <ast.Try object at 0x7da1b160a1a0>
keyword[def] identifier[online_help] ( identifier[param] = keyword[None] ): literal[string] keyword[try] : identifier[PARAMS_LINK_KEYS] = identifier[sorted] ( identifier[PARAMS_LINK] . identifier[keys] ()) keyword[if] identifier[param] keyword[in] identifier[PARAMS_LINK_KEYS] : identifier[webbrowser] . identifier[open_new_tab] ( identifier[DOCUMENT_ADR] + identifier[PARAMS_LINK] [ identifier[param] ]) keyword[elif] identifier[param] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[PARAMS_LINK_KEYS] )+ literal[int] ): identifier[webbrowser] . identifier[open_new_tab] ( identifier[DOCUMENT_ADR] + identifier[PARAMS_LINK] [ identifier[PARAMS_LINK_KEYS] [ identifier[param] - literal[int] ]]) keyword[else] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) keyword[for] identifier[index] , identifier[item] keyword[in] identifier[enumerate] ( identifier[PARAMS_LINK_KEYS] ): identifier[print] ( identifier[str] ( identifier[index] + literal[int] )+ literal[string] + identifier[item] ) keyword[except] identifier[Exception] : identifier[print] ( literal[string] )
def online_help(param=None): """ Open online document in web browser. :param param: input parameter :type param : int or str :return: None """ try: PARAMS_LINK_KEYS = sorted(PARAMS_LINK.keys()) if param in PARAMS_LINK_KEYS: webbrowser.open_new_tab(DOCUMENT_ADR + PARAMS_LINK[param]) # depends on [control=['if'], data=['param']] elif param in range(1, len(PARAMS_LINK_KEYS) + 1): webbrowser.open_new_tab(DOCUMENT_ADR + PARAMS_LINK[PARAMS_LINK_KEYS[param - 1]]) # depends on [control=['if'], data=['param']] else: print('Please choose one parameter : \n') print('Example : online_help("J") or online_help(2)\n') for (index, item) in enumerate(PARAMS_LINK_KEYS): print(str(index + 1) + '-' + item) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except Exception: print('Error in online help') # depends on [control=['except'], data=[]]
def list_blobs(self, prefix=''): """Lists names of all blobs by their prefix.""" return [b.name for b in self.bucket.list_blobs(prefix=prefix)]
def function[list_blobs, parameter[self, prefix]]: constant[Lists names of all blobs by their prefix.] return[<ast.ListComp object at 0x7da2047e87f0>]
keyword[def] identifier[list_blobs] ( identifier[self] , identifier[prefix] = literal[string] ): literal[string] keyword[return] [ identifier[b] . identifier[name] keyword[for] identifier[b] keyword[in] identifier[self] . identifier[bucket] . identifier[list_blobs] ( identifier[prefix] = identifier[prefix] )]
def list_blobs(self, prefix=''): """Lists names of all blobs by their prefix.""" return [b.name for b in self.bucket.list_blobs(prefix=prefix)]
def make_wsgi_app(registry=REGISTRY): """Create a WSGI app which serves the metrics from a registry.""" def prometheus_app(environ, start_response): params = parse_qs(environ.get('QUERY_STRING', '')) r = registry encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT')) if 'name[]' in params: r = r.restricted_registry(params['name[]']) output = encoder(r) status = str('200 OK') headers = [(str('Content-type'), content_type)] start_response(status, headers) return [output] return prometheus_app
def function[make_wsgi_app, parameter[registry]]: constant[Create a WSGI app which serves the metrics from a registry.] def function[prometheus_app, parameter[environ, start_response]]: variable[params] assign[=] call[name[parse_qs], parameter[call[name[environ].get, parameter[constant[QUERY_STRING], constant[]]]]] variable[r] assign[=] name[registry] <ast.Tuple object at 0x7da1b2184f10> assign[=] call[name[choose_encoder], parameter[call[name[environ].get, parameter[constant[HTTP_ACCEPT]]]]] if compare[constant[name[]] in name[params]] begin[:] variable[r] assign[=] call[name[r].restricted_registry, parameter[call[name[params]][constant[name[]]]]] variable[output] assign[=] call[name[encoder], parameter[name[r]]] variable[status] assign[=] call[name[str], parameter[constant[200 OK]]] variable[headers] assign[=] list[[<ast.Tuple object at 0x7da1b2185b40>]] call[name[start_response], parameter[name[status], name[headers]]] return[list[[<ast.Name object at 0x7da18dc99f90>]]] return[name[prometheus_app]]
keyword[def] identifier[make_wsgi_app] ( identifier[registry] = identifier[REGISTRY] ): literal[string] keyword[def] identifier[prometheus_app] ( identifier[environ] , identifier[start_response] ): identifier[params] = identifier[parse_qs] ( identifier[environ] . identifier[get] ( literal[string] , literal[string] )) identifier[r] = identifier[registry] identifier[encoder] , identifier[content_type] = identifier[choose_encoder] ( identifier[environ] . identifier[get] ( literal[string] )) keyword[if] literal[string] keyword[in] identifier[params] : identifier[r] = identifier[r] . identifier[restricted_registry] ( identifier[params] [ literal[string] ]) identifier[output] = identifier[encoder] ( identifier[r] ) identifier[status] = identifier[str] ( literal[string] ) identifier[headers] =[( identifier[str] ( literal[string] ), identifier[content_type] )] identifier[start_response] ( identifier[status] , identifier[headers] ) keyword[return] [ identifier[output] ] keyword[return] identifier[prometheus_app]
def make_wsgi_app(registry=REGISTRY): """Create a WSGI app which serves the metrics from a registry.""" def prometheus_app(environ, start_response): params = parse_qs(environ.get('QUERY_STRING', '')) r = registry (encoder, content_type) = choose_encoder(environ.get('HTTP_ACCEPT')) if 'name[]' in params: r = r.restricted_registry(params['name[]']) # depends on [control=['if'], data=['params']] output = encoder(r) status = str('200 OK') headers = [(str('Content-type'), content_type)] start_response(status, headers) return [output] return prometheus_app
def doit(self, classes=None, recursive=True, **kwargs): """Rewrite (sub-)expressions in a more explicit form Return a modified expression that is more explicit than the original expression. The definition of "more explicit" is decided by the relevant subclass, e.g. a :meth:`Commutator <.Commutator.doit>` is written out according to its definition. Args: classes (None or list): an optional list of classes. If given, only (sub-)expressions that an instance of one of the classes in the list will be rewritten. recursive (bool): If True, also rewrite any sub-expressions of any rewritten expression. Note that :meth:`doit` always recurses into sub-expressions of expressions not affected by it. kwargs: Any remaining keyword arguments may be used by the :meth:`doit` method of a particular expression. Example: Consider the following expression:: >>> from sympy import IndexedBase >>> i = IdxSym('i'); N = symbols('N') >>> Asym, Csym = symbols('A, C', cls=IndexedBase) >>> A = lambda i: OperatorSymbol(StrLabel(Asym[i]), hs=0) >>> B = OperatorSymbol('B', hs=0) >>> C = lambda i: OperatorSymbol(StrLabel(Csym[i]), hs=0) >>> def show(expr): ... 
print(unicode(expr, show_hs_label=False)) >>> expr = Sum(i, 1, 3)(Commutator(A(i), B) + C(i)) / N >>> show(expr) 1/N (∑_{i=1}^{3} (Ĉ_i + [Â_i, B̂])) Calling :meth:`doit` without parameters rewrites both the indexed sum and the commutator:: >>> show(expr.doit()) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + Â₁ B̂ + Â₂ B̂ + Â₃ B̂ - B̂ Â₁ - B̂ Â₂ - B̂ Â₃) A non-recursive call only expands the sum, as it does not recurse into the expanded summands:: >>> show(expr.doit(recursive=False)) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) We can selectively expand only the sum or only the commutator:: >>> show(expr.doit(classes=[IndexedSum])) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) >>> show(expr.doit(classes=[Commutator])) 1/N (∑_{i=1}^{3} (Ĉ_i - B̂ Â_i + Â_i B̂)) Also we can pass a keyword argument that expands the sum only to the 2nd term, as documented in :meth:`.Commutator.doit` >>> show(expr.doit(classes=[IndexedSum], max_terms=2)) 1/N (Ĉ₁ + Ĉ₂ + [Â₁, B̂] + [Â₂, B̂]) """ in_classes = ( (classes is None) or any([isinstance(self, cls) for cls in classes])) if in_classes: new = self._doit(**kwargs) else: new = self if (new == self) or recursive: new_args = [] for arg in new.args: if isinstance(arg, Expression): new_args.append(arg.doit( classes=classes, recursive=recursive, **kwargs)) else: new_args.append(arg) new_kwargs = OrderedDict([]) for (key, val) in new.kwargs.items(): if isinstance(val, Expression): new_kwargs[key] = val.doit( classes=classes, recursive=recursive, **kwargs) else: new_kwargs[key] = val new = new.__class__.create(*new_args, **new_kwargs) if new != self and recursive: new = new.doit(classes=classes, recursive=True, **kwargs) return new
def function[doit, parameter[self, classes, recursive]]: constant[Rewrite (sub-)expressions in a more explicit form Return a modified expression that is more explicit than the original expression. The definition of "more explicit" is decided by the relevant subclass, e.g. a :meth:`Commutator <.Commutator.doit>` is written out according to its definition. Args: classes (None or list): an optional list of classes. If given, only (sub-)expressions that an instance of one of the classes in the list will be rewritten. recursive (bool): If True, also rewrite any sub-expressions of any rewritten expression. Note that :meth:`doit` always recurses into sub-expressions of expressions not affected by it. kwargs: Any remaining keyword arguments may be used by the :meth:`doit` method of a particular expression. Example: Consider the following expression:: >>> from sympy import IndexedBase >>> i = IdxSym('i'); N = symbols('N') >>> Asym, Csym = symbols('A, C', cls=IndexedBase) >>> A = lambda i: OperatorSymbol(StrLabel(Asym[i]), hs=0) >>> B = OperatorSymbol('B', hs=0) >>> C = lambda i: OperatorSymbol(StrLabel(Csym[i]), hs=0) >>> def show(expr): ... 
print(unicode(expr, show_hs_label=False)) >>> expr = Sum(i, 1, 3)(Commutator(A(i), B) + C(i)) / N >>> show(expr) 1/N (∑_{i=1}^{3} (Ĉ_i + [Â_i, B̂])) Calling :meth:`doit` without parameters rewrites both the indexed sum and the commutator:: >>> show(expr.doit()) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + Â₁ B̂ + Â₂ B̂ + Â₃ B̂ - B̂ Â₁ - B̂ Â₂ - B̂ Â₃) A non-recursive call only expands the sum, as it does not recurse into the expanded summands:: >>> show(expr.doit(recursive=False)) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) We can selectively expand only the sum or only the commutator:: >>> show(expr.doit(classes=[IndexedSum])) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) >>> show(expr.doit(classes=[Commutator])) 1/N (∑_{i=1}^{3} (Ĉ_i - B̂ Â_i + Â_i B̂)) Also we can pass a keyword argument that expands the sum only to the 2nd term, as documented in :meth:`.Commutator.doit` >>> show(expr.doit(classes=[IndexedSum], max_terms=2)) 1/N (Ĉ₁ + Ĉ₂ + [Â₁, B̂] + [Â₂, B̂]) ] variable[in_classes] assign[=] <ast.BoolOp object at 0x7da18bcca650> if name[in_classes] begin[:] variable[new] assign[=] call[name[self]._doit, parameter[]] if <ast.BoolOp object at 0x7da1b27bab00> begin[:] variable[new_args] assign[=] list[[]] for taget[name[arg]] in starred[name[new].args] begin[:] if call[name[isinstance], parameter[name[arg], name[Expression]]] begin[:] call[name[new_args].append, parameter[call[name[arg].doit, parameter[]]]] variable[new_kwargs] assign[=] call[name[OrderedDict], parameter[list[[]]]] for taget[tuple[[<ast.Name object at 0x7da1b27bbcd0>, <ast.Name object at 0x7da1b27b89d0>]]] in starred[call[name[new].kwargs.items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[val], name[Expression]]] begin[:] call[name[new_kwargs]][name[key]] assign[=] call[name[val].doit, parameter[]] variable[new] assign[=] call[name[new].__class__.create, parameter[<ast.Starred object at 0x7da1b27bb2b0>]] if <ast.BoolOp object at 0x7da1b27b9930> begin[:] variable[new] assign[=] 
call[name[new].doit, parameter[]] return[name[new]]
keyword[def] identifier[doit] ( identifier[self] , identifier[classes] = keyword[None] , identifier[recursive] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[in_classes] =( ( identifier[classes] keyword[is] keyword[None] ) keyword[or] identifier[any] ([ identifier[isinstance] ( identifier[self] , identifier[cls] ) keyword[for] identifier[cls] keyword[in] identifier[classes] ])) keyword[if] identifier[in_classes] : identifier[new] = identifier[self] . identifier[_doit] (** identifier[kwargs] ) keyword[else] : identifier[new] = identifier[self] keyword[if] ( identifier[new] == identifier[self] ) keyword[or] identifier[recursive] : identifier[new_args] =[] keyword[for] identifier[arg] keyword[in] identifier[new] . identifier[args] : keyword[if] identifier[isinstance] ( identifier[arg] , identifier[Expression] ): identifier[new_args] . identifier[append] ( identifier[arg] . identifier[doit] ( identifier[classes] = identifier[classes] , identifier[recursive] = identifier[recursive] ,** identifier[kwargs] )) keyword[else] : identifier[new_args] . identifier[append] ( identifier[arg] ) identifier[new_kwargs] = identifier[OrderedDict] ([]) keyword[for] ( identifier[key] , identifier[val] ) keyword[in] identifier[new] . identifier[kwargs] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[val] , identifier[Expression] ): identifier[new_kwargs] [ identifier[key] ]= identifier[val] . identifier[doit] ( identifier[classes] = identifier[classes] , identifier[recursive] = identifier[recursive] ,** identifier[kwargs] ) keyword[else] : identifier[new_kwargs] [ identifier[key] ]= identifier[val] identifier[new] = identifier[new] . identifier[__class__] . identifier[create] (* identifier[new_args] ,** identifier[new_kwargs] ) keyword[if] identifier[new] != identifier[self] keyword[and] identifier[recursive] : identifier[new] = identifier[new] . 
identifier[doit] ( identifier[classes] = identifier[classes] , identifier[recursive] = keyword[True] ,** identifier[kwargs] ) keyword[return] identifier[new]
def doit(self, classes=None, recursive=True, **kwargs): """Rewrite (sub-)expressions in a more explicit form Return a modified expression that is more explicit than the original expression. The definition of "more explicit" is decided by the relevant subclass, e.g. a :meth:`Commutator <.Commutator.doit>` is written out according to its definition. Args: classes (None or list): an optional list of classes. If given, only (sub-)expressions that an instance of one of the classes in the list will be rewritten. recursive (bool): If True, also rewrite any sub-expressions of any rewritten expression. Note that :meth:`doit` always recurses into sub-expressions of expressions not affected by it. kwargs: Any remaining keyword arguments may be used by the :meth:`doit` method of a particular expression. Example: Consider the following expression:: >>> from sympy import IndexedBase >>> i = IdxSym('i'); N = symbols('N') >>> Asym, Csym = symbols('A, C', cls=IndexedBase) >>> A = lambda i: OperatorSymbol(StrLabel(Asym[i]), hs=0) >>> B = OperatorSymbol('B', hs=0) >>> C = lambda i: OperatorSymbol(StrLabel(Csym[i]), hs=0) >>> def show(expr): ... 
print(unicode(expr, show_hs_label=False)) >>> expr = Sum(i, 1, 3)(Commutator(A(i), B) + C(i)) / N >>> show(expr) 1/N (∑_{i=1}^{3} (Ĉ_i + [Â_i, B̂])) Calling :meth:`doit` without parameters rewrites both the indexed sum and the commutator:: >>> show(expr.doit()) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + Â₁ B̂ + Â₂ B̂ + Â₃ B̂ - B̂ Â₁ - B̂ Â₂ - B̂ Â₃) A non-recursive call only expands the sum, as it does not recurse into the expanded summands:: >>> show(expr.doit(recursive=False)) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) We can selectively expand only the sum or only the commutator:: >>> show(expr.doit(classes=[IndexedSum])) 1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂]) >>> show(expr.doit(classes=[Commutator])) 1/N (∑_{i=1}^{3} (Ĉ_i - B̂ Â_i + Â_i B̂)) Also we can pass a keyword argument that expands the sum only to the 2nd term, as documented in :meth:`.Commutator.doit` >>> show(expr.doit(classes=[IndexedSum], max_terms=2)) 1/N (Ĉ₁ + Ĉ₂ + [Â₁, B̂] + [Â₂, B̂]) """ in_classes = classes is None or any([isinstance(self, cls) for cls in classes]) if in_classes: new = self._doit(**kwargs) # depends on [control=['if'], data=[]] else: new = self if new == self or recursive: new_args = [] for arg in new.args: if isinstance(arg, Expression): new_args.append(arg.doit(classes=classes, recursive=recursive, **kwargs)) # depends on [control=['if'], data=[]] else: new_args.append(arg) # depends on [control=['for'], data=['arg']] new_kwargs = OrderedDict([]) for (key, val) in new.kwargs.items(): if isinstance(val, Expression): new_kwargs[key] = val.doit(classes=classes, recursive=recursive, **kwargs) # depends on [control=['if'], data=[]] else: new_kwargs[key] = val # depends on [control=['for'], data=[]] new = new.__class__.create(*new_args, **new_kwargs) if new != self and recursive: new = new.doit(classes=classes, recursive=True, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return new
def set_locale(ln): """Set Babel localization in request context. :param ln: Language identifier. """ ctx = _request_ctx_stack.top if ctx is None: raise RuntimeError('Working outside of request context.') new_locale = current_app.extensions['babel'].load_locale(ln) old_locale = getattr(ctx, 'babel_locale', None) setattr(ctx, 'babel_locale', new_locale) yield setattr(ctx, 'babel_locale', old_locale)
def function[set_locale, parameter[ln]]: constant[Set Babel localization in request context. :param ln: Language identifier. ] variable[ctx] assign[=] name[_request_ctx_stack].top if compare[name[ctx] is constant[None]] begin[:] <ast.Raise object at 0x7da18fe91990> variable[new_locale] assign[=] call[call[name[current_app].extensions][constant[babel]].load_locale, parameter[name[ln]]] variable[old_locale] assign[=] call[name[getattr], parameter[name[ctx], constant[babel_locale], constant[None]]] call[name[setattr], parameter[name[ctx], constant[babel_locale], name[new_locale]]] <ast.Yield object at 0x7da18fe93070> call[name[setattr], parameter[name[ctx], constant[babel_locale], name[old_locale]]]
keyword[def] identifier[set_locale] ( identifier[ln] ): literal[string] identifier[ctx] = identifier[_request_ctx_stack] . identifier[top] keyword[if] identifier[ctx] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[new_locale] = identifier[current_app] . identifier[extensions] [ literal[string] ]. identifier[load_locale] ( identifier[ln] ) identifier[old_locale] = identifier[getattr] ( identifier[ctx] , literal[string] , keyword[None] ) identifier[setattr] ( identifier[ctx] , literal[string] , identifier[new_locale] ) keyword[yield] identifier[setattr] ( identifier[ctx] , literal[string] , identifier[old_locale] )
def set_locale(ln): """Set Babel localization in request context. :param ln: Language identifier. """ ctx = _request_ctx_stack.top if ctx is None: raise RuntimeError('Working outside of request context.') # depends on [control=['if'], data=[]] new_locale = current_app.extensions['babel'].load_locale(ln) old_locale = getattr(ctx, 'babel_locale', None) setattr(ctx, 'babel_locale', new_locale) yield setattr(ctx, 'babel_locale', old_locale)
def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret
def function[avail_images, parameter[call, location]]: constant[ Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ] if compare[name[call] equal[==] constant[action]] begin[:] <ast.Raise object at 0x7da18bc72770> variable[ret] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da18bc71450>, <ast.Name object at 0x7da18bc70610>]]] in starred[call[name[six].iteritems, parameter[call[name[avail_locations], parameter[]]]]] begin[:] for taget[name[item]] in starred[call[name[query], parameter[constant[get], call[constant[nodes/{0}/storage/{1}/content].format, parameter[name[host_name], name[location]]]]]] begin[:] call[name[ret]][call[name[item]][constant[volid]]] assign[=] name[item] return[name[ret]]
keyword[def] identifier[avail_images] ( identifier[call] = keyword[None] , identifier[location] = literal[string] ): literal[string] keyword[if] identifier[call] == literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] literal[string] ) identifier[ret] ={} keyword[for] identifier[host_name] , identifier[host_details] keyword[in] identifier[six] . identifier[iteritems] ( identifier[avail_locations] ()): keyword[for] identifier[item] keyword[in] identifier[query] ( literal[string] , literal[string] . identifier[format] ( identifier[host_name] , identifier[location] )): identifier[ret] [ identifier[item] [ literal[string] ]]= identifier[item] keyword[return] identifier[ret]
def avail_images(call=None, location='local'): """ Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config """ if call == 'action': raise SaltCloudSystemExit('The avail_images function must be called with -f or --function, or with the --list-images option') # depends on [control=['if'], data=[]] ret = {} for (host_name, host_details) in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=[]] return ret
def maintenance_mode(self, **kwargs): """Configures maintenance mode on the device Args: rbridge_id (str): The rbridge ID of the device on which Maintenance mode will be configured in a VCS fabric. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `rbridge_id` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.202', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.system.maintenance_mode(rbridge_id='226') ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == True ... output = dev.system.maintenance_mode(rbridge_id='226', ... delete=True) ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == False """ is_get_config = kwargs.pop('get', False) delete = kwargs.pop('delete', False) rbridge_id = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) rid_args = dict(rbridge_id=rbridge_id) rid = getattr(self._rbridge, 'rbridge_id_system_mode_maintenance') config = rid(**rid_args) if is_get_config: maint_mode = callback(config, handler='get_config') mode = maint_mode.data_xml root = ET.fromstring(mode) namespace = 'urn:brocade.com:mgmt:brocade-rbridge' for rbridge_id_node in root.findall('{%s}rbridge-id' % namespace): system_mode = rbridge_id_node.find( '{%s}system-mode' % namespace) if system_mode is not None: return True else: return False if delete: config.find('.//*maintenance').set('operation', 'delete') return callback(config)
def function[maintenance_mode, parameter[self]]: constant[Configures maintenance mode on the device Args: rbridge_id (str): The rbridge ID of the device on which Maintenance mode will be configured in a VCS fabric. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `rbridge_id` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.202', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.system.maintenance_mode(rbridge_id='226') ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == True ... output = dev.system.maintenance_mode(rbridge_id='226', ... delete=True) ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == False ] variable[is_get_config] assign[=] call[name[kwargs].pop, parameter[constant[get], constant[False]]] variable[delete] assign[=] call[name[kwargs].pop, parameter[constant[delete], constant[False]]] variable[rbridge_id] assign[=] call[name[kwargs].pop, parameter[constant[rbridge_id]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] variable[rid_args] assign[=] call[name[dict], parameter[]] variable[rid] assign[=] call[name[getattr], parameter[name[self]._rbridge, constant[rbridge_id_system_mode_maintenance]]] variable[config] assign[=] call[name[rid], parameter[]] if name[is_get_config] begin[:] variable[maint_mode] assign[=] call[name[callback], parameter[name[config]]] variable[mode] assign[=] name[maint_mode].data_xml variable[root] assign[=] call[name[ET].fromstring, parameter[name[mode]]] variable[namespace] assign[=] constant[urn:brocade.com:mgmt:brocade-rbridge] for taget[name[rbridge_id_node]] in 
starred[call[name[root].findall, parameter[binary_operation[constant[{%s}rbridge-id] <ast.Mod object at 0x7da2590d6920> name[namespace]]]]] begin[:] variable[system_mode] assign[=] call[name[rbridge_id_node].find, parameter[binary_operation[constant[{%s}system-mode] <ast.Mod object at 0x7da2590d6920> name[namespace]]]] if compare[name[system_mode] is_not constant[None]] begin[:] return[constant[True]] if name[delete] begin[:] call[call[name[config].find, parameter[constant[.//*maintenance]]].set, parameter[constant[operation], constant[delete]]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[maintenance_mode] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[is_get_config] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[delete] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[rbridge_id] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) identifier[rid_args] = identifier[dict] ( identifier[rbridge_id] = identifier[rbridge_id] ) identifier[rid] = identifier[getattr] ( identifier[self] . identifier[_rbridge] , literal[string] ) identifier[config] = identifier[rid] (** identifier[rid_args] ) keyword[if] identifier[is_get_config] : identifier[maint_mode] = identifier[callback] ( identifier[config] , identifier[handler] = literal[string] ) identifier[mode] = identifier[maint_mode] . identifier[data_xml] identifier[root] = identifier[ET] . identifier[fromstring] ( identifier[mode] ) identifier[namespace] = literal[string] keyword[for] identifier[rbridge_id_node] keyword[in] identifier[root] . identifier[findall] ( literal[string] % identifier[namespace] ): identifier[system_mode] = identifier[rbridge_id_node] . identifier[find] ( literal[string] % identifier[namespace] ) keyword[if] identifier[system_mode] keyword[is] keyword[not] keyword[None] : keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False] keyword[if] identifier[delete] : identifier[config] . identifier[find] ( literal[string] ). identifier[set] ( literal[string] , literal[string] ) keyword[return] identifier[callback] ( identifier[config] )
def maintenance_mode(self, **kwargs): """Configures maintenance mode on the device Args: rbridge_id (str): The rbridge ID of the device on which Maintenance mode will be configured in a VCS fabric. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `rbridge_id` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.202', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.system.maintenance_mode(rbridge_id='226') ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == True ... output = dev.system.maintenance_mode(rbridge_id='226', ... delete=True) ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == False """ is_get_config = kwargs.pop('get', False) delete = kwargs.pop('delete', False) rbridge_id = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) rid_args = dict(rbridge_id=rbridge_id) rid = getattr(self._rbridge, 'rbridge_id_system_mode_maintenance') config = rid(**rid_args) if is_get_config: maint_mode = callback(config, handler='get_config') mode = maint_mode.data_xml root = ET.fromstring(mode) namespace = 'urn:brocade.com:mgmt:brocade-rbridge' for rbridge_id_node in root.findall('{%s}rbridge-id' % namespace): system_mode = rbridge_id_node.find('{%s}system-mode' % namespace) if system_mode is not None: return True # depends on [control=['if'], data=[]] else: return False # depends on [control=['for'], data=['rbridge_id_node']] # depends on [control=['if'], data=[]] if delete: config.find('.//*maintenance').set('operation', 'delete') # depends on [control=['if'], data=[]] return callback(config)
def apply(self, template, context={}): context.update(self.context) """Return the rendered text of a template instance""" return self.env.from_string(template).render(context)
def function[apply, parameter[self, template, context]]: call[name[context].update, parameter[name[self].context]] constant[Return the rendered text of a template instance] return[call[call[name[self].env.from_string, parameter[name[template]]].render, parameter[name[context]]]]
keyword[def] identifier[apply] ( identifier[self] , identifier[template] , identifier[context] ={}): identifier[context] . identifier[update] ( identifier[self] . identifier[context] ) literal[string] keyword[return] identifier[self] . identifier[env] . identifier[from_string] ( identifier[template] ). identifier[render] ( identifier[context] )
def apply(self, template, context={}): context.update(self.context) 'Return the rendered text of a template instance' return self.env.from_string(template).render(context)
def rmse(params1, params2): r"""Compute the root-mean-squared error between two models. Parameters ---------- params1 : array_like Parameters of the first model. params2 : array_like Parameters of the second model. Returns ------- error : float Root-mean-squared error. """ assert len(params1) == len(params2) params1 = np.asarray(params1) - np.mean(params1) params2 = np.asarray(params2) - np.mean(params2) sqrt_n = math.sqrt(len(params1)) return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
def function[rmse, parameter[params1, params2]]: constant[Compute the root-mean-squared error between two models. Parameters ---------- params1 : array_like Parameters of the first model. params2 : array_like Parameters of the second model. Returns ------- error : float Root-mean-squared error. ] assert[compare[call[name[len], parameter[name[params1]]] equal[==] call[name[len], parameter[name[params2]]]]] variable[params1] assign[=] binary_operation[call[name[np].asarray, parameter[name[params1]]] - call[name[np].mean, parameter[name[params1]]]] variable[params2] assign[=] binary_operation[call[name[np].asarray, parameter[name[params2]]] - call[name[np].mean, parameter[name[params2]]]] variable[sqrt_n] assign[=] call[name[math].sqrt, parameter[call[name[len], parameter[name[params1]]]]] return[binary_operation[call[name[np].linalg.norm, parameter[binary_operation[name[params1] - name[params2]]]] / name[sqrt_n]]]
keyword[def] identifier[rmse] ( identifier[params1] , identifier[params2] ): literal[string] keyword[assert] identifier[len] ( identifier[params1] )== identifier[len] ( identifier[params2] ) identifier[params1] = identifier[np] . identifier[asarray] ( identifier[params1] )- identifier[np] . identifier[mean] ( identifier[params1] ) identifier[params2] = identifier[np] . identifier[asarray] ( identifier[params2] )- identifier[np] . identifier[mean] ( identifier[params2] ) identifier[sqrt_n] = identifier[math] . identifier[sqrt] ( identifier[len] ( identifier[params1] )) keyword[return] identifier[np] . identifier[linalg] . identifier[norm] ( identifier[params1] - identifier[params2] , identifier[ord] = literal[int] )/ identifier[sqrt_n]
def rmse(params1, params2): """Compute the root-mean-squared error between two models. Parameters ---------- params1 : array_like Parameters of the first model. params2 : array_like Parameters of the second model. Returns ------- error : float Root-mean-squared error. """ assert len(params1) == len(params2) params1 = np.asarray(params1) - np.mean(params1) params2 = np.asarray(params2) - np.mean(params2) sqrt_n = math.sqrt(len(params1)) return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
def _run_query(client, query, job_config=None): """Runs a query while printing status updates Args: client (google.cloud.bigquery.client.Client): Client to bundle configuration needed for API requests. query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. job_config (google.cloud.bigquery.job.QueryJobConfig, optional): Extra configuration options for the job. Returns: google.cloud.bigquery.job.QueryJob: the query job created Example: >>> client = bigquery.Client() >>> _run_query(client, "SELECT 17") Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b Query executing: 1.66s Query complete after 2.07s 'bf633912-af2c-4780-b568-5d868058632b' """ start_time = time.time() query_job = client.query(query, job_config=job_config) print("Executing query with job ID: {}".format(query_job.job_id)) while True: print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="") try: query_job.result(timeout=0.5) break except futures.TimeoutError: continue print("\nQuery complete after {:0.2f}s".format(time.time() - start_time)) return query_job
def function[_run_query, parameter[client, query, job_config]]: constant[Runs a query while printing status updates Args: client (google.cloud.bigquery.client.Client): Client to bundle configuration needed for API requests. query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. job_config (google.cloud.bigquery.job.QueryJobConfig, optional): Extra configuration options for the job. Returns: google.cloud.bigquery.job.QueryJob: the query job created Example: >>> client = bigquery.Client() >>> _run_query(client, "SELECT 17") Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b Query executing: 1.66s Query complete after 2.07s 'bf633912-af2c-4780-b568-5d868058632b' ] variable[start_time] assign[=] call[name[time].time, parameter[]] variable[query_job] assign[=] call[name[client].query, parameter[name[query]]] call[name[print], parameter[call[constant[Executing query with job ID: {}].format, parameter[name[query_job].job_id]]]] while constant[True] begin[:] call[name[print], parameter[call[constant[ Query executing: {:0.2f}s].format, parameter[binary_operation[call[name[time].time, parameter[]] - name[start_time]]]]]] <ast.Try object at 0x7da20e954760> call[name[print], parameter[call[constant[ Query complete after {:0.2f}s].format, parameter[binary_operation[call[name[time].time, parameter[]] - name[start_time]]]]]] return[name[query_job]]
keyword[def] identifier[_run_query] ( identifier[client] , identifier[query] , identifier[job_config] = keyword[None] ): literal[string] identifier[start_time] = identifier[time] . identifier[time] () identifier[query_job] = identifier[client] . identifier[query] ( identifier[query] , identifier[job_config] = identifier[job_config] ) identifier[print] ( literal[string] . identifier[format] ( identifier[query_job] . identifier[job_id] )) keyword[while] keyword[True] : identifier[print] ( literal[string] . identifier[format] ( identifier[time] . identifier[time] ()- identifier[start_time] ), identifier[end] = literal[string] ) keyword[try] : identifier[query_job] . identifier[result] ( identifier[timeout] = literal[int] ) keyword[break] keyword[except] identifier[futures] . identifier[TimeoutError] : keyword[continue] identifier[print] ( literal[string] . identifier[format] ( identifier[time] . identifier[time] ()- identifier[start_time] )) keyword[return] identifier[query_job]
def _run_query(client, query, job_config=None): """Runs a query while printing status updates Args: client (google.cloud.bigquery.client.Client): Client to bundle configuration needed for API requests. query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. job_config (google.cloud.bigquery.job.QueryJobConfig, optional): Extra configuration options for the job. Returns: google.cloud.bigquery.job.QueryJob: the query job created Example: >>> client = bigquery.Client() >>> _run_query(client, "SELECT 17") Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b Query executing: 1.66s Query complete after 2.07s 'bf633912-af2c-4780-b568-5d868058632b' """ start_time = time.time() query_job = client.query(query, job_config=job_config) print('Executing query with job ID: {}'.format(query_job.job_id)) while True: print('\rQuery executing: {:0.2f}s'.format(time.time() - start_time), end='') try: query_job.result(timeout=0.5) break # depends on [control=['try'], data=[]] except futures.TimeoutError: continue # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] print('\nQuery complete after {:0.2f}s'.format(time.time() - start_time)) return query_job
def parse_date(value): """Attempts to parse `value` into an instance of ``datetime.date``. If `value` is ``None``, this function will return ``None``. Args: value: A timestamp. This can be a string, datetime.date, or datetime.datetime value. """ if not value: return None if isinstance(value, datetime.date): return value return parse_datetime(value).date()
def function[parse_date, parameter[value]]: constant[Attempts to parse `value` into an instance of ``datetime.date``. If `value` is ``None``, this function will return ``None``. Args: value: A timestamp. This can be a string, datetime.date, or datetime.datetime value. ] if <ast.UnaryOp object at 0x7da2054a4880> begin[:] return[constant[None]] if call[name[isinstance], parameter[name[value], name[datetime].date]] begin[:] return[name[value]] return[call[call[name[parse_datetime], parameter[name[value]]].date, parameter[]]]
keyword[def] identifier[parse_date] ( identifier[value] ): literal[string] keyword[if] keyword[not] identifier[value] : keyword[return] keyword[None] keyword[if] identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[date] ): keyword[return] identifier[value] keyword[return] identifier[parse_datetime] ( identifier[value] ). identifier[date] ()
def parse_date(value): """Attempts to parse `value` into an instance of ``datetime.date``. If `value` is ``None``, this function will return ``None``. Args: value: A timestamp. This can be a string, datetime.date, or datetime.datetime value. """ if not value: return None # depends on [control=['if'], data=[]] if isinstance(value, datetime.date): return value # depends on [control=['if'], data=[]] return parse_datetime(value).date()
def get_schema(url, typename, version='1.0.0', timeout=30, username=None, password=None): """Parses DescribeFeatureType response and creates schema compatible with :class:`fiona` :param str url: url of the service :param str version: version of the service :param str typename: name of the layer :param int timeout: request timeout """ url = _get_describefeaturetype_url(url, version, typename) res = openURL(url, timeout=timeout, username=username, password=password) root = etree.fromstring(res.read()) if ':' in typename: typename = typename.split(':')[1] type_element = findall(root, '{%s}element' % XS_NAMESPACE, attribute_name='name', attribute_value=typename)[0] complex_type = type_element.attrib['type'].split(":")[1] elements = _get_elements(complex_type, root) nsmap = None if hasattr(root, 'nsmap'): nsmap = root.nsmap return _construct_schema(elements, nsmap)
def function[get_schema, parameter[url, typename, version, timeout, username, password]]: constant[Parses DescribeFeatureType response and creates schema compatible with :class:`fiona` :param str url: url of the service :param str version: version of the service :param str typename: name of the layer :param int timeout: request timeout ] variable[url] assign[=] call[name[_get_describefeaturetype_url], parameter[name[url], name[version], name[typename]]] variable[res] assign[=] call[name[openURL], parameter[name[url]]] variable[root] assign[=] call[name[etree].fromstring, parameter[call[name[res].read, parameter[]]]] if compare[constant[:] in name[typename]] begin[:] variable[typename] assign[=] call[call[name[typename].split, parameter[constant[:]]]][constant[1]] variable[type_element] assign[=] call[call[name[findall], parameter[name[root], binary_operation[constant[{%s}element] <ast.Mod object at 0x7da2590d6920> name[XS_NAMESPACE]]]]][constant[0]] variable[complex_type] assign[=] call[call[call[name[type_element].attrib][constant[type]].split, parameter[constant[:]]]][constant[1]] variable[elements] assign[=] call[name[_get_elements], parameter[name[complex_type], name[root]]] variable[nsmap] assign[=] constant[None] if call[name[hasattr], parameter[name[root], constant[nsmap]]] begin[:] variable[nsmap] assign[=] name[root].nsmap return[call[name[_construct_schema], parameter[name[elements], name[nsmap]]]]
keyword[def] identifier[get_schema] ( identifier[url] , identifier[typename] , identifier[version] = literal[string] , identifier[timeout] = literal[int] , identifier[username] = keyword[None] , identifier[password] = keyword[None] ): literal[string] identifier[url] = identifier[_get_describefeaturetype_url] ( identifier[url] , identifier[version] , identifier[typename] ) identifier[res] = identifier[openURL] ( identifier[url] , identifier[timeout] = identifier[timeout] , identifier[username] = identifier[username] , identifier[password] = identifier[password] ) identifier[root] = identifier[etree] . identifier[fromstring] ( identifier[res] . identifier[read] ()) keyword[if] literal[string] keyword[in] identifier[typename] : identifier[typename] = identifier[typename] . identifier[split] ( literal[string] )[ literal[int] ] identifier[type_element] = identifier[findall] ( identifier[root] , literal[string] % identifier[XS_NAMESPACE] , identifier[attribute_name] = literal[string] , identifier[attribute_value] = identifier[typename] )[ literal[int] ] identifier[complex_type] = identifier[type_element] . identifier[attrib] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ] identifier[elements] = identifier[_get_elements] ( identifier[complex_type] , identifier[root] ) identifier[nsmap] = keyword[None] keyword[if] identifier[hasattr] ( identifier[root] , literal[string] ): identifier[nsmap] = identifier[root] . identifier[nsmap] keyword[return] identifier[_construct_schema] ( identifier[elements] , identifier[nsmap] )
def get_schema(url, typename, version='1.0.0', timeout=30, username=None, password=None): """Parses DescribeFeatureType response and creates schema compatible with :class:`fiona` :param str url: url of the service :param str version: version of the service :param str typename: name of the layer :param int timeout: request timeout """ url = _get_describefeaturetype_url(url, version, typename) res = openURL(url, timeout=timeout, username=username, password=password) root = etree.fromstring(res.read()) if ':' in typename: typename = typename.split(':')[1] # depends on [control=['if'], data=['typename']] type_element = findall(root, '{%s}element' % XS_NAMESPACE, attribute_name='name', attribute_value=typename)[0] complex_type = type_element.attrib['type'].split(':')[1] elements = _get_elements(complex_type, root) nsmap = None if hasattr(root, 'nsmap'): nsmap = root.nsmap # depends on [control=['if'], data=[]] return _construct_schema(elements, nsmap)
def match(self, el): """Match.""" return not self.is_doc(el) and self.is_tag(el) and self.match_selectors(el, self.selectors)
def function[match, parameter[self, el]]: constant[Match.] return[<ast.BoolOp object at 0x7da204961db0>]
keyword[def] identifier[match] ( identifier[self] , identifier[el] ): literal[string] keyword[return] keyword[not] identifier[self] . identifier[is_doc] ( identifier[el] ) keyword[and] identifier[self] . identifier[is_tag] ( identifier[el] ) keyword[and] identifier[self] . identifier[match_selectors] ( identifier[el] , identifier[self] . identifier[selectors] )
def match(self, el): """Match.""" return not self.is_doc(el) and self.is_tag(el) and self.match_selectors(el, self.selectors)
def install_translator(qapp): """Install Qt translator to the QApplication instance""" global QT_TRANSLATOR if QT_TRANSLATOR is None: qt_translator = QTranslator() if qt_translator.load("qt_"+QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath)): QT_TRANSLATOR = qt_translator # Keep reference alive if QT_TRANSLATOR is not None: qapp.installTranslator(QT_TRANSLATOR)
def function[install_translator, parameter[qapp]]: constant[Install Qt translator to the QApplication instance] <ast.Global object at 0x7da18ede4be0> if compare[name[QT_TRANSLATOR] is constant[None]] begin[:] variable[qt_translator] assign[=] call[name[QTranslator], parameter[]] if call[name[qt_translator].load, parameter[binary_operation[constant[qt_] + call[call[name[QLocale].system, parameter[]].name, parameter[]]], call[name[QLibraryInfo].location, parameter[name[QLibraryInfo].TranslationsPath]]]] begin[:] variable[QT_TRANSLATOR] assign[=] name[qt_translator] if compare[name[QT_TRANSLATOR] is_not constant[None]] begin[:] call[name[qapp].installTranslator, parameter[name[QT_TRANSLATOR]]]
keyword[def] identifier[install_translator] ( identifier[qapp] ): literal[string] keyword[global] identifier[QT_TRANSLATOR] keyword[if] identifier[QT_TRANSLATOR] keyword[is] keyword[None] : identifier[qt_translator] = identifier[QTranslator] () keyword[if] identifier[qt_translator] . identifier[load] ( literal[string] + identifier[QLocale] . identifier[system] (). identifier[name] (), identifier[QLibraryInfo] . identifier[location] ( identifier[QLibraryInfo] . identifier[TranslationsPath] )): identifier[QT_TRANSLATOR] = identifier[qt_translator] keyword[if] identifier[QT_TRANSLATOR] keyword[is] keyword[not] keyword[None] : identifier[qapp] . identifier[installTranslator] ( identifier[QT_TRANSLATOR] )
def install_translator(qapp): """Install Qt translator to the QApplication instance""" global QT_TRANSLATOR if QT_TRANSLATOR is None: qt_translator = QTranslator() if qt_translator.load('qt_' + QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath)): QT_TRANSLATOR = qt_translator # Keep reference alive # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['QT_TRANSLATOR']] if QT_TRANSLATOR is not None: qapp.installTranslator(QT_TRANSLATOR) # depends on [control=['if'], data=['QT_TRANSLATOR']]
def advance(self, inputs, advance_time, time_step=None): """ Advance the simulation by the given amount of time, assuming that inputs are constant at the given values during the simulated time. """ final_time_seconds = self.time_seconds + advance_time # Use half of the max allowed time step if none is given. if time_step is None: # pragma: no cover time_step = 0.5 * self.get_max_time_step() if len(self.input_nodes) != len(inputs): raise RuntimeError("Expected {0} inputs, got {1}".format(len(self.input_nodes), len(inputs))) while self.time_seconds < final_time_seconds: dt = min(time_step, final_time_seconds - self.time_seconds) ivalues = self.values[self.active] ovalues = self.values[1 - self.active] self.active = 1 - self.active for i, v in zip(self.input_nodes, inputs): ivalues[i] = v ovalues[i] = v for node_key, ne in iteritems(self.node_evals): node_inputs = [ivalues[i] * w for i, w in ne.links] s = ne.aggregation(node_inputs) z = ne.activation(ne.bias + ne.response * s) ovalues[node_key] += dt / ne.time_constant * (-ovalues[node_key] + z) self.time_seconds += dt ovalues = self.values[1 - self.active] return [ovalues[i] for i in self.output_nodes]
def function[advance, parameter[self, inputs, advance_time, time_step]]: constant[ Advance the simulation by the given amount of time, assuming that inputs are constant at the given values during the simulated time. ] variable[final_time_seconds] assign[=] binary_operation[name[self].time_seconds + name[advance_time]] if compare[name[time_step] is constant[None]] begin[:] variable[time_step] assign[=] binary_operation[constant[0.5] * call[name[self].get_max_time_step, parameter[]]] if compare[call[name[len], parameter[name[self].input_nodes]] not_equal[!=] call[name[len], parameter[name[inputs]]]] begin[:] <ast.Raise object at 0x7da1b19f1120> while compare[name[self].time_seconds less[<] name[final_time_seconds]] begin[:] variable[dt] assign[=] call[name[min], parameter[name[time_step], binary_operation[name[final_time_seconds] - name[self].time_seconds]]] variable[ivalues] assign[=] call[name[self].values][name[self].active] variable[ovalues] assign[=] call[name[self].values][binary_operation[constant[1] - name[self].active]] name[self].active assign[=] binary_operation[constant[1] - name[self].active] for taget[tuple[[<ast.Name object at 0x7da1b19f1a50>, <ast.Name object at 0x7da1b19f3ca0>]]] in starred[call[name[zip], parameter[name[self].input_nodes, name[inputs]]]] begin[:] call[name[ivalues]][name[i]] assign[=] name[v] call[name[ovalues]][name[i]] assign[=] name[v] for taget[tuple[[<ast.Name object at 0x7da1b19f3d90>, <ast.Name object at 0x7da1b19f3ac0>]]] in starred[call[name[iteritems], parameter[name[self].node_evals]]] begin[:] variable[node_inputs] assign[=] <ast.ListComp object at 0x7da1b19f1090> variable[s] assign[=] call[name[ne].aggregation, parameter[name[node_inputs]]] variable[z] assign[=] call[name[ne].activation, parameter[binary_operation[name[ne].bias + binary_operation[name[ne].response * name[s]]]]] <ast.AugAssign object at 0x7da1b19f1660> <ast.AugAssign object at 0x7da1b19f22f0> variable[ovalues] assign[=] 
call[name[self].values][binary_operation[constant[1] - name[self].active]] return[<ast.ListComp object at 0x7da1b19f01c0>]
keyword[def] identifier[advance] ( identifier[self] , identifier[inputs] , identifier[advance_time] , identifier[time_step] = keyword[None] ): literal[string] identifier[final_time_seconds] = identifier[self] . identifier[time_seconds] + identifier[advance_time] keyword[if] identifier[time_step] keyword[is] keyword[None] : identifier[time_step] = literal[int] * identifier[self] . identifier[get_max_time_step] () keyword[if] identifier[len] ( identifier[self] . identifier[input_nodes] )!= identifier[len] ( identifier[inputs] ): keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[input_nodes] ), identifier[len] ( identifier[inputs] ))) keyword[while] identifier[self] . identifier[time_seconds] < identifier[final_time_seconds] : identifier[dt] = identifier[min] ( identifier[time_step] , identifier[final_time_seconds] - identifier[self] . identifier[time_seconds] ) identifier[ivalues] = identifier[self] . identifier[values] [ identifier[self] . identifier[active] ] identifier[ovalues] = identifier[self] . identifier[values] [ literal[int] - identifier[self] . identifier[active] ] identifier[self] . identifier[active] = literal[int] - identifier[self] . identifier[active] keyword[for] identifier[i] , identifier[v] keyword[in] identifier[zip] ( identifier[self] . identifier[input_nodes] , identifier[inputs] ): identifier[ivalues] [ identifier[i] ]= identifier[v] identifier[ovalues] [ identifier[i] ]= identifier[v] keyword[for] identifier[node_key] , identifier[ne] keyword[in] identifier[iteritems] ( identifier[self] . identifier[node_evals] ): identifier[node_inputs] =[ identifier[ivalues] [ identifier[i] ]* identifier[w] keyword[for] identifier[i] , identifier[w] keyword[in] identifier[ne] . identifier[links] ] identifier[s] = identifier[ne] . identifier[aggregation] ( identifier[node_inputs] ) identifier[z] = identifier[ne] . identifier[activation] ( identifier[ne] . 
identifier[bias] + identifier[ne] . identifier[response] * identifier[s] ) identifier[ovalues] [ identifier[node_key] ]+= identifier[dt] / identifier[ne] . identifier[time_constant] *(- identifier[ovalues] [ identifier[node_key] ]+ identifier[z] ) identifier[self] . identifier[time_seconds] += identifier[dt] identifier[ovalues] = identifier[self] . identifier[values] [ literal[int] - identifier[self] . identifier[active] ] keyword[return] [ identifier[ovalues] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[output_nodes] ]
def advance(self, inputs, advance_time, time_step=None): """ Advance the simulation by the given amount of time, assuming that inputs are constant at the given values during the simulated time. """ final_time_seconds = self.time_seconds + advance_time # Use half of the max allowed time step if none is given. if time_step is None: # pragma: no cover time_step = 0.5 * self.get_max_time_step() # depends on [control=['if'], data=['time_step']] if len(self.input_nodes) != len(inputs): raise RuntimeError('Expected {0} inputs, got {1}'.format(len(self.input_nodes), len(inputs))) # depends on [control=['if'], data=[]] while self.time_seconds < final_time_seconds: dt = min(time_step, final_time_seconds - self.time_seconds) ivalues = self.values[self.active] ovalues = self.values[1 - self.active] self.active = 1 - self.active for (i, v) in zip(self.input_nodes, inputs): ivalues[i] = v ovalues[i] = v # depends on [control=['for'], data=[]] for (node_key, ne) in iteritems(self.node_evals): node_inputs = [ivalues[i] * w for (i, w) in ne.links] s = ne.aggregation(node_inputs) z = ne.activation(ne.bias + ne.response * s) ovalues[node_key] += dt / ne.time_constant * (-ovalues[node_key] + z) # depends on [control=['for'], data=[]] self.time_seconds += dt # depends on [control=['while'], data=['final_time_seconds']] ovalues = self.values[1 - self.active] return [ovalues[i] for i in self.output_nodes]
def add(self, start, end): """ Add the start and end offsets of a matching read. @param start: The C{int} start offset of the read match in the subject. @param end: The C{int} end offset of the read match in the subject. This is Python-style: the end offset is not included in the match. """ assert start <= end self._intervals.append((start, end))
def function[add, parameter[self, start, end]]: constant[ Add the start and end offsets of a matching read. @param start: The C{int} start offset of the read match in the subject. @param end: The C{int} end offset of the read match in the subject. This is Python-style: the end offset is not included in the match. ] assert[compare[name[start] less_or_equal[<=] name[end]]] call[name[self]._intervals.append, parameter[tuple[[<ast.Name object at 0x7da18bcc92d0>, <ast.Name object at 0x7da18bccafe0>]]]]
keyword[def] identifier[add] ( identifier[self] , identifier[start] , identifier[end] ): literal[string] keyword[assert] identifier[start] <= identifier[end] identifier[self] . identifier[_intervals] . identifier[append] (( identifier[start] , identifier[end] ))
def add(self, start, end): """ Add the start and end offsets of a matching read. @param start: The C{int} start offset of the read match in the subject. @param end: The C{int} end offset of the read match in the subject. This is Python-style: the end offset is not included in the match. """ assert start <= end self._intervals.append((start, end))
def local_id(personal): """ Executor for `globus endpoint local-id` """ if personal: try: ep_id = LocalGlobusConnectPersonal().endpoint_id except IOError as e: safeprint(e, write_to_stderr=True) click.get_current_context().exit(1) if ep_id is not None: safeprint(ep_id) else: safeprint("No Globus Connect Personal installation found.") click.get_current_context().exit(1)
def function[local_id, parameter[personal]]: constant[ Executor for `globus endpoint local-id` ] if name[personal] begin[:] <ast.Try object at 0x7da18ede5540> if compare[name[ep_id] is_not constant[None]] begin[:] call[name[safeprint], parameter[name[ep_id]]]
keyword[def] identifier[local_id] ( identifier[personal] ): literal[string] keyword[if] identifier[personal] : keyword[try] : identifier[ep_id] = identifier[LocalGlobusConnectPersonal] (). identifier[endpoint_id] keyword[except] identifier[IOError] keyword[as] identifier[e] : identifier[safeprint] ( identifier[e] , identifier[write_to_stderr] = keyword[True] ) identifier[click] . identifier[get_current_context] (). identifier[exit] ( literal[int] ) keyword[if] identifier[ep_id] keyword[is] keyword[not] keyword[None] : identifier[safeprint] ( identifier[ep_id] ) keyword[else] : identifier[safeprint] ( literal[string] ) identifier[click] . identifier[get_current_context] (). identifier[exit] ( literal[int] )
def local_id(personal): """ Executor for `globus endpoint local-id` """ if personal: try: ep_id = LocalGlobusConnectPersonal().endpoint_id # depends on [control=['try'], data=[]] except IOError as e: safeprint(e, write_to_stderr=True) click.get_current_context().exit(1) # depends on [control=['except'], data=['e']] if ep_id is not None: safeprint(ep_id) # depends on [control=['if'], data=['ep_id']] else: safeprint('No Globus Connect Personal installation found.') click.get_current_context().exit(1) # depends on [control=['if'], data=[]]
def _compare_keys(element1, element2, key, compare_func, *args): """ Compares a specific key between two elements of a basis set If the key exists in one element but not the other, False is returned. If the key exists in neither element, True is returned. Parameters ---------- element1 : dict Basis info for an element element2 : dict Basis info for another element key : string Key to compare in the two elements compare_func : function Function that returns True if the data under the key is equivalent in both elements args Additional arguments to be passed to compare_Func """ if key in element1 and key in element2: if not compare_func(element1[key], element2[key], *args): return False elif key in element1 or key in element2: return False return True
def function[_compare_keys, parameter[element1, element2, key, compare_func]]: constant[ Compares a specific key between two elements of a basis set If the key exists in one element but not the other, False is returned. If the key exists in neither element, True is returned. Parameters ---------- element1 : dict Basis info for an element element2 : dict Basis info for another element key : string Key to compare in the two elements compare_func : function Function that returns True if the data under the key is equivalent in both elements args Additional arguments to be passed to compare_Func ] if <ast.BoolOp object at 0x7da1b1e9b4f0> begin[:] if <ast.UnaryOp object at 0x7da1b1e981f0> begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[_compare_keys] ( identifier[element1] , identifier[element2] , identifier[key] , identifier[compare_func] ,* identifier[args] ): literal[string] keyword[if] identifier[key] keyword[in] identifier[element1] keyword[and] identifier[key] keyword[in] identifier[element2] : keyword[if] keyword[not] identifier[compare_func] ( identifier[element1] [ identifier[key] ], identifier[element2] [ identifier[key] ],* identifier[args] ): keyword[return] keyword[False] keyword[elif] identifier[key] keyword[in] identifier[element1] keyword[or] identifier[key] keyword[in] identifier[element2] : keyword[return] keyword[False] keyword[return] keyword[True]
def _compare_keys(element1, element2, key, compare_func, *args): """ Compares a specific key between two elements of a basis set If the key exists in one element but not the other, False is returned. If the key exists in neither element, True is returned. Parameters ---------- element1 : dict Basis info for an element element2 : dict Basis info for another element key : string Key to compare in the two elements compare_func : function Function that returns True if the data under the key is equivalent in both elements args Additional arguments to be passed to compare_Func """ if key in element1 and key in element2: if not compare_func(element1[key], element2[key], *args): return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif key in element1 or key in element2: return False # depends on [control=['if'], data=[]] return True
def siret(self, max_sequential_digits=2): """ Generates a siret number (14 digits). It is in fact the result of the concatenation of a siren number (9 digits), a sequential number (4 digits) and a control number (1 digit) concatenation. If $max_sequential_digits is invalid, it is set to 2. :param max_sequential_digits The maximum number of digits for the sequential number (> 0 && <= 4). """ if max_sequential_digits > 4 or max_sequential_digits <= 0: max_sequential_digits = 2 sequential_number = str(self.random_number( max_sequential_digits)).zfill(4) return self.numerify(self.siren() + ' ' + sequential_number + '#')
def function[siret, parameter[self, max_sequential_digits]]: constant[ Generates a siret number (14 digits). It is in fact the result of the concatenation of a siren number (9 digits), a sequential number (4 digits) and a control number (1 digit) concatenation. If $max_sequential_digits is invalid, it is set to 2. :param max_sequential_digits The maximum number of digits for the sequential number (> 0 && <= 4). ] if <ast.BoolOp object at 0x7da207f9b3a0> begin[:] variable[max_sequential_digits] assign[=] constant[2] variable[sequential_number] assign[=] call[call[name[str], parameter[call[name[self].random_number, parameter[name[max_sequential_digits]]]]].zfill, parameter[constant[4]]] return[call[name[self].numerify, parameter[binary_operation[binary_operation[binary_operation[call[name[self].siren, parameter[]] + constant[ ]] + name[sequential_number]] + constant[#]]]]]
keyword[def] identifier[siret] ( identifier[self] , identifier[max_sequential_digits] = literal[int] ): literal[string] keyword[if] identifier[max_sequential_digits] > literal[int] keyword[or] identifier[max_sequential_digits] <= literal[int] : identifier[max_sequential_digits] = literal[int] identifier[sequential_number] = identifier[str] ( identifier[self] . identifier[random_number] ( identifier[max_sequential_digits] )). identifier[zfill] ( literal[int] ) keyword[return] identifier[self] . identifier[numerify] ( identifier[self] . identifier[siren] ()+ literal[string] + identifier[sequential_number] + literal[string] )
def siret(self, max_sequential_digits=2): """ Generates a siret number (14 digits). It is in fact the result of the concatenation of a siren number (9 digits), a sequential number (4 digits) and a control number (1 digit) concatenation. If $max_sequential_digits is invalid, it is set to 2. :param max_sequential_digits The maximum number of digits for the sequential number (> 0 && <= 4). """ if max_sequential_digits > 4 or max_sequential_digits <= 0: max_sequential_digits = 2 # depends on [control=['if'], data=[]] sequential_number = str(self.random_number(max_sequential_digits)).zfill(4) return self.numerify(self.siren() + ' ' + sequential_number + '#')
def _cc(self): """ implementation of the efficient bilayer cross counting by insert-sort (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting") """ g=self.layout.grx P=[] for v in self: P.extend(sorted([g[x].pos for x in self._neighbors(v)])) # count inversions in P: s = [] count = 0 for i,p in enumerate(P): j = bisect(s,p) if j<i: count += (i-j) s.insert(j,p) return count
def function[_cc, parameter[self]]: constant[ implementation of the efficient bilayer cross counting by insert-sort (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting") ] variable[g] assign[=] name[self].layout.grx variable[P] assign[=] list[[]] for taget[name[v]] in starred[name[self]] begin[:] call[name[P].extend, parameter[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b10250c0>]]]] variable[s] assign[=] list[[]] variable[count] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b1024130>, <ast.Name object at 0x7da1b1024cd0>]]] in starred[call[name[enumerate], parameter[name[P]]]] begin[:] variable[j] assign[=] call[name[bisect], parameter[name[s], name[p]]] if compare[name[j] less[<] name[i]] begin[:] <ast.AugAssign object at 0x7da1b1024070> call[name[s].insert, parameter[name[j], name[p]]] return[name[count]]
keyword[def] identifier[_cc] ( identifier[self] ): literal[string] identifier[g] = identifier[self] . identifier[layout] . identifier[grx] identifier[P] =[] keyword[for] identifier[v] keyword[in] identifier[self] : identifier[P] . identifier[extend] ( identifier[sorted] ([ identifier[g] [ identifier[x] ]. identifier[pos] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[_neighbors] ( identifier[v] )])) identifier[s] =[] identifier[count] = literal[int] keyword[for] identifier[i] , identifier[p] keyword[in] identifier[enumerate] ( identifier[P] ): identifier[j] = identifier[bisect] ( identifier[s] , identifier[p] ) keyword[if] identifier[j] < identifier[i] : identifier[count] +=( identifier[i] - identifier[j] ) identifier[s] . identifier[insert] ( identifier[j] , identifier[p] ) keyword[return] identifier[count]
def _cc(self): """ implementation of the efficient bilayer cross counting by insert-sort (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting") """ g = self.layout.grx P = [] for v in self: P.extend(sorted([g[x].pos for x in self._neighbors(v)])) # depends on [control=['for'], data=['v']] # count inversions in P: s = [] count = 0 for (i, p) in enumerate(P): j = bisect(s, p) if j < i: count += i - j # depends on [control=['if'], data=['j', 'i']] s.insert(j, p) # depends on [control=['for'], data=[]] return count
def list_streams(self, session_id): """ Returns a list of Stream objects that contains information of all the streams in a OpenTok session, with the following attributes: -count: An integer that indicates the number of streams in the session -items: List of the Stream objects """ endpoint = self.endpoints.get_stream_url(session_id) response = requests.get( endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 200: return StreamList(response.json()) elif response.status_code == 400: raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.') elif response.status_code == 403: raise AuthError('You passed in an invalid OpenTok API key or JWT token.') else: raise RequestError('An unexpected error occurred', response.status_code)
def function[list_streams, parameter[self, session_id]]: constant[ Returns a list of Stream objects that contains information of all the streams in a OpenTok session, with the following attributes: -count: An integer that indicates the number of streams in the session -items: List of the Stream objects ] variable[endpoint] assign[=] call[name[self].endpoints.get_stream_url, parameter[name[session_id]]] variable[response] assign[=] call[name[requests].get, parameter[name[endpoint]]] if compare[name[response].status_code equal[==] constant[200]] begin[:] return[call[name[StreamList], parameter[call[name[response].json, parameter[]]]]]
keyword[def] identifier[list_streams] ( identifier[self] , identifier[session_id] ): literal[string] identifier[endpoint] = identifier[self] . identifier[endpoints] . identifier[get_stream_url] ( identifier[session_id] ) identifier[response] = identifier[requests] . identifier[get] ( identifier[endpoint] , identifier[headers] = identifier[self] . identifier[json_headers] (), identifier[proxies] = identifier[self] . identifier[proxies] , identifier[timeout] = identifier[self] . identifier[timeout] ) keyword[if] identifier[response] . identifier[status_code] == literal[int] : keyword[return] identifier[StreamList] ( identifier[response] . identifier[json] ()) keyword[elif] identifier[response] . identifier[status_code] == literal[int] : keyword[raise] identifier[GetStreamError] ( literal[string] ) keyword[elif] identifier[response] . identifier[status_code] == literal[int] : keyword[raise] identifier[AuthError] ( literal[string] ) keyword[else] : keyword[raise] identifier[RequestError] ( literal[string] , identifier[response] . identifier[status_code] )
def list_streams(self, session_id): """ Returns a list of Stream objects that contains information of all the streams in a OpenTok session, with the following attributes: -count: An integer that indicates the number of streams in the session -items: List of the Stream objects """ endpoint = self.endpoints.get_stream_url(session_id) response = requests.get(endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout) if response.status_code == 200: return StreamList(response.json()) # depends on [control=['if'], data=[]] elif response.status_code == 400: raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.') # depends on [control=['if'], data=[]] elif response.status_code == 403: raise AuthError('You passed in an invalid OpenTok API key or JWT token.') # depends on [control=['if'], data=[]] else: raise RequestError('An unexpected error occurred', response.status_code)
def get_hashhash(self, username): """ Generate a digest of the htpasswd hash """ return hashlib.sha256( self.users.get_hash(username) ).hexdigest()
def function[get_hashhash, parameter[self, username]]: constant[ Generate a digest of the htpasswd hash ] return[call[call[name[hashlib].sha256, parameter[call[name[self].users.get_hash, parameter[name[username]]]]].hexdigest, parameter[]]]
keyword[def] identifier[get_hashhash] ( identifier[self] , identifier[username] ): literal[string] keyword[return] identifier[hashlib] . identifier[sha256] ( identifier[self] . identifier[users] . identifier[get_hash] ( identifier[username] ) ). identifier[hexdigest] ()
def get_hashhash(self, username): """ Generate a digest of the htpasswd hash """ return hashlib.sha256(self.users.get_hash(username)).hexdigest()
def output(self) -> None: """Pretty print travel times.""" print("%s - %s" % (self.station, self.now)) print(self.products_filter) for j in sorted(self.journeys, key=lambda k: k.real_departure)[ : self.max_journeys ]: print("-------------") print(f"{j.product}: {j.number} ({j.train_id})") print(f"Richtung: {j.direction}") print(f"Abfahrt in {j.real_departure} min.") print(f"Abfahrt {j.departure.time()} (+{j.delay})") print(f"Nächste Haltestellen: {([s['station'] for s in j.stops])}") if j.info: print(f"Hinweis: {j.info}") print(f"Hinweis (lang): {j.info_long}") print(f"Icon: {j.icon}")
def function[output, parameter[self]]: constant[Pretty print travel times.] call[name[print], parameter[binary_operation[constant[%s - %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b11a8b80>, <ast.Attribute object at 0x7da1b11a81c0>]]]]] call[name[print], parameter[name[self].products_filter]] for taget[name[j]] in starred[call[call[name[sorted], parameter[name[self].journeys]]][<ast.Slice object at 0x7da1b11965f0>]] begin[:] call[name[print], parameter[constant[-------------]]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1197a00>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1197280>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1197f10>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1197130>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1131d20>]] if name[j].info begin[:] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1131180>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1131db0>]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b1132290>]]
keyword[def] identifier[output] ( identifier[self] )-> keyword[None] : literal[string] identifier[print] ( literal[string] %( identifier[self] . identifier[station] , identifier[self] . identifier[now] )) identifier[print] ( identifier[self] . identifier[products_filter] ) keyword[for] identifier[j] keyword[in] identifier[sorted] ( identifier[self] . identifier[journeys] , identifier[key] = keyword[lambda] identifier[k] : identifier[k] . identifier[real_departure] )[ : identifier[self] . identifier[max_journeys] ]: identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) keyword[if] identifier[j] . identifier[info] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] )
def output(self) -> None: """Pretty print travel times.""" print('%s - %s' % (self.station, self.now)) print(self.products_filter) for j in sorted(self.journeys, key=lambda k: k.real_departure)[:self.max_journeys]: print('-------------') print(f'{j.product}: {j.number} ({j.train_id})') print(f'Richtung: {j.direction}') print(f'Abfahrt in {j.real_departure} min.') print(f'Abfahrt {j.departure.time()} (+{j.delay})') print(f"Nächste Haltestellen: {[s['station'] for s in j.stops]}") if j.info: print(f'Hinweis: {j.info}') print(f'Hinweis (lang): {j.info_long}') # depends on [control=['if'], data=[]] print(f'Icon: {j.icon}') # depends on [control=['for'], data=['j']]
def from_binary_string(self, notification): """Unpack the notification from binary string.""" command = struct.unpack('>B', notification[0])[0] if command != self.COMMAND: raise NotificationInvalidCommandError() length = struct.unpack('>I', notification[1:5])[0] notification = notification[5:] offset = 0 def next_item(offset): iden, length = struct.unpack('>BH', notification[offset:offset+3]) offset += 3 payload = notification[offset:offset+length] offset += length if iden == self.PAYLOAD: payload = struct.unpack('>{0}s'.format(length), payload)[0] self.payload = json.loads(payload) elif iden == self.TOKEN: payload = struct.unpack('>{0}s'.format(length), payload)[0] self.token = binascii.hexlify(payload) elif iden == self.PRIORITY: self.priority = struct.unpack('>B', payload)[0] elif iden == self.NOTIFICATION_ID: self.iden = struct.unpack('>I', payload)[0] elif iden == self.EXPIRE: payload = struct.unpack('>I', payload)[0] self.expire = (self.EXPIRE_IMMEDIATELY if payload == 0 else datetime.fromtimestamp(payload)) else: raise NotificationInvalidIdError() return offset while offset < length: offset = next_item(offset)
def function[from_binary_string, parameter[self, notification]]: constant[Unpack the notification from binary string.] variable[command] assign[=] call[call[name[struct].unpack, parameter[constant[>B], call[name[notification]][constant[0]]]]][constant[0]] if compare[name[command] not_equal[!=] name[self].COMMAND] begin[:] <ast.Raise object at 0x7da20c796920> variable[length] assign[=] call[call[name[struct].unpack, parameter[constant[>I], call[name[notification]][<ast.Slice object at 0x7da20c795d50>]]]][constant[0]] variable[notification] assign[=] call[name[notification]][<ast.Slice object at 0x7da20c7968f0>] variable[offset] assign[=] constant[0] def function[next_item, parameter[offset]]: <ast.Tuple object at 0x7da20c796140> assign[=] call[name[struct].unpack, parameter[constant[>BH], call[name[notification]][<ast.Slice object at 0x7da20c794ee0>]]] <ast.AugAssign object at 0x7da20c795e70> variable[payload] assign[=] call[name[notification]][<ast.Slice object at 0x7da20c795f00>] <ast.AugAssign object at 0x7da20c7945e0> if compare[name[iden] equal[==] name[self].PAYLOAD] begin[:] variable[payload] assign[=] call[call[name[struct].unpack, parameter[call[constant[>{0}s].format, parameter[name[length]]], name[payload]]]][constant[0]] name[self].payload assign[=] call[name[json].loads, parameter[name[payload]]] return[name[offset]] while compare[name[offset] less[<] name[length]] begin[:] variable[offset] assign[=] call[name[next_item], parameter[name[offset]]]
keyword[def] identifier[from_binary_string] ( identifier[self] , identifier[notification] ): literal[string] identifier[command] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[notification] [ literal[int] ])[ literal[int] ] keyword[if] identifier[command] != identifier[self] . identifier[COMMAND] : keyword[raise] identifier[NotificationInvalidCommandError] () identifier[length] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[notification] [ literal[int] : literal[int] ])[ literal[int] ] identifier[notification] = identifier[notification] [ literal[int] :] identifier[offset] = literal[int] keyword[def] identifier[next_item] ( identifier[offset] ): identifier[iden] , identifier[length] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[notification] [ identifier[offset] : identifier[offset] + literal[int] ]) identifier[offset] += literal[int] identifier[payload] = identifier[notification] [ identifier[offset] : identifier[offset] + identifier[length] ] identifier[offset] += identifier[length] keyword[if] identifier[iden] == identifier[self] . identifier[PAYLOAD] : identifier[payload] = identifier[struct] . identifier[unpack] ( literal[string] . identifier[format] ( identifier[length] ), identifier[payload] )[ literal[int] ] identifier[self] . identifier[payload] = identifier[json] . identifier[loads] ( identifier[payload] ) keyword[elif] identifier[iden] == identifier[self] . identifier[TOKEN] : identifier[payload] = identifier[struct] . identifier[unpack] ( literal[string] . identifier[format] ( identifier[length] ), identifier[payload] )[ literal[int] ] identifier[self] . identifier[token] = identifier[binascii] . identifier[hexlify] ( identifier[payload] ) keyword[elif] identifier[iden] == identifier[self] . identifier[PRIORITY] : identifier[self] . identifier[priority] = identifier[struct] . 
identifier[unpack] ( literal[string] , identifier[payload] )[ literal[int] ] keyword[elif] identifier[iden] == identifier[self] . identifier[NOTIFICATION_ID] : identifier[self] . identifier[iden] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[payload] )[ literal[int] ] keyword[elif] identifier[iden] == identifier[self] . identifier[EXPIRE] : identifier[payload] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[payload] )[ literal[int] ] identifier[self] . identifier[expire] =( identifier[self] . identifier[EXPIRE_IMMEDIATELY] keyword[if] identifier[payload] == literal[int] keyword[else] identifier[datetime] . identifier[fromtimestamp] ( identifier[payload] )) keyword[else] : keyword[raise] identifier[NotificationInvalidIdError] () keyword[return] identifier[offset] keyword[while] identifier[offset] < identifier[length] : identifier[offset] = identifier[next_item] ( identifier[offset] )
def from_binary_string(self, notification): """Unpack the notification from binary string.""" command = struct.unpack('>B', notification[0])[0] if command != self.COMMAND: raise NotificationInvalidCommandError() # depends on [control=['if'], data=[]] length = struct.unpack('>I', notification[1:5])[0] notification = notification[5:] offset = 0 def next_item(offset): (iden, length) = struct.unpack('>BH', notification[offset:offset + 3]) offset += 3 payload = notification[offset:offset + length] offset += length if iden == self.PAYLOAD: payload = struct.unpack('>{0}s'.format(length), payload)[0] self.payload = json.loads(payload) # depends on [control=['if'], data=[]] elif iden == self.TOKEN: payload = struct.unpack('>{0}s'.format(length), payload)[0] self.token = binascii.hexlify(payload) # depends on [control=['if'], data=[]] elif iden == self.PRIORITY: self.priority = struct.unpack('>B', payload)[0] # depends on [control=['if'], data=[]] elif iden == self.NOTIFICATION_ID: self.iden = struct.unpack('>I', payload)[0] # depends on [control=['if'], data=[]] elif iden == self.EXPIRE: payload = struct.unpack('>I', payload)[0] self.expire = self.EXPIRE_IMMEDIATELY if payload == 0 else datetime.fromtimestamp(payload) # depends on [control=['if'], data=[]] else: raise NotificationInvalidIdError() return offset while offset < length: offset = next_item(offset) # depends on [control=['while'], data=['offset']]
def medium(self, medium): """Get or set the constraints on the model exchanges. `model.medium` returns a dictionary of the bounds for each of the boundary reactions, in the form of `{rxn_id: bound}`, where `bound` specifies the absolute value of the bound in direction of metabolite creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`) Parameters ---------- medium: dictionary-like The medium to initialize. medium should be a dictionary defining `{rxn_id: bound}` pairs. """ def set_active_bound(reaction, bound): if reaction.reactants: reaction.lower_bound = -bound elif reaction.products: reaction.upper_bound = bound # Set the given media bounds media_rxns = list() exchange_rxns = frozenset(self.exchanges) for rxn_id, bound in iteritems(medium): rxn = self.reactions.get_by_id(rxn_id) if rxn not in exchange_rxns: LOGGER.warn("%s does not seem to be an" " an exchange reaction. Applying bounds anyway.", rxn.id) media_rxns.append(rxn) set_active_bound(rxn, bound) media_rxns = frozenset(media_rxns) # Turn off reactions not present in media for rxn in (exchange_rxns - media_rxns): set_active_bound(rxn, 0)
def function[medium, parameter[self, medium]]: constant[Get or set the constraints on the model exchanges. `model.medium` returns a dictionary of the bounds for each of the boundary reactions, in the form of `{rxn_id: bound}`, where `bound` specifies the absolute value of the bound in direction of metabolite creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`) Parameters ---------- medium: dictionary-like The medium to initialize. medium should be a dictionary defining `{rxn_id: bound}` pairs. ] def function[set_active_bound, parameter[reaction, bound]]: if name[reaction].reactants begin[:] name[reaction].lower_bound assign[=] <ast.UnaryOp object at 0x7da20e9570a0> variable[media_rxns] assign[=] call[name[list], parameter[]] variable[exchange_rxns] assign[=] call[name[frozenset], parameter[name[self].exchanges]] for taget[tuple[[<ast.Name object at 0x7da1b007c8e0>, <ast.Name object at 0x7da1b007c910>]]] in starred[call[name[iteritems], parameter[name[medium]]]] begin[:] variable[rxn] assign[=] call[name[self].reactions.get_by_id, parameter[name[rxn_id]]] if compare[name[rxn] <ast.NotIn object at 0x7da2590d7190> name[exchange_rxns]] begin[:] call[name[LOGGER].warn, parameter[constant[%s does not seem to be an an exchange reaction. Applying bounds anyway.], name[rxn].id]] call[name[media_rxns].append, parameter[name[rxn]]] call[name[set_active_bound], parameter[name[rxn], name[bound]]] variable[media_rxns] assign[=] call[name[frozenset], parameter[name[media_rxns]]] for taget[name[rxn]] in starred[binary_operation[name[exchange_rxns] - name[media_rxns]]] begin[:] call[name[set_active_bound], parameter[name[rxn], constant[0]]]
keyword[def] identifier[medium] ( identifier[self] , identifier[medium] ): literal[string] keyword[def] identifier[set_active_bound] ( identifier[reaction] , identifier[bound] ): keyword[if] identifier[reaction] . identifier[reactants] : identifier[reaction] . identifier[lower_bound] =- identifier[bound] keyword[elif] identifier[reaction] . identifier[products] : identifier[reaction] . identifier[upper_bound] = identifier[bound] identifier[media_rxns] = identifier[list] () identifier[exchange_rxns] = identifier[frozenset] ( identifier[self] . identifier[exchanges] ) keyword[for] identifier[rxn_id] , identifier[bound] keyword[in] identifier[iteritems] ( identifier[medium] ): identifier[rxn] = identifier[self] . identifier[reactions] . identifier[get_by_id] ( identifier[rxn_id] ) keyword[if] identifier[rxn] keyword[not] keyword[in] identifier[exchange_rxns] : identifier[LOGGER] . identifier[warn] ( literal[string] literal[string] , identifier[rxn] . identifier[id] ) identifier[media_rxns] . identifier[append] ( identifier[rxn] ) identifier[set_active_bound] ( identifier[rxn] , identifier[bound] ) identifier[media_rxns] = identifier[frozenset] ( identifier[media_rxns] ) keyword[for] identifier[rxn] keyword[in] ( identifier[exchange_rxns] - identifier[media_rxns] ): identifier[set_active_bound] ( identifier[rxn] , literal[int] )
def medium(self, medium): """Get or set the constraints on the model exchanges. `model.medium` returns a dictionary of the bounds for each of the boundary reactions, in the form of `{rxn_id: bound}`, where `bound` specifies the absolute value of the bound in direction of metabolite creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`) Parameters ---------- medium: dictionary-like The medium to initialize. medium should be a dictionary defining `{rxn_id: bound}` pairs. """ def set_active_bound(reaction, bound): if reaction.reactants: reaction.lower_bound = -bound # depends on [control=['if'], data=[]] elif reaction.products: reaction.upper_bound = bound # depends on [control=['if'], data=[]] # Set the given media bounds media_rxns = list() exchange_rxns = frozenset(self.exchanges) for (rxn_id, bound) in iteritems(medium): rxn = self.reactions.get_by_id(rxn_id) if rxn not in exchange_rxns: LOGGER.warn('%s does not seem to be an an exchange reaction. Applying bounds anyway.', rxn.id) # depends on [control=['if'], data=['rxn']] media_rxns.append(rxn) set_active_bound(rxn, bound) # depends on [control=['for'], data=[]] media_rxns = frozenset(media_rxns) # Turn off reactions not present in media for rxn in exchange_rxns - media_rxns: set_active_bound(rxn, 0) # depends on [control=['for'], data=['rxn']]
def absent(name, auth=None): ''' Ensure a subnet does not exists name Name of the subnet ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['neutronng.setup_clouds'](auth) subnet = __salt__['neutronng.subnet_get'](name=name) if subnet: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': subnet.id} ret['comment'] = 'Project will be deleted.' return ret __salt__['neutronng.subnet_delete'](name=subnet) ret['changes']['id'] = name ret['comment'] = 'Deleted subnet' return ret
def function[absent, parameter[name, auth]]: constant[ Ensure a subnet does not exists name Name of the subnet ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21345e0>, <ast.Constant object at 0x7da1b2136320>, <ast.Constant object at 0x7da1b2136fb0>, <ast.Constant object at 0x7da1b21370d0>], [<ast.Name object at 0x7da1b2137730>, <ast.Dict object at 0x7da1b21376d0>, <ast.Constant object at 0x7da1b2137700>, <ast.Constant object at 0x7da1b21376a0>]] call[call[name[__salt__]][constant[neutronng.setup_clouds]], parameter[name[auth]]] variable[subnet] assign[=] call[call[name[__salt__]][constant[neutronng.subnet_get]], parameter[]] if name[subnet] begin[:] if compare[call[name[__opts__]][constant[test]] is constant[True]] begin[:] call[name[ret]][constant[result]] assign[=] constant[None] call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2135510>], [<ast.Attribute object at 0x7da1b2134700>]] call[name[ret]][constant[comment]] assign[=] constant[Project will be deleted.] return[name[ret]] call[call[name[__salt__]][constant[neutronng.subnet_delete]], parameter[]] call[call[name[ret]][constant[changes]]][constant[id]] assign[=] name[name] call[name[ret]][constant[comment]] assign[=] constant[Deleted subnet] return[name[ret]]
keyword[def] identifier[absent] ( identifier[name] , identifier[auth] = keyword[None] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[True] , literal[string] : literal[string] } identifier[__salt__] [ literal[string] ]( identifier[auth] ) identifier[subnet] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] ) keyword[if] identifier[subnet] : keyword[if] identifier[__opts__] [ literal[string] ] keyword[is] keyword[True] : identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]={ literal[string] : identifier[subnet] . identifier[id] } identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret] identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[subnet] ) identifier[ret] [ literal[string] ][ literal[string] ]= identifier[name] identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret]
def absent(name, auth=None): """ Ensure a subnet does not exists name Name of the subnet """ ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['neutronng.setup_clouds'](auth) subnet = __salt__['neutronng.subnet_get'](name=name) if subnet: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': subnet.id} ret['comment'] = 'Project will be deleted.' return ret # depends on [control=['if'], data=[]] __salt__['neutronng.subnet_delete'](name=subnet) ret['changes']['id'] = name ret['comment'] = 'Deleted subnet' # depends on [control=['if'], data=[]] return ret
def linspace(start, stop, num, decimals=18): """ Returns a list of evenly spaced numbers over a specified interval. Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py :param start: starting value :type start: float :param stop: end value :type stop: float :param num: number of samples to generate :type num: int :param decimals: number of significands :type decimals: int :return: a list of equally spaced numbers :rtype: list """ start = float(start) stop = float(stop) if abs(start - stop) <= 10e-8: return [start] num = int(num) if num > 1: div = num - 1 delta = stop - start return [float(("{:." + str(decimals) + "f}").format((start + (float(x) * float(delta) / float(div))))) for x in range(num)] return [float(("{:." + str(decimals) + "f}").format(start))]
def function[linspace, parameter[start, stop, num, decimals]]: constant[ Returns a list of evenly spaced numbers over a specified interval. Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py :param start: starting value :type start: float :param stop: end value :type stop: float :param num: number of samples to generate :type num: int :param decimals: number of significands :type decimals: int :return: a list of equally spaced numbers :rtype: list ] variable[start] assign[=] call[name[float], parameter[name[start]]] variable[stop] assign[=] call[name[float], parameter[name[stop]]] if compare[call[name[abs], parameter[binary_operation[name[start] - name[stop]]]] less_or_equal[<=] constant[1e-07]] begin[:] return[list[[<ast.Name object at 0x7da1b17d5240>]]] variable[num] assign[=] call[name[int], parameter[name[num]]] if compare[name[num] greater[>] constant[1]] begin[:] variable[div] assign[=] binary_operation[name[num] - constant[1]] variable[delta] assign[=] binary_operation[name[stop] - name[start]] return[<ast.ListComp object at 0x7da1b17d7a60>] return[list[[<ast.Call object at 0x7da1b17d6dd0>]]]
keyword[def] identifier[linspace] ( identifier[start] , identifier[stop] , identifier[num] , identifier[decimals] = literal[int] ): literal[string] identifier[start] = identifier[float] ( identifier[start] ) identifier[stop] = identifier[float] ( identifier[stop] ) keyword[if] identifier[abs] ( identifier[start] - identifier[stop] )<= literal[int] : keyword[return] [ identifier[start] ] identifier[num] = identifier[int] ( identifier[num] ) keyword[if] identifier[num] > literal[int] : identifier[div] = identifier[num] - literal[int] identifier[delta] = identifier[stop] - identifier[start] keyword[return] [ identifier[float] (( literal[string] + identifier[str] ( identifier[decimals] )+ literal[string] ). identifier[format] (( identifier[start] +( identifier[float] ( identifier[x] )* identifier[float] ( identifier[delta] )/ identifier[float] ( identifier[div] ))))) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[num] )] keyword[return] [ identifier[float] (( literal[string] + identifier[str] ( identifier[decimals] )+ literal[string] ). identifier[format] ( identifier[start] ))]
def linspace(start, stop, num, decimals=18): """ Returns a list of evenly spaced numbers over a specified interval. Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py :param start: starting value :type start: float :param stop: end value :type stop: float :param num: number of samples to generate :type num: int :param decimals: number of significands :type decimals: int :return: a list of equally spaced numbers :rtype: list """ start = float(start) stop = float(stop) if abs(start - stop) <= 1e-07: return [start] # depends on [control=['if'], data=[]] num = int(num) if num > 1: div = num - 1 delta = stop - start return [float(('{:.' + str(decimals) + 'f}').format(start + float(x) * float(delta) / float(div))) for x in range(num)] # depends on [control=['if'], data=['num']] return [float(('{:.' + str(decimals) + 'f}').format(start))]
def _unpack_edition(cls, value): """ Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute """ components = value.split(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) d = dict() ed = components[1] sw_ed = components[2] t_sw = components[3] t_hw = components[4] oth = components[5] ck = CPEComponent.ATT_EDITION d[ck] = CPE2_3_URI._create_component(ck, ed) ck = CPEComponent.ATT_SW_EDITION d[ck] = CPE2_3_URI._create_component(ck, sw_ed) ck = CPEComponent.ATT_TARGET_SW d[ck] = CPE2_3_URI._create_component(ck, t_sw) ck = CPEComponent.ATT_TARGET_HW d[ck] = CPE2_3_URI._create_component(ck, t_hw) ck = CPEComponent.ATT_OTHER d[ck] = CPE2_3_URI._create_component(ck, oth) return d
def function[_unpack_edition, parameter[cls, value]]: constant[ Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute ] variable[components] assign[=] call[name[value].split, parameter[name[CPEComponent2_3_URI].SEPARATOR_PACKED_EDITION]] variable[d] assign[=] call[name[dict], parameter[]] variable[ed] assign[=] call[name[components]][constant[1]] variable[sw_ed] assign[=] call[name[components]][constant[2]] variable[t_sw] assign[=] call[name[components]][constant[3]] variable[t_hw] assign[=] call[name[components]][constant[4]] variable[oth] assign[=] call[name[components]][constant[5]] variable[ck] assign[=] name[CPEComponent].ATT_EDITION call[name[d]][name[ck]] assign[=] call[name[CPE2_3_URI]._create_component, parameter[name[ck], name[ed]]] variable[ck] assign[=] name[CPEComponent].ATT_SW_EDITION call[name[d]][name[ck]] assign[=] call[name[CPE2_3_URI]._create_component, parameter[name[ck], name[sw_ed]]] variable[ck] assign[=] name[CPEComponent].ATT_TARGET_SW call[name[d]][name[ck]] assign[=] call[name[CPE2_3_URI]._create_component, parameter[name[ck], name[t_sw]]] variable[ck] assign[=] name[CPEComponent].ATT_TARGET_HW call[name[d]][name[ck]] assign[=] call[name[CPE2_3_URI]._create_component, parameter[name[ck], name[t_hw]]] variable[ck] assign[=] name[CPEComponent].ATT_OTHER call[name[d]][name[ck]] assign[=] call[name[CPE2_3_URI]._create_component, parameter[name[ck], name[oth]]] return[name[d]]
keyword[def] identifier[_unpack_edition] ( identifier[cls] , identifier[value] ): literal[string] identifier[components] = identifier[value] . identifier[split] ( identifier[CPEComponent2_3_URI] . identifier[SEPARATOR_PACKED_EDITION] ) identifier[d] = identifier[dict] () identifier[ed] = identifier[components] [ literal[int] ] identifier[sw_ed] = identifier[components] [ literal[int] ] identifier[t_sw] = identifier[components] [ literal[int] ] identifier[t_hw] = identifier[components] [ literal[int] ] identifier[oth] = identifier[components] [ literal[int] ] identifier[ck] = identifier[CPEComponent] . identifier[ATT_EDITION] identifier[d] [ identifier[ck] ]= identifier[CPE2_3_URI] . identifier[_create_component] ( identifier[ck] , identifier[ed] ) identifier[ck] = identifier[CPEComponent] . identifier[ATT_SW_EDITION] identifier[d] [ identifier[ck] ]= identifier[CPE2_3_URI] . identifier[_create_component] ( identifier[ck] , identifier[sw_ed] ) identifier[ck] = identifier[CPEComponent] . identifier[ATT_TARGET_SW] identifier[d] [ identifier[ck] ]= identifier[CPE2_3_URI] . identifier[_create_component] ( identifier[ck] , identifier[t_sw] ) identifier[ck] = identifier[CPEComponent] . identifier[ATT_TARGET_HW] identifier[d] [ identifier[ck] ]= identifier[CPE2_3_URI] . identifier[_create_component] ( identifier[ck] , identifier[t_hw] ) identifier[ck] = identifier[CPEComponent] . identifier[ATT_OTHER] identifier[d] [ identifier[ck] ]= identifier[CPE2_3_URI] . identifier[_create_component] ( identifier[ck] , identifier[oth] ) keyword[return] identifier[d]
def _unpack_edition(cls, value): """ Unpack its elements and set the attributes in wfn accordingly. Parse out the five elements: ~ edition ~ software edition ~ target sw ~ target hw ~ other :param string value: Value of edition attribute :returns: Dictionary with parts of edition attribute :exception: ValueError - invalid value of edition attribute """ components = value.split(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) d = dict() ed = components[1] sw_ed = components[2] t_sw = components[3] t_hw = components[4] oth = components[5] ck = CPEComponent.ATT_EDITION d[ck] = CPE2_3_URI._create_component(ck, ed) ck = CPEComponent.ATT_SW_EDITION d[ck] = CPE2_3_URI._create_component(ck, sw_ed) ck = CPEComponent.ATT_TARGET_SW d[ck] = CPE2_3_URI._create_component(ck, t_sw) ck = CPEComponent.ATT_TARGET_HW d[ck] = CPE2_3_URI._create_component(ck, t_hw) ck = CPEComponent.ATT_OTHER d[ck] = CPE2_3_URI._create_component(ck, oth) return d
def url(self, **kwargs): """ Returns a formatted URL for the asset's File with serialized parameters. Usage: >>> my_asset.url() "//images.contentful.com/spaces/foobar/..." >>> my_asset.url(w=120, h=160) "//images.contentful.com/spaces/foobar/...?w=120&h=160" """ url = self.fields(self._locale()).get('file', {}).get('url', '') args = ['{0}={1}'.format(k, v) for k, v in kwargs.items()] if args: url += '?{0}'.format('&'.join(args)) return url
def function[url, parameter[self]]: constant[ Returns a formatted URL for the asset's File with serialized parameters. Usage: >>> my_asset.url() "//images.contentful.com/spaces/foobar/..." >>> my_asset.url(w=120, h=160) "//images.contentful.com/spaces/foobar/...?w=120&h=160" ] variable[url] assign[=] call[call[call[name[self].fields, parameter[call[name[self]._locale, parameter[]]]].get, parameter[constant[file], dictionary[[], []]]].get, parameter[constant[url], constant[]]] variable[args] assign[=] <ast.ListComp object at 0x7da1b115ed70> if name[args] begin[:] <ast.AugAssign object at 0x7da1b115edd0> return[name[url]]
keyword[def] identifier[url] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[url] = identifier[self] . identifier[fields] ( identifier[self] . identifier[_locale] ()). identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , literal[string] ) identifier[args] =[ literal[string] . identifier[format] ( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ()] keyword[if] identifier[args] : identifier[url] += literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[args] )) keyword[return] identifier[url]
def url(self, **kwargs): """ Returns a formatted URL for the asset's File with serialized parameters. Usage: >>> my_asset.url() "//images.contentful.com/spaces/foobar/..." >>> my_asset.url(w=120, h=160) "//images.contentful.com/spaces/foobar/...?w=120&h=160" """ url = self.fields(self._locale()).get('file', {}).get('url', '') args = ['{0}={1}'.format(k, v) for (k, v) in kwargs.items()] if args: url += '?{0}'.format('&'.join(args)) # depends on [control=['if'], data=[]] return url
def parse_environs(name, parse_class=ParseResult, **defaults): """ same as parse_environ() but will also check name_1, name_2, ..., name_N and return all the found dsn strings from the environment this will look for name, and name_N (where N is 1 through infinity) in the environment, if it finds them, it will assume they are dsn urls and will parse them. The num checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't do PROM_DSN_1, PROM_DSN_3, because it will fail on _2 and move on, so make sure your num dsns are in order (eg, 1, 2, 3, ...) example -- export DSN_1=some.Interface://host:port/dbname#i1 export DSN_2=some.Interface://host2:port/dbname2#i2 $ python >>> import dsnparse >>> print dsnparse.parse_environs('DSN') # prints list with 2 parsed dsn objects :param dsn_env_name: string, the name of the environment variables, _1, ... will be appended :param parse_class: ParseResult, the class that will be used to hold parsed values :returns: list all the found dsn strings in the environment with the given name prefix """ ret = [] if name in os.environ: ret.append(parse_environ(name, parse_class, **defaults)) # now try importing _1 -> _N dsns increment_name = lambda name, num: '{name}_{num}'.format(name=name, num=num) dsn_num = 0 if increment_name(name, 0) in os.environ else 1 dsn_env_num_name = increment_name(name, dsn_num) if dsn_env_num_name in os.environ: try: while True: ret.append(parse_environ(dsn_env_num_name, parse_class, **defaults)) dsn_num += 1 dsn_env_num_name = increment_name(name, dsn_num) except KeyError: pass return ret
def function[parse_environs, parameter[name, parse_class]]: constant[ same as parse_environ() but will also check name_1, name_2, ..., name_N and return all the found dsn strings from the environment this will look for name, and name_N (where N is 1 through infinity) in the environment, if it finds them, it will assume they are dsn urls and will parse them. The num checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't do PROM_DSN_1, PROM_DSN_3, because it will fail on _2 and move on, so make sure your num dsns are in order (eg, 1, 2, 3, ...) example -- export DSN_1=some.Interface://host:port/dbname#i1 export DSN_2=some.Interface://host2:port/dbname2#i2 $ python >>> import dsnparse >>> print dsnparse.parse_environs('DSN') # prints list with 2 parsed dsn objects :param dsn_env_name: string, the name of the environment variables, _1, ... will be appended :param parse_class: ParseResult, the class that will be used to hold parsed values :returns: list all the found dsn strings in the environment with the given name prefix ] variable[ret] assign[=] list[[]] if compare[name[name] in name[os].environ] begin[:] call[name[ret].append, parameter[call[name[parse_environ], parameter[name[name], name[parse_class]]]]] variable[increment_name] assign[=] <ast.Lambda object at 0x7da204621180> variable[dsn_num] assign[=] <ast.IfExp object at 0x7da2046237f0> variable[dsn_env_num_name] assign[=] call[name[increment_name], parameter[name[name], name[dsn_num]]] if compare[name[dsn_env_num_name] in name[os].environ] begin[:] <ast.Try object at 0x7da204623eb0> return[name[ret]]
keyword[def] identifier[parse_environs] ( identifier[name] , identifier[parse_class] = identifier[ParseResult] ,** identifier[defaults] ): literal[string] identifier[ret] =[] keyword[if] identifier[name] keyword[in] identifier[os] . identifier[environ] : identifier[ret] . identifier[append] ( identifier[parse_environ] ( identifier[name] , identifier[parse_class] ,** identifier[defaults] )) identifier[increment_name] = keyword[lambda] identifier[name] , identifier[num] : literal[string] . identifier[format] ( identifier[name] = identifier[name] , identifier[num] = identifier[num] ) identifier[dsn_num] = literal[int] keyword[if] identifier[increment_name] ( identifier[name] , literal[int] ) keyword[in] identifier[os] . identifier[environ] keyword[else] literal[int] identifier[dsn_env_num_name] = identifier[increment_name] ( identifier[name] , identifier[dsn_num] ) keyword[if] identifier[dsn_env_num_name] keyword[in] identifier[os] . identifier[environ] : keyword[try] : keyword[while] keyword[True] : identifier[ret] . identifier[append] ( identifier[parse_environ] ( identifier[dsn_env_num_name] , identifier[parse_class] ,** identifier[defaults] )) identifier[dsn_num] += literal[int] identifier[dsn_env_num_name] = identifier[increment_name] ( identifier[name] , identifier[dsn_num] ) keyword[except] identifier[KeyError] : keyword[pass] keyword[return] identifier[ret]
def parse_environs(name, parse_class=ParseResult, **defaults): """ same as parse_environ() but will also check name_1, name_2, ..., name_N and return all the found dsn strings from the environment this will look for name, and name_N (where N is 1 through infinity) in the environment, if it finds them, it will assume they are dsn urls and will parse them. The num checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't do PROM_DSN_1, PROM_DSN_3, because it will fail on _2 and move on, so make sure your num dsns are in order (eg, 1, 2, 3, ...) example -- export DSN_1=some.Interface://host:port/dbname#i1 export DSN_2=some.Interface://host2:port/dbname2#i2 $ python >>> import dsnparse >>> print dsnparse.parse_environs('DSN') # prints list with 2 parsed dsn objects :param dsn_env_name: string, the name of the environment variables, _1, ... will be appended :param parse_class: ParseResult, the class that will be used to hold parsed values :returns: list all the found dsn strings in the environment with the given name prefix """ ret = [] if name in os.environ: ret.append(parse_environ(name, parse_class, **defaults)) # depends on [control=['if'], data=['name']] # now try importing _1 -> _N dsns increment_name = lambda name, num: '{name}_{num}'.format(name=name, num=num) dsn_num = 0 if increment_name(name, 0) in os.environ else 1 dsn_env_num_name = increment_name(name, dsn_num) if dsn_env_num_name in os.environ: try: while True: ret.append(parse_environ(dsn_env_num_name, parse_class, **defaults)) dsn_num += 1 dsn_env_num_name = increment_name(name, dsn_num) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['dsn_env_num_name']] return ret
def context(names):
    """Show JSON-LD context for repository objects."""
    import json

    # deduplicate before resolving, one context per distinct name
    payload = [_context_json(name) for name in set(names)]
    if not payload:
        return

    # a single context is printed bare, multiple as a JSON array
    if len(payload) == 1:
        payload = payload[0]
    click.echo(json.dumps(payload, indent=2))
def function[context, parameter[names]]: constant[Show JSON-LD context for repository objects.] import module[json] variable[contexts] assign[=] <ast.ListComp object at 0x7da1b26af460> if name[contexts] begin[:] call[name[click].echo, parameter[call[name[json].dumps, parameter[<ast.IfExp object at 0x7da18bc704f0>]]]]
keyword[def] identifier[context] ( identifier[names] ): literal[string] keyword[import] identifier[json] identifier[contexts] =[ identifier[_context_json] ( identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[set] ( identifier[names] )] keyword[if] identifier[contexts] : identifier[click] . identifier[echo] ( identifier[json] . identifier[dumps] ( identifier[contexts] [ literal[int] ] keyword[if] identifier[len] ( identifier[contexts] )== literal[int] keyword[else] identifier[contexts] , identifier[indent] = literal[int] , ) )
def context(names): """Show JSON-LD context for repository objects.""" import json contexts = [_context_json(name) for name in set(names)] if contexts: click.echo(json.dumps(contexts[0] if len(contexts) == 1 else contexts, indent=2)) # depends on [control=['if'], data=[]]
def create_autoclassify_job_note(self, job, user=None):
    """
    Create a JobNote, possibly via auto-classification.

    Create mappings from the given Job to Bugs via verified Classifications
    of this Job. Also creates a JobNote.

    :param job: the Job instance the note and bug mappings belong to
    :param user: the User who classified the job, or None for an
        autoclassified note
    :return: the newly created JobNote
    """
    # Only insert bugs for verified failures since these are automatically
    # mirrored to ES and the mirroring can't be undone
    # TODO: Decide whether this should change now that we're no longer mirroring.
    bug_numbers = set(ClassifiedFailure.objects
                      .filter(best_for_errors__text_log_error__step__job=job,
                              best_for_errors__best_is_verified=True)
                      .exclude(bug_number=None)
                      .exclude(bug_number=0)
                      .values_list('bug_number', flat=True))

    # flat=True so this is a set of ints comparable to bug_numbers; without
    # it values_list() yields 1-tuples, the set difference below never
    # removes anything, and duplicate BugJobMap rows get created.
    existing_maps = set(BugJobMap.objects.filter(bug_id__in=bug_numbers)
                        .values_list('bug_id', flat=True))

    for bug_number in (bug_numbers - existing_maps):
        BugJobMap.objects.create(job_id=job.id,
                                 bug_id=bug_number,
                                 user=user)

    # if user is not specified, then this is an autoclassified job note and
    # we should mark it as such
    classification_name = 'intermittent' if user else 'autoclassified intermittent'
    classification = FailureClassification.objects.get(name=classification_name)

    return JobNote.objects.create(job=job,
                                  failure_classification=classification,
                                  user=user,
                                  text="")
def function[create_autoclassify_job_note, parameter[self, job, user]]: constant[ Create a JobNote, possibly via auto-classification. Create mappings from the given Job to Bugs via verified Classifications of this Job. Also creates a JobNote. ] variable[bug_numbers] assign[=] call[name[set], parameter[call[call[call[call[name[ClassifiedFailure].objects.filter, parameter[]].exclude, parameter[]].exclude, parameter[]].values_list, parameter[constant[bug_number]]]]] variable[existing_maps] assign[=] call[name[set], parameter[call[call[name[BugJobMap].objects.filter, parameter[]].values_list, parameter[constant[bug_id]]]]] for taget[name[bug_number]] in starred[binary_operation[name[bug_numbers] - name[existing_maps]]] begin[:] call[name[BugJobMap].objects.create, parameter[]] variable[classification_name] assign[=] <ast.IfExp object at 0x7da1b086bdf0> variable[classification] assign[=] call[name[FailureClassification].objects.get, parameter[]] return[call[name[JobNote].objects.create, parameter[]]]
keyword[def] identifier[create_autoclassify_job_note] ( identifier[self] , identifier[job] , identifier[user] = keyword[None] ): literal[string] identifier[bug_numbers] = identifier[set] ( identifier[ClassifiedFailure] . identifier[objects] . identifier[filter] ( identifier[best_for_errors__text_log_error__step__job] = identifier[job] , identifier[best_for_errors__best_is_verified] = keyword[True] ) . identifier[exclude] ( identifier[bug_number] = keyword[None] ) . identifier[exclude] ( identifier[bug_number] = literal[int] ) . identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] )) identifier[existing_maps] = identifier[set] ( identifier[BugJobMap] . identifier[objects] . identifier[filter] ( identifier[bug_id__in] = identifier[bug_numbers] ) . identifier[values_list] ( literal[string] )) keyword[for] identifier[bug_number] keyword[in] ( identifier[bug_numbers] - identifier[existing_maps] ): identifier[BugJobMap] . identifier[objects] . identifier[create] ( identifier[job_id] = identifier[job] . identifier[id] , identifier[bug_id] = identifier[bug_number] , identifier[user] = identifier[user] ) identifier[classification_name] = literal[string] keyword[if] identifier[user] keyword[else] literal[string] identifier[classification] = identifier[FailureClassification] . identifier[objects] . identifier[get] ( identifier[name] = identifier[classification_name] ) keyword[return] identifier[JobNote] . identifier[objects] . identifier[create] ( identifier[job] = identifier[job] , identifier[failure_classification] = identifier[classification] , identifier[user] = identifier[user] , identifier[text] = literal[string] )
def create_autoclassify_job_note(self, job, user=None): """ Create a JobNote, possibly via auto-classification. Create mappings from the given Job to Bugs via verified Classifications of this Job. Also creates a JobNote. """ # Only insert bugs for verified failures since these are automatically # mirrored to ES and the mirroring can't be undone # TODO: Decide whether this should change now that we're no longer mirroring. bug_numbers = set(ClassifiedFailure.objects.filter(best_for_errors__text_log_error__step__job=job, best_for_errors__best_is_verified=True).exclude(bug_number=None).exclude(bug_number=0).values_list('bug_number', flat=True)) existing_maps = set(BugJobMap.objects.filter(bug_id__in=bug_numbers).values_list('bug_id')) for bug_number in bug_numbers - existing_maps: BugJobMap.objects.create(job_id=job.id, bug_id=bug_number, user=user) # depends on [control=['for'], data=['bug_number']] # if user is not specified, then this is an autoclassified job note and # we should mark it as such classification_name = 'intermittent' if user else 'autoclassified intermittent' classification = FailureClassification.objects.get(name=classification_name) return JobNote.objects.create(job=job, failure_classification=classification, user=user, text='')
def as_dictionary(self):
    """
    Return the parameter as a dictionary.

    :return: dict with 'name', 'type' and 'value' keys
    """
    value = self.value
    # bytes32 values are serialized without their 0x prefix
    if self.type == 'bytes32':
        value = remove_0x_prefix(value)

    return {
        "name": self.name,
        "type": self.type,
        "value": value
    }
def function[as_dictionary, parameter[self]]: constant[ Return the parameter as a dictionary. :return: dict ] return[dictionary[[<ast.Constant object at 0x7da18f09d090>, <ast.Constant object at 0x7da18f09fd30>, <ast.Constant object at 0x7da18f09d0f0>], [<ast.Attribute object at 0x7da18f09dcf0>, <ast.Attribute object at 0x7da18f09f790>, <ast.IfExp object at 0x7da18f09df00>]]]
keyword[def] identifier[as_dictionary] ( identifier[self] ): literal[string] keyword[return] { literal[string] : identifier[self] . identifier[name] , literal[string] : identifier[self] . identifier[type] , literal[string] : identifier[remove_0x_prefix] ( identifier[self] . identifier[value] ) keyword[if] identifier[self] . identifier[type] == literal[string] keyword[else] identifier[self] . identifier[value] }
def as_dictionary(self): """ Return the parameter as a dictionary. :return: dict """ return {'name': self.name, 'type': self.type, 'value': remove_0x_prefix(self.value) if self.type == 'bytes32' else self.value}
def delete_all_policies(self):
    '''**Description**
        Delete all existing policies. The falco rules file is unchanged.

    **Arguments**
        - None

    **Success Return Value**
        The string "Policies Deleted"

    **Example**
        `examples/delete_all_policies.py <https://github.com/draios/python-sdc-client/blob/master/examples/delete_all_policies.py>`_
    '''
    # Single POST wipes every policy server-side; falco rules are untouched.
    response = requests.post(self.url + '/api/policies/deleteAll',
                             headers=self.hdrs,
                             verify=self.ssl_verify)
    if self._checkResponse(response):
        return [True, "Policies Deleted"]

    # _checkResponse() has already recorded the failure reason in lasterr.
    return [False, self.lasterr]
def function[delete_all_policies, parameter[self]]: constant[**Description** Delete all existing policies. The falco rules file is unchanged. **Arguments** - None **Success Return Value** The string "Policies Deleted" **Example** `examples/delete_all_policies.py <https://github.com/draios/python-sdc-client/blob/master/examples/delete_all_policies.py>`_ ] variable[res] assign[=] call[name[requests].post, parameter[binary_operation[name[self].url + constant[/api/policies/deleteAll]]]] if <ast.UnaryOp object at 0x7da18fe933a0> begin[:] return[list[[<ast.Constant object at 0x7da18fe93ca0>, <ast.Attribute object at 0x7da18fe93ee0>]]] return[list[[<ast.Constant object at 0x7da18fe91ae0>, <ast.Constant object at 0x7da18fe91480>]]]
keyword[def] identifier[delete_all_policies] ( identifier[self] ): literal[string] identifier[res] = identifier[requests] . identifier[post] ( identifier[self] . identifier[url] + literal[string] , identifier[headers] = identifier[self] . identifier[hdrs] , identifier[verify] = identifier[self] . identifier[ssl_verify] ) keyword[if] keyword[not] identifier[self] . identifier[_checkResponse] ( identifier[res] ): keyword[return] [ keyword[False] , identifier[self] . identifier[lasterr] ] keyword[return] [ keyword[True] , literal[string] ]
def delete_all_policies(self): """**Description** Delete all existing policies. The falco rules file is unchanged. **Arguments** - None **Success Return Value** The string "Policies Deleted" **Example** `examples/delete_all_policies.py <https://github.com/draios/python-sdc-client/blob/master/examples/delete_all_policies.py>`_ """ res = requests.post(self.url + '/api/policies/deleteAll', headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] # depends on [control=['if'], data=[]] return [True, 'Policies Deleted']
def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl,
                   switchHrfSet, tplPngSize, varPar, dctPrm=None,
                   lgcPrint=True):
    """Create all spatial x feature prf time courses.

    Parameters
    ----------
    aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond]
        Responses of 2D Gauss models to spatial conditions
    aryTmpExpInf: 2d numpy array, shape [unknown, 4]
        Temporal information about conditions (condition, onset, duration,
        feature id per row)
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varTmpOvsmpl : int, positive
        Factor by which the data has been temporally upsampled.
    switchHrfSet : int, (1, 2, 3)
        Switch to determine which hrf basis functions are used
    tplPngSize : tuple
        Pixel dimensions of the visual space (width, height).
    varPar : int, positive
        Number of parallel processes.
    dctPrm : dictionary, default None
        Dictionary with customized hrf parameters. If this is None, default
        hrf parameters will be used.
    lgcPrint: boolean, default True
        Should print messages be sent to user?

    Returns
    -------
    aryPrfTc : 3d numpy array, shape [nr of models,
                                      nr of unique features, varNumVol]
        Prf time course models
    """
    # Unique, non-zero feature identifiers (column 3 of the event table).
    vecUnqFtr = np.unique(aryTmpExpInf[:, 3])
    vecUnqFtr = vecUnqFtr[np.nonzero(vecUnqFtr)[0]]

    # Seed the result list with an empty (models x 0 x volumes) array so the
    # final concatenation is well-defined even without any feature.
    lstPrfTc = [np.zeros((aryMdlRsp.shape[0], 0, varNumVol),
                         dtype=np.float32)]

    for varFtr in vecUnqFtr:
        if lgcPrint:
            print('---------Create prf time course model for feature ' +
                  str(varFtr))

        # Rows of the event table belonging to this feature: spatial
        # condition, onset and duration columns.
        vecLgcFtr = aryTmpExpInf[:, 3] == varFtr
        vecCnd = aryTmpExpInf[vecLgcFtr, 0]
        vecOns = aryTmpExpInf[vecLgcFtr, 1]
        vecDrt = aryTmpExpInf[vecLgcFtr, 2]

        # Temporally upsampled neural time courses for this feature.
        aryNrlTc = crt_nrl_tc(aryMdlRsp, vecCnd, vecOns, vecDrt, varTr,
                              varNumVol, varTmpOvsmpl, lgcPrint=lgcPrint)

        # Convolve with the hrf basis set to obtain model pRF time courses.
        lstPrfTc.append(crt_prf_tc(aryNrlTc, varNumVol, varTr, varTmpOvsmpl,
                                   switchHrfSet, tplPngSize, varPar,
                                   dctPrm=dctPrm, lgcPrint=lgcPrint))

    # Stack all per-feature time courses along the feature axis.
    return np.concatenate(lstPrfTc, axis=1)
def function[crt_prf_ftr_tc, parameter[aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm, lgcPrint]]: constant[Create all spatial x feature prf time courses. Parameters ---------- aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions aryTmpExpInf: 2d numpy array, shape [unknown, 4] Temporal information about conditions varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data hs been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Description of input 1. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? 
Returns ------- aryNrlTcConv : 3d numpy array, shape [nr of models, nr of unique feautures, varNumVol] Prf time course models ] variable[vecFeat] assign[=] call[name[np].unique, parameter[call[name[aryTmpExpInf]][tuple[[<ast.Slice object at 0x7da20c76e3b0>, <ast.Constant object at 0x7da20c76ec20>]]]]] variable[vecFeat] assign[=] call[name[vecFeat]][call[call[name[np].nonzero, parameter[name[vecFeat]]]][constant[0]]] variable[aryPrfTc] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da18fe93070>, <ast.Constant object at 0x7da18fe915a0>, <ast.Name object at 0x7da18fe938e0>]]]] for taget[tuple[[<ast.Name object at 0x7da18fe93bb0>, <ast.Name object at 0x7da18fe93a60>]]] in starred[call[name[enumerate], parameter[name[vecFeat]]]] begin[:] if name[lgcPrint] begin[:] call[name[print], parameter[binary_operation[constant[---------Create prf time course model for feature ] + call[name[str], parameter[name[ftr]]]]]] variable[aryTmpCnd] assign[=] call[name[aryTmpExpInf]][tuple[[<ast.Compare object at 0x7da18fe937f0>, <ast.Constant object at 0x7da18fe91e40>]]] variable[aryTmpOns] assign[=] call[name[aryTmpExpInf]][tuple[[<ast.Compare object at 0x7da18fe93a30>, <ast.Constant object at 0x7da18fe90940>]]] variable[aryTmpDrt] assign[=] call[name[aryTmpExpInf]][tuple[[<ast.Compare object at 0x7da18fe91e70>, <ast.Constant object at 0x7da18fe92aa0>]]] variable[aryNrlTcTmp] assign[=] call[name[crt_nrl_tc], parameter[name[aryMdlRsp], name[aryTmpCnd], name[aryTmpOns], name[aryTmpDrt], name[varTr], name[varNumVol], name[varTmpOvsmpl]]] variable[aryPrfTcTmp] assign[=] call[name[crt_prf_tc], parameter[name[aryNrlTcTmp], name[varNumVol], name[varTr], name[varTmpOvsmpl], name[switchHrfSet], name[tplPngSize], name[varPar]]] variable[aryPrfTc] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Name object at 0x7da18fe90be0>, <ast.Name object at 0x7da18fe93280>]]]] return[name[aryPrfTc]]
keyword[def] identifier[crt_prf_ftr_tc] ( identifier[aryMdlRsp] , identifier[aryTmpExpInf] , identifier[varNumVol] , identifier[varTr] , identifier[varTmpOvsmpl] , identifier[switchHrfSet] , identifier[tplPngSize] , identifier[varPar] , identifier[dctPrm] = keyword[None] , identifier[lgcPrint] = keyword[True] ): literal[string] identifier[vecFeat] = identifier[np] . identifier[unique] ( identifier[aryTmpExpInf] [:, literal[int] ]) identifier[vecFeat] = identifier[vecFeat] [ identifier[np] . identifier[nonzero] ( identifier[vecFeat] )[ literal[int] ]] identifier[aryPrfTc] = identifier[np] . identifier[zeros] (( identifier[aryMdlRsp] . identifier[shape] [ literal[int] ], literal[int] , identifier[varNumVol] ), identifier[dtype] = identifier[np] . identifier[float32] ) keyword[for] identifier[indFtr] , identifier[ftr] keyword[in] identifier[enumerate] ( identifier[vecFeat] ): keyword[if] identifier[lgcPrint] : identifier[print] ( literal[string] + identifier[str] ( identifier[ftr] )) identifier[aryTmpCnd] = identifier[aryTmpExpInf] [ identifier[aryTmpExpInf] [:, literal[int] ]== identifier[ftr] , literal[int] ] identifier[aryTmpOns] = identifier[aryTmpExpInf] [ identifier[aryTmpExpInf] [:, literal[int] ]== identifier[ftr] , literal[int] ] identifier[aryTmpDrt] = identifier[aryTmpExpInf] [ identifier[aryTmpExpInf] [:, literal[int] ]== identifier[ftr] , literal[int] ] identifier[aryNrlTcTmp] = identifier[crt_nrl_tc] ( identifier[aryMdlRsp] , identifier[aryTmpCnd] , identifier[aryTmpOns] , identifier[aryTmpDrt] , identifier[varTr] , identifier[varNumVol] , identifier[varTmpOvsmpl] , identifier[lgcPrint] = identifier[lgcPrint] ) identifier[aryPrfTcTmp] = identifier[crt_prf_tc] ( identifier[aryNrlTcTmp] , identifier[varNumVol] , identifier[varTr] , identifier[varTmpOvsmpl] , identifier[switchHrfSet] , identifier[tplPngSize] , identifier[varPar] , identifier[dctPrm] = identifier[dctPrm] , identifier[lgcPrint] = identifier[lgcPrint] ) identifier[aryPrfTc] = identifier[np] . 
identifier[concatenate] (( identifier[aryPrfTc] , identifier[aryPrfTcTmp] ), identifier[axis] = literal[int] ) keyword[return] identifier[aryPrfTc]
def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=None, lgcPrint=True): """Create all spatial x feature prf time courses. Parameters ---------- aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions aryTmpExpInf: 2d numpy array, shape [unknown, 4] Temporal information about conditions varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data hs been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Description of input 1. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTcConv : 3d numpy array, shape [nr of models, nr of unique feautures, varNumVol] Prf time course models """ # Identify number of unique features vecFeat = np.unique(aryTmpExpInf[:, 3]) vecFeat = vecFeat[np.nonzero(vecFeat)[0]] # Preallocate the output array aryPrfTc = np.zeros((aryMdlRsp.shape[0], 0, varNumVol), dtype=np.float32) # Loop over unique features for (indFtr, ftr) in enumerate(vecFeat): if lgcPrint: print('---------Create prf time course model for feature ' + str(ftr)) # depends on [control=['if'], data=[]] # Derive sptial conditions, onsets and durations for this specific # feature aryTmpCnd = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 0] aryTmpOns = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 1] aryTmpDrt = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 2] # Create temporally upsampled neural time courses. 
aryNrlTcTmp = crt_nrl_tc(aryMdlRsp, aryTmpCnd, aryTmpOns, aryTmpDrt, varTr, varNumVol, varTmpOvsmpl, lgcPrint=lgcPrint) # Convolve with hrf to create model pRF time courses. aryPrfTcTmp = crt_prf_tc(aryNrlTcTmp, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=dctPrm, lgcPrint=lgcPrint) # Add temporal time course to time course that will be returned aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcTmp), axis=1) # depends on [control=['for'], data=[]] return aryPrfTc
def pipe(maxsize=0, *, loop=None) -> Pipe:
    """\
    A bidirectional pipe of Python objects.

    >>> async def example1():
    ...     a, b = pipe()
    ...     a.send_nowait('foo')
    ...     print(await b.recv())
    >>> asyncio.run(example1())
    foo
    >>> async def example2():
    ...     a, b = pipe()
    ...     await b.send(eof=True)
    ...     await a.recv()
    >>> asyncio.run(example2())
    Traceback (most recent call last):
        ...
    EOFError
    """
    # NOTE(review): `loop` is forwarded to asyncio.Queue/Event, whose `loop`
    # parameter was removed in Python 3.10 -- confirm the supported versions.

    class QueueStream:
        # One direction of the pipe: a bounded value queue plus a separate
        # EOF event, so EOF can be observed even while the queue is empty.

        def __init__(self, maxsize=0, *, loop=None) -> None:
            self._queue: asyncio.Queue = asyncio.Queue(maxsize, loop=loop)
            self._eof = asyncio.locks.Event(loop=loop)

        def _check_send(self, value: Any, *, eof: bool) -> None:
            # Sending anything (data or another EOF) after EOF is an error;
            # argument validation is delegated to PipeEnd.
            if self._eof.is_set():
                raise EOFError("Cannot send after EOF")
            PipeEnd.check_send_args(value, eof=eof)

        def send_nowait(self, value: Any, *, eof: bool) -> None:
            # Non-blocking send: either mark EOF or enqueue the value.
            self._check_send(value, eof=eof)
            if eof:
                self._eof.set()
            else:
                self._queue.put_nowait(value)

        async def send(self, value: Any, *, eof: bool) -> None:
            # Blocking send: may wait for queue space when maxsize is set.
            self._check_send(value, eof=eof)
            if eof:
                self._eof.set()
            else:
                await self._queue.put(value)

        async def recv(self) -> Any:
            # Race a queue get against the EOF event; whichever completes
            # first decides between returning a value and raising EOFError.
            get = asyncio.create_task(self._queue.get())
            eof = asyncio.create_task(self._eof.wait())
            done, pending = await asyncio.wait([get, eof], return_when=asyncio.FIRST_COMPLETED)
            # cancel get or eof, whichever is not finished
            for task in pending:
                task.cancel()
            if get in done:
                return get.result()
            else:
                raise EOFError

    class _PipeEnd(PipeEnd):
        # One endpoint: sends into one QueueStream, receives from the other.

        def __init__(self, send: QueueStream, recv: QueueStream) -> None:
            super().__init__()
            self._send = send
            self._recv = recv

        def send_nowait(self, value: Any=PipeEnd._none, *, eof=False):
            self._send.send_nowait(value, eof=eof)

        async def send(self, value: Any=PipeEnd._none, *, eof=False):
            await self._send.send(value, eof=eof)

        async def recv(self)-> Any:
            return await self._recv.recv()

    # Two streams, cross-wired: end 1 sends on `a`/receives on `b`, end 2
    # the other way round, which makes the pipe bidirectional.
    a, b = QueueStream(maxsize, loop=loop), QueueStream(maxsize, loop=loop)
    return _PipeEnd(a, b), _PipeEnd(b, a)
def function[pipe, parameter[maxsize]]: constant[ A bidirectional pipe of Python objects. >>> async def example1(): ... a, b = pipe() ... a.send_nowait('foo') ... print(await b.recv()) >>> asyncio.run(example1()) foo >>> async def example2(): ... a, b = pipe() ... await b.send(eof=True) ... await a.recv() >>> asyncio.run(example2()) Traceback (most recent call last): ... EOFError ] class class[QueueStream, parameter[]] begin[:] def function[__init__, parameter[self, maxsize]]: <ast.AnnAssign object at 0x7da1b0ae1840> name[self]._eof assign[=] call[name[asyncio].locks.Event, parameter[]] def function[_check_send, parameter[self, value]]: if call[name[self]._eof.is_set, parameter[]] begin[:] <ast.Raise object at 0x7da1b0ae2cb0> call[name[PipeEnd].check_send_args, parameter[name[value]]] def function[send_nowait, parameter[self, value]]: call[name[self]._check_send, parameter[name[value]]] if name[eof] begin[:] call[name[self]._eof.set, parameter[]] <ast.AsyncFunctionDef object at 0x7da20e956650> <ast.AsyncFunctionDef object at 0x7da20e956ef0> class class[_PipeEnd, parameter[]] begin[:] def function[__init__, parameter[self, send, recv]]: call[call[name[super], parameter[]].__init__, parameter[]] name[self]._send assign[=] name[send] name[self]._recv assign[=] name[recv] def function[send_nowait, parameter[self, value]]: call[name[self]._send.send_nowait, parameter[name[value]]] <ast.AsyncFunctionDef object at 0x7da18dc9ac20> <ast.AsyncFunctionDef object at 0x7da18dc9aaa0> <ast.Tuple object at 0x7da18dc985b0> assign[=] tuple[[<ast.Call object at 0x7da18dc987f0>, <ast.Call object at 0x7da18dc9ab00>]] return[tuple[[<ast.Call object at 0x7da18dc980d0>, <ast.Call object at 0x7da18dc99f60>]]]
keyword[def] identifier[pipe] ( identifier[maxsize] = literal[int] ,*, identifier[loop] = keyword[None] )-> identifier[Pipe] : literal[string] keyword[class] identifier[QueueStream] : keyword[def] identifier[__init__] ( identifier[self] , identifier[maxsize] = literal[int] ,*, identifier[loop] = keyword[None] )-> keyword[None] : identifier[self] . identifier[_queue] : identifier[asyncio] . identifier[Queue] = identifier[asyncio] . identifier[Queue] ( identifier[maxsize] , identifier[loop] = identifier[loop] ) identifier[self] . identifier[_eof] = identifier[asyncio] . identifier[locks] . identifier[Event] ( identifier[loop] = identifier[loop] ) keyword[def] identifier[_check_send] ( identifier[self] , identifier[value] : identifier[Any] ,*, identifier[eof] : identifier[bool] )-> keyword[None] : keyword[if] identifier[self] . identifier[_eof] . identifier[is_set] (): keyword[raise] identifier[EOFError] ( literal[string] ) identifier[PipeEnd] . identifier[check_send_args] ( identifier[value] , identifier[eof] = identifier[eof] ) keyword[def] identifier[send_nowait] ( identifier[self] , identifier[value] : identifier[Any] ,*, identifier[eof] : identifier[bool] )-> keyword[None] : identifier[self] . identifier[_check_send] ( identifier[value] , identifier[eof] = identifier[eof] ) keyword[if] identifier[eof] : identifier[self] . identifier[_eof] . identifier[set] () keyword[else] : identifier[self] . identifier[_queue] . identifier[put_nowait] ( identifier[value] ) keyword[async] keyword[def] identifier[send] ( identifier[self] , identifier[value] : identifier[Any] ,*, identifier[eof] : identifier[bool] )-> keyword[None] : identifier[self] . identifier[_check_send] ( identifier[value] , identifier[eof] = identifier[eof] ) keyword[if] identifier[eof] : identifier[self] . identifier[_eof] . identifier[set] () keyword[else] : keyword[await] identifier[self] . identifier[_queue] . 
identifier[put] ( identifier[value] ) keyword[async] keyword[def] identifier[recv] ( identifier[self] )-> identifier[Any] : identifier[get] = identifier[asyncio] . identifier[create_task] ( identifier[self] . identifier[_queue] . identifier[get] ()) identifier[eof] = identifier[asyncio] . identifier[create_task] ( identifier[self] . identifier[_eof] . identifier[wait] ()) identifier[done] , identifier[pending] = keyword[await] identifier[asyncio] . identifier[wait] ([ identifier[get] , identifier[eof] ], identifier[return_when] = identifier[asyncio] . identifier[FIRST_COMPLETED] ) keyword[for] identifier[task] keyword[in] identifier[pending] : identifier[task] . identifier[cancel] () keyword[if] identifier[get] keyword[in] identifier[done] : keyword[return] identifier[get] . identifier[result] () keyword[else] : keyword[raise] identifier[EOFError] keyword[class] identifier[_PipeEnd] ( identifier[PipeEnd] ): keyword[def] identifier[__init__] ( identifier[self] , identifier[send] : identifier[QueueStream] , identifier[recv] : identifier[QueueStream] )-> keyword[None] : identifier[super] (). identifier[__init__] () identifier[self] . identifier[_send] = identifier[send] identifier[self] . identifier[_recv] = identifier[recv] keyword[def] identifier[send_nowait] ( identifier[self] , identifier[value] : identifier[Any] = identifier[PipeEnd] . identifier[_none] ,*, identifier[eof] = keyword[False] ): identifier[self] . identifier[_send] . identifier[send_nowait] ( identifier[value] , identifier[eof] = identifier[eof] ) keyword[async] keyword[def] identifier[send] ( identifier[self] , identifier[value] : identifier[Any] = identifier[PipeEnd] . identifier[_none] ,*, identifier[eof] = keyword[False] ): keyword[await] identifier[self] . identifier[_send] . identifier[send] ( identifier[value] , identifier[eof] = identifier[eof] ) keyword[async] keyword[def] identifier[recv] ( identifier[self] )-> identifier[Any] : keyword[return] keyword[await] identifier[self] . 
identifier[_recv] . identifier[recv] () identifier[a] , identifier[b] = identifier[QueueStream] ( identifier[maxsize] , identifier[loop] = identifier[loop] ), identifier[QueueStream] ( identifier[maxsize] , identifier[loop] = identifier[loop] ) keyword[return] identifier[_PipeEnd] ( identifier[a] , identifier[b] ), identifier[_PipeEnd] ( identifier[b] , identifier[a] )
def pipe(maxsize=0, *, loop=None) -> Pipe:
    """
    A bidirectional pipe of Python objects.

    >>> async def example1():
    ...     a, b = pipe()
    ...     a.send_nowait('foo')
    ...     print(await b.recv())
    >>> asyncio.run(example1())
    foo
    >>> async def example2():
    ...     a, b = pipe()
    ...     await b.send(eof=True)
    ...     await a.recv()
    >>> asyncio.run(example2())
    Traceback (most recent call last):
    ...
    EOFError
    """
    # NOTE(review): the explicit `loop` argument was deprecated in Python 3.8
    # and removed from asyncio primitives in 3.10 — confirm target version.

    class QueueStream:
        # One half-duplex object stream: an asyncio.Queue for payloads plus
        # an Event that latches once EOF has been signalled.
        def __init__(self, maxsize=0, *, loop=None) -> None:
            self._queue: asyncio.Queue = asyncio.Queue(maxsize, loop=loop)
            self._eof = asyncio.locks.Event(loop=loop)

        def _check_send(self, value: Any, *, eof: bool) -> None:
            # Sending anything (data or another EOF) after EOF is an error.
            if self._eof.is_set():
                raise EOFError('Cannot send after EOF')
            # Delegate value/eof argument validation to the PipeEnd contract.
            PipeEnd.check_send_args(value, eof=eof)

        def send_nowait(self, value: Any, *, eof: bool) -> None:
            # Non-blocking send: either latch EOF or enqueue the value.
            self._check_send(value, eof=eof)
            if eof:
                self._eof.set()
            else:
                self._queue.put_nowait(value)

        async def send(self, value: Any, *, eof: bool) -> None:
            # Blocking send: may wait for queue capacity when not EOF.
            self._check_send(value, eof=eof)
            if eof:
                self._eof.set()
            else:
                await self._queue.put(value)

        async def recv(self) -> Any:
            # Race a queue-get against the EOF event; whichever finishes
            # first decides whether we return a value or raise EOFError.
            get = asyncio.create_task(self._queue.get())
            eof = asyncio.create_task(self._eof.wait())
            (done, pending) = await asyncio.wait([get, eof], return_when=asyncio.FIRST_COMPLETED)
            # Cancel whichever of the two tasks is still pending so it does
            # not linger (e.g. a dangling queue.get consuming a future item).
            for task in pending:
                task.cancel()
            if get in done:
                return get.result()
            else:
                raise EOFError

    class _PipeEnd(PipeEnd):
        # One endpoint of the pipe: writes to one QueueStream, reads the other.
        def __init__(self, send: QueueStream, recv: QueueStream) -> None:
            super().__init__()
            self._send = send
            self._recv = recv

        def send_nowait(self, value: Any = PipeEnd._none, *, eof=False):
            self._send.send_nowait(value, eof=eof)

        async def send(self, value: Any = PipeEnd._none, *, eof=False):
            await self._send.send(value, eof=eof)

        async def recv(self) -> Any:
            return await self._recv.recv()

    # Two streams, cross-wired: end 1 sends on `a` / receives on `b`,
    # end 2 does the opposite, giving full-duplex communication.
    (a, b) = (QueueStream(maxsize, loop=loop), QueueStream(maxsize, loop=loop))
    return (_PipeEnd(a, b), _PipeEnd(b, a))
def LoadSNPs(self, snps=None):
    """Define the SNP inclusions (by RSID). This overrides true boundary
    definition.

    :param snps: iterable of RSIDs (default: none)
    :return: None

    This doesn't define RSID ranges, so it raises InvalidBoundarySpec if it
    encounters what appears to be a range (SNP contains a "-")
    """
    # Guard against the mutable-default-argument pitfall: the previous
    # `snps=[]` default was a single shared list across all calls.
    if snps is None:
        snps = []
    for snp in snps:
        bounds = snp.split("-")
        if len(bounds) != 1:
            # A "-" means the caller supplied a range, which this
            # RSID-only loader does not support.
            raise InvalidBoundarySpec(snp)
        if bounds[0] != "":
            # Skip empty entries; record everything else for inclusion.
            self.target_rs.append(bounds[0])
def function[LoadSNPs, parameter[self, snps]]: constant[Define the SNP inclusions (by RSID). This overrides true boundary definition. :param snps: array of RSIDs :return: None This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it encounters what appears to be a range (SNP contains a "-") ] for taget[name[snp]] in starred[name[snps]] begin[:] variable[bounds] assign[=] call[name[snp].split, parameter[constant[-]]] if compare[call[name[len], parameter[name[bounds]]] equal[==] constant[1]] begin[:] if compare[call[name[bounds]][constant[0]] not_equal[!=] constant[]] begin[:] call[name[self].target_rs.append, parameter[call[name[bounds]][constant[0]]]]
keyword[def] identifier[LoadSNPs] ( identifier[self] , identifier[snps] =[]): literal[string] keyword[for] identifier[snp] keyword[in] identifier[snps] : identifier[bounds] = identifier[snp] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[bounds] )== literal[int] : keyword[if] identifier[bounds] [ literal[int] ]!= literal[string] : identifier[self] . identifier[target_rs] . identifier[append] ( identifier[bounds] [ literal[int] ]) keyword[else] : keyword[raise] identifier[InvalidBoundarySpec] ( identifier[snp] )
def LoadSNPs(self, snps=[]):
    """Record SNP inclusions by RSID, overriding true boundary definition.

    :param snps: array of RSIDs
    :return: None

    RSID ranges are not supported here: any entry containing a "-" (which
    would look like a range) raises InvalidBoundarySpec.
    """
    for rsid in snps:
        pieces = rsid.split("-")
        if len(pieces) != 1:
            # More than one piece means the entry contained a "-".
            raise InvalidBoundarySpec(rsid)
        if pieces[0]:
            self.target_rs.append(pieces[0])
def snapshot(*snapshot, **kwargs):
    '''
    Creates snapshots with the given names.

    snapshot : string
        name of snapshot(s)
    recursive : boolean
        recursively create snapshots of all descendent datasets.
    properties : dict
        additional zfs properties (-o)

    .. note::

        ZFS properties can be specified at the time of creation of the
        filesystem by passing an additional argument called "properties"
        and specifying the properties with their respective values in the
        form of a python dictionary::

            properties="{'property1': 'value1', 'property2': 'value2'}"

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True]
        salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True]
    '''
    # Translate keyword options into zfs(8) command-line pieces.
    opts = ['-r'] if kwargs.get('recursive', False) else []
    props = kwargs.get('properties', {})

    # Build the `zfs snapshot` invocation, run it without a shell, and
    # normalize the raw command result for callers.
    cmd = __utils__['zfs.zfs_command'](
        command='snapshot',
        flags=opts,
        filesystem_properties=props,
        target=list(snapshot),
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    return __utils__['zfs.parse_command_result'](res, 'snapshotted')
def function[snapshot, parameter[]]: constant[ Creates snapshots with the given names. snapshot : string name of snapshot(s) recursive : boolean recursively create snapshots of all descendent datasets. properties : dict additional zfs properties (-o) .. note:: ZFS properties can be specified at the time of creation of the filesystem by passing an additional argument called "properties" and specifying the properties with their respective values in the form of a python dictionary:: properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True] salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True] ] variable[flags] assign[=] list[[]] variable[filesystem_properties] assign[=] call[name[kwargs].get, parameter[constant[properties], dictionary[[], []]]] if call[name[kwargs].get, parameter[constant[recursive], constant[False]]] begin[:] call[name[flags].append, parameter[constant[-r]]] variable[res] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[call[name[__utils__]][constant[zfs.zfs_command]], parameter[]]]] return[call[call[name[__utils__]][constant[zfs.parse_command_result]], parameter[name[res], constant[snapshotted]]]]
keyword[def] identifier[snapshot] (* identifier[snapshot] ,** identifier[kwargs] ): literal[string] identifier[flags] =[] identifier[filesystem_properties] = identifier[kwargs] . identifier[get] ( literal[string] ,{}) keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ): identifier[flags] . identifier[append] ( literal[string] ) identifier[res] = identifier[__salt__] [ literal[string] ]( identifier[__utils__] [ literal[string] ]( identifier[command] = literal[string] , identifier[flags] = identifier[flags] , identifier[filesystem_properties] = identifier[filesystem_properties] , identifier[target] = identifier[list] ( identifier[snapshot] ), ), identifier[python_shell] = keyword[False] , ) keyword[return] identifier[__utils__] [ literal[string] ]( identifier[res] , literal[string] )
def snapshot(*snapshot, **kwargs): """ Creates snapshots with the given names. snapshot : string name of snapshot(s) recursive : boolean recursively create snapshots of all descendent datasets. properties : dict additional zfs properties (-o) .. note:: ZFS properties can be specified at the time of creation of the filesystem by passing an additional argument called "properties" and specifying the properties with their respective values in the form of a python dictionary:: properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True] salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True] """ ## Configure command # NOTE: initialize the defaults flags = [] # NOTE: push filesystem properties filesystem_properties = kwargs.get('properties', {}) # NOTE: set extra config from kwargs if kwargs.get('recursive', False): flags.append('-r') # depends on [control=['if'], data=[]] ## Create snapshot res = __salt__['cmd.run_all'](__utils__['zfs.zfs_command'](command='snapshot', flags=flags, filesystem_properties=filesystem_properties, target=list(snapshot)), python_shell=False) return __utils__['zfs.parse_command_result'](res, 'snapshotted')
def get_uids(self, project=None):
    """Return a list of UIDs

    project -- the Project to filter for; None or a path ending in
               'all_projects' selects every known project
    """
    self._update()
    if not project or project.endswith('all_projects'):
        # No specific project (or the "all projects" pseudo-project):
        # collect UIDs across every project.  Use a distinct loop variable
        # so the `project` parameter is not shadowed by the comprehension.
        return [self._gen_uid(task['uuid'])
                for proj in self._tasks
                for task in self._tasks[proj].values()]
    # Hoist basename() so it is computed once, not per branch.
    name = basename(project)
    if name not in self._tasks:
        return []
    return [self._gen_uid(uuid) for uuid in self._tasks[name]]
def function[get_uids, parameter[self, project]]: constant[Return a list of UIDs project -- the Project to filter for ] call[name[self]._update, parameter[]] if <ast.BoolOp object at 0x7da204344dc0> begin[:] return[<ast.ListComp object at 0x7da204344130>] if compare[call[name[basename], parameter[name[project]]] <ast.NotIn object at 0x7da2590d7190> name[self]._tasks] begin[:] return[list[[]]] return[<ast.ListComp object at 0x7da1b1529b10>]
keyword[def] identifier[get_uids] ( identifier[self] , identifier[project] = keyword[None] ): literal[string] identifier[self] . identifier[_update] () keyword[if] keyword[not] identifier[project] keyword[or] identifier[project] . identifier[endswith] ( literal[string] ): keyword[return] [ identifier[self] . identifier[_gen_uid] ( identifier[task] [ literal[string] ]) keyword[for] identifier[project] keyword[in] identifier[self] . identifier[_tasks] keyword[for] identifier[task] keyword[in] identifier[self] . identifier[_tasks] [ identifier[project] ]. identifier[values] ()] keyword[if] identifier[basename] ( identifier[project] ) keyword[not] keyword[in] identifier[self] . identifier[_tasks] : keyword[return] [] keyword[return] [ identifier[self] . identifier[_gen_uid] ( identifier[uuid] ) keyword[for] identifier[uuid] keyword[in] identifier[self] . identifier[_tasks] [ identifier[basename] ( identifier[project] )]]
def get_uids(self, project=None):
    """Return a list of UIDs

    project -- the Project to filter for
    """
    self._update()
    wants_all = not project or project.endswith('all_projects')
    if wants_all:
        # Gather UIDs from every project's task table.
        uids = []
        for proj_name in self._tasks:
            for task in self._tasks[proj_name].values():
                uids.append(self._gen_uid(task['uuid']))
        return uids
    key = basename(project)
    if key in self._tasks:
        return [self._gen_uid(u) for u in self._tasks[key]]
    return []
def load_layer_without_provider(layer_uri, layer_name='tmp'):
    """Helper to load a layer when don't know the driver.

    Don't use it, it's an empiric function to try each provider one per one.
    OGR/GDAL is printing a lot of error saying that the layer is not valid.

    :param layer_uri: Layer URI that will be used by QGIS to load the layer.
    :type layer_uri: basestring

    :param layer_name: Layer name to use. Default to 'tmp'.
    :type layer_name: basestring

    :return: The layer or None if it's failed.
    :rtype: QgsMapLayer
    """
    # Fast path: try the most common vector driver, then the most common
    # raster driver, before walking the full driver lists.
    candidate = QgsVectorLayer(layer_uri, layer_name, VECTOR_DRIVERS[0])
    if candidate.isValid():
        return candidate
    candidate = QgsRasterLayer(layer_uri, layer_name, RASTER_DRIVERS[0])
    if candidate.isValid():
        return candidate

    # Fall back to the remaining vector drivers.
    for vector_driver in VECTOR_DRIVERS[1:]:
        if vector_driver == 'delimitedtext':
            # Explicitly use URI with delimiter or tests fail in Windows. TS.
            candidate = QgsVectorLayer(
                'file:///%s?delimiter=,' % layer_uri, layer_name, vector_driver)
            if candidate.isValid():
                return candidate
        # Also try the plain URI with this driver (even for delimitedtext).
        candidate = QgsVectorLayer(layer_uri, layer_name, vector_driver)
        if candidate.isValid():
            return candidate

    # Finally, the remaining raster drivers.
    for raster_driver in RASTER_DRIVERS[1:]:
        candidate = QgsRasterLayer(layer_uri, layer_name, raster_driver)
        if candidate.isValid():
            return candidate

    # Nothing could open the URI.
    return None
def function[load_layer_without_provider, parameter[layer_uri, layer_name]]: constant[Helper to load a layer when don't know the driver. Don't use it, it's an empiric function to try each provider one per one. OGR/GDAL is printing a lot of error saying that the layer is not valid. :param layer_uri: Layer URI that will be used by QGIS to load the layer. :type layer_uri: basestring :param layer_name: Layer name to use. Default to 'tmp'. :type layer_name: basestring :return: The layer or None if it's failed. :rtype: QgsMapLayer ] variable[layer] assign[=] call[name[QgsVectorLayer], parameter[name[layer_uri], name[layer_name], call[name[VECTOR_DRIVERS]][constant[0]]]] if call[name[layer].isValid, parameter[]] begin[:] return[name[layer]] variable[layer] assign[=] call[name[QgsRasterLayer], parameter[name[layer_uri], name[layer_name], call[name[RASTER_DRIVERS]][constant[0]]]] if call[name[layer].isValid, parameter[]] begin[:] return[name[layer]] for taget[name[driver]] in starred[call[name[VECTOR_DRIVERS]][<ast.Slice object at 0x7da207f9a980>]] begin[:] if compare[name[driver] equal[==] constant[delimitedtext]] begin[:] variable[layer] assign[=] call[name[QgsVectorLayer], parameter[binary_operation[constant[file:///%s?delimiter=,] <ast.Mod object at 0x7da2590d6920> name[layer_uri]], name[layer_name], name[driver]]] if call[name[layer].isValid, parameter[]] begin[:] return[name[layer]] variable[layer] assign[=] call[name[QgsVectorLayer], parameter[name[layer_uri], name[layer_name], name[driver]]] if call[name[layer].isValid, parameter[]] begin[:] return[name[layer]] for taget[name[driver]] in starred[call[name[RASTER_DRIVERS]][<ast.Slice object at 0x7da207f99420>]] begin[:] variable[layer] assign[=] call[name[QgsRasterLayer], parameter[name[layer_uri], name[layer_name], name[driver]]] if call[name[layer].isValid, parameter[]] begin[:] return[name[layer]] return[constant[None]]
keyword[def] identifier[load_layer_without_provider] ( identifier[layer_uri] , identifier[layer_name] = literal[string] ): literal[string] identifier[layer] = identifier[QgsVectorLayer] ( identifier[layer_uri] , identifier[layer_name] , identifier[VECTOR_DRIVERS] [ literal[int] ]) keyword[if] identifier[layer] . identifier[isValid] (): keyword[return] identifier[layer] identifier[layer] = identifier[QgsRasterLayer] ( identifier[layer_uri] , identifier[layer_name] , identifier[RASTER_DRIVERS] [ literal[int] ]) keyword[if] identifier[layer] . identifier[isValid] (): keyword[return] identifier[layer] keyword[for] identifier[driver] keyword[in] identifier[VECTOR_DRIVERS] [ literal[int] :]: keyword[if] identifier[driver] == literal[string] : identifier[layer] = identifier[QgsVectorLayer] ( literal[string] % identifier[layer_uri] , identifier[layer_name] , identifier[driver] ) keyword[if] identifier[layer] . identifier[isValid] (): keyword[return] identifier[layer] identifier[layer] = identifier[QgsVectorLayer] ( identifier[layer_uri] , identifier[layer_name] , identifier[driver] ) keyword[if] identifier[layer] . identifier[isValid] (): keyword[return] identifier[layer] keyword[for] identifier[driver] keyword[in] identifier[RASTER_DRIVERS] [ literal[int] :]: identifier[layer] = identifier[QgsRasterLayer] ( identifier[layer_uri] , identifier[layer_name] , identifier[driver] ) keyword[if] identifier[layer] . identifier[isValid] (): keyword[return] identifier[layer] keyword[return] keyword[None]
def load_layer_without_provider(layer_uri, layer_name='tmp'): """Helper to load a layer when don't know the driver. Don't use it, it's an empiric function to try each provider one per one. OGR/GDAL is printing a lot of error saying that the layer is not valid. :param layer_uri: Layer URI that will be used by QGIS to load the layer. :type layer_uri: basestring :param layer_name: Layer name to use. Default to 'tmp'. :type layer_name: basestring :return: The layer or None if it's failed. :rtype: QgsMapLayer """ # Let's try the most common vector driver layer = QgsVectorLayer(layer_uri, layer_name, VECTOR_DRIVERS[0]) if layer.isValid(): return layer # depends on [control=['if'], data=[]] # Let's try the most common raster driver layer = QgsRasterLayer(layer_uri, layer_name, RASTER_DRIVERS[0]) if layer.isValid(): return layer # depends on [control=['if'], data=[]] # Then try all other drivers for driver in VECTOR_DRIVERS[1:]: if driver == 'delimitedtext': # Explicitly use URI with delimiter or tests fail in Windows. TS. layer = QgsVectorLayer('file:///%s?delimiter=,' % layer_uri, layer_name, driver) if layer.isValid(): return layer # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['driver']] layer = QgsVectorLayer(layer_uri, layer_name, driver) if layer.isValid(): return layer # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['driver']] for driver in RASTER_DRIVERS[1:]: layer = QgsRasterLayer(layer_uri, layer_name, driver) if layer.isValid(): return layer # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['driver']] return None
def _make_class_unpicklable(cls): """Make the given class un-picklable.""" def _break_on_call_reduce(self, protocol=None): raise TypeError('%r cannot be pickled' % self) cls.__reduce_ex__ = _break_on_call_reduce cls.__module__ = '<unknown>'
def function[_make_class_unpicklable, parameter[cls]]: constant[Make the given class un-picklable.] def function[_break_on_call_reduce, parameter[self, protocol]]: <ast.Raise object at 0x7da1b1e74520> name[cls].__reduce_ex__ assign[=] name[_break_on_call_reduce] name[cls].__module__ assign[=] constant[<unknown>]
keyword[def] identifier[_make_class_unpicklable] ( identifier[cls] ): literal[string] keyword[def] identifier[_break_on_call_reduce] ( identifier[self] , identifier[protocol] = keyword[None] ): keyword[raise] identifier[TypeError] ( literal[string] % identifier[self] ) identifier[cls] . identifier[__reduce_ex__] = identifier[_break_on_call_reduce] identifier[cls] . identifier[__module__] = literal[string]
def _make_class_unpicklable(cls): """Make the given class un-picklable.""" def _break_on_call_reduce(self, protocol=None): raise TypeError('%r cannot be pickled' % self) cls.__reduce_ex__ = _break_on_call_reduce cls.__module__ = '<unknown>'
def parse_table(table_string, header, remove_rows=1):
    '''parse a table to json from a string, where a header is
       expected by default. Return a jsonified table.

       Parameters
       ==========
       table_string: the string table, ideally with a header
       header: header of expected table, must match dimension (number columns)
       remove_rows: an integer to indicate a number of rows to remove from top
                    the default is 1 assuming we don't want the header

    '''
    # Drop empty lines, then skip the requested number of leading rows
    # (usually just the header line).
    rows = [line for line in table_string.split('\n') if line]
    rows = rows[remove_rows:]

    # Parse into json dictionary
    parsed = []
    for row in rows:
        # This assumes no white spaces in each entry, so a space split
        # yields exactly one cell per column.
        cells = [cell for cell in row.split(' ') if cell]
        # Pair each cell with its column name; a row wider than the header
        # raises IndexError, surfacing the dimension mismatch.
        parsed.append({header[i]: cell for i, cell in enumerate(cells)})
    return parsed
def function[parse_table, parameter[table_string, header, remove_rows]]: constant[parse a table to json from a string, where a header is expected by default. Return a jsonified table. Parameters ========== table_string: the string table, ideally with a header header: header of expected table, must match dimension (number columns) remove_rows: an integer to indicate a number of rows to remove from top the default is 1 assuming we don't want the header ] variable[rows] assign[=] <ast.ListComp object at 0x7da1b04b60e0> variable[rows] assign[=] call[name[rows]][<ast.Slice object at 0x7da1b04a5e10>] variable[parsed] assign[=] list[[]] for taget[name[row]] in starred[name[rows]] begin[:] variable[item] assign[=] dictionary[[], []] variable[row] assign[=] <ast.ListComp object at 0x7da1b04a51e0> for taget[name[e]] in starred[call[name[range], parameter[call[name[len], parameter[name[row]]]]]] begin[:] call[name[item]][call[name[header]][name[e]]] assign[=] call[name[row]][name[e]] call[name[parsed].append, parameter[name[item]]] return[name[parsed]]
keyword[def] identifier[parse_table] ( identifier[table_string] , identifier[header] , identifier[remove_rows] = literal[int] ): literal[string] identifier[rows] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[table_string] . identifier[split] ( literal[string] ) keyword[if] identifier[x] ] identifier[rows] = identifier[rows] [ literal[int] + identifier[remove_rows] :] identifier[parsed] =[] keyword[for] identifier[row] keyword[in] identifier[rows] : identifier[item] ={} identifier[row] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[row] . identifier[split] ( literal[string] ) keyword[if] identifier[x] ] keyword[for] identifier[e] keyword[in] identifier[range] ( identifier[len] ( identifier[row] )): identifier[item] [ identifier[header] [ identifier[e] ]]= identifier[row] [ identifier[e] ] identifier[parsed] . identifier[append] ( identifier[item] ) keyword[return] identifier[parsed]
def parse_table(table_string, header, remove_rows=1):
    """parse a table to json from a string, where a header is expected by
    default. Return a jsonified table.

    Parameters
    ==========
    table_string: the string table, ideally with a header
    header: header of expected table, must match dimension (number columns)
    remove_rows: an integer to indicate a number of rows to remove from top
    the default is 1 assuming we don't want the header
    """
    # Keep only non-empty lines, then drop the leading rows (header).
    lines = [ln for ln in table_string.split('\n') if ln]
    data_lines = lines[remove_rows:]

    records = []
    for line in data_lines:
        # Entries are assumed to contain no internal spaces.
        fields = [f for f in line.split(' ') if f]
        record = {}
        for idx, field in enumerate(fields):
            record[header[idx]] = field
        records.append(record)
    return records