code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def get_monitoring_problems(self):
    """Get the schedulers satellites problems list

    :return: problems dictionary
    :rtype: dict
    """
    res = self.get_id()
    res['problems'] = {}

    # Report our schedulers information, but only if a dispatcher exists
    dispatcher = getattr(self, 'dispatcher', None)
    if dispatcher is None:
        return res

    for link in dispatcher.all_daemons_links:
        # Only active scheduler links are of interest here.
        if link.type not in ['scheduler'] or not link.active:
            continue
        stats = link.statistics
        if stats and 'problems' in stats:
            res['problems'][link.name] = {
                '_freshness': stats['_freshness'],
                'problems': stats['problems']
            }

    return res
def function[get_monitoring_problems, parameter[self]]: constant[Get the schedulers satellites problems list :return: problems dictionary :rtype: dict ] variable[res] assign[=] call[name[self].get_id, parameter[]] call[name[res]][constant[problems]] assign[=] dictionary[[], []] if compare[call[name[getattr], parameter[name[self], constant[dispatcher], constant[None]]] is constant[None]] begin[:] return[name[res]] for taget[name[satellite]] in starred[name[self].dispatcher.all_daemons_links] begin[:] if compare[name[satellite].type <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18fe90a90>]]] begin[:] continue if <ast.UnaryOp object at 0x7da18fe90a00> begin[:] continue if <ast.BoolOp object at 0x7da18fe93e20> begin[:] call[call[name[res]][constant[problems]]][name[satellite].name] assign[=] dictionary[[<ast.Constant object at 0x7da18bc724a0>, <ast.Constant object at 0x7da18bc70b50>], [<ast.Subscript object at 0x7da18bc72dd0>, <ast.Subscript object at 0x7da18bc73ee0>]] return[name[res]]
keyword[def] identifier[get_monitoring_problems] ( identifier[self] ): literal[string] identifier[res] = identifier[self] . identifier[get_id] () identifier[res] [ literal[string] ]={} keyword[if] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[is] keyword[None] : keyword[return] identifier[res] keyword[for] identifier[satellite] keyword[in] identifier[self] . identifier[dispatcher] . identifier[all_daemons_links] : keyword[if] identifier[satellite] . identifier[type] keyword[not] keyword[in] [ literal[string] ]: keyword[continue] keyword[if] keyword[not] identifier[satellite] . identifier[active] : keyword[continue] keyword[if] identifier[satellite] . identifier[statistics] keyword[and] literal[string] keyword[in] identifier[satellite] . identifier[statistics] : identifier[res] [ literal[string] ][ identifier[satellite] . identifier[name] ]={ literal[string] : identifier[satellite] . identifier[statistics] [ literal[string] ], literal[string] : identifier[satellite] . identifier[statistics] [ literal[string] ] } keyword[return] identifier[res]
def get_monitoring_problems(self): """Get the schedulers satellites problems list :return: problems dictionary :rtype: dict """ res = self.get_id() res['problems'] = {} # Report our schedulers information, but only if a dispatcher exists if getattr(self, 'dispatcher', None) is None: return res # depends on [control=['if'], data=[]] for satellite in self.dispatcher.all_daemons_links: if satellite.type not in ['scheduler']: continue # depends on [control=['if'], data=[]] if not satellite.active: continue # depends on [control=['if'], data=[]] if satellite.statistics and 'problems' in satellite.statistics: res['problems'][satellite.name] = {'_freshness': satellite.statistics['_freshness'], 'problems': satellite.statistics['problems']} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['satellite']] return res
def create_html(self, fname, title="ClassTracker Statistics"):
    """
    Create HTML page `fname` and additional files in a directory derived
    from `fname`.

    :param fname: path of the index HTML page to create
    :param title: title of the generated index page
    """
    # Create a folder to store the charts and additional HTML files.
    self.basedir = os.path.dirname(os.path.abspath(fname))
    self.filesdir = os.path.splitext(fname)[0] + '_files'
    if not os.path.isdir(self.filesdir):
        os.mkdir(self.filesdir)
    self.filesdir = os.path.abspath(self.filesdir)
    self.links = {}

    # Annotate all snapshots in advance
    self.annotate()

    # Create charts. The tags to show the images are returned and stored in
    # the self.charts dictionary. This allows to return alternative text if
    # the chart creation framework is not available.
    self.charts = {}
    fn = os.path.join(self.filesdir, 'timespace.png')
    self.charts['snapshots'] = self.create_snapshot_chart(fn)

    # enumerate replaces the former zip(snapshots, range(len(snapshots)));
    # pairing and iteration order are identical.
    for idx, fp in enumerate(self.snapshots):
        fn = os.path.join(self.filesdir, 'fp%d.png' % idx)
        self.charts[fp] = self.create_pie_chart(fp, fn)

    # Iterate a list() copy of the keys in case chart creation touches
    # the index while we loop.
    for cn in list(self.index.keys()):
        fn = os.path.join(self.filesdir, cn.replace('.', '_') + '-lt.png')
        self.charts[cn] = self.create_lifetime_chart(cn, fn)

    # Create HTML pages first for each class and then the index page.
    for cn in list(self.index.keys()):
        fn = os.path.join(self.filesdir, cn.replace('.', '_') + '.html')
        self.links[cn] = fn
        self.print_class_details(fn, cn)

    self.create_title_page(fname, title=title)
def function[create_html, parameter[self, fname, title]]: constant[ Create HTML page `fname` and additional files in a directory derived from `fname`. ] name[self].basedir assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[fname]]]]] name[self].filesdir assign[=] binary_operation[call[call[name[os].path.splitext, parameter[name[fname]]]][constant[0]] + constant[_files]] if <ast.UnaryOp object at 0x7da1b04719f0> begin[:] call[name[os].mkdir, parameter[name[self].filesdir]] name[self].filesdir assign[=] call[name[os].path.abspath, parameter[name[self].filesdir]] name[self].links assign[=] dictionary[[], []] call[name[self].annotate, parameter[]] name[self].charts assign[=] dictionary[[], []] variable[fn] assign[=] call[name[os].path.join, parameter[name[self].filesdir, constant[timespace.png]]] call[name[self].charts][constant[snapshots]] assign[=] call[name[self].create_snapshot_chart, parameter[name[fn]]] for taget[tuple[[<ast.Name object at 0x7da1b0472e00>, <ast.Name object at 0x7da1b0473df0>]]] in starred[call[name[zip], parameter[name[self].snapshots, call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[self].snapshots]]]]]]]]] begin[:] variable[fn] assign[=] call[name[os].path.join, parameter[name[self].filesdir, binary_operation[constant[fp%d.png] <ast.Mod object at 0x7da2590d6920> name[idx]]]] call[name[self].charts][name[fp]] assign[=] call[name[self].create_pie_chart, parameter[name[fp], name[fn]]] for taget[name[cn]] in starred[call[name[list], parameter[call[name[self].index.keys, parameter[]]]]] begin[:] variable[fn] assign[=] call[name[os].path.join, parameter[name[self].filesdir, binary_operation[call[name[cn].replace, parameter[constant[.], constant[_]]] + constant[-lt.png]]]] call[name[self].charts][name[cn]] assign[=] call[name[self].create_lifetime_chart, parameter[name[cn], name[fn]]] for taget[name[cn]] in starred[call[name[list], parameter[call[name[self].index.keys, 
parameter[]]]]] begin[:] variable[fn] assign[=] call[name[os].path.join, parameter[name[self].filesdir, binary_operation[call[name[cn].replace, parameter[constant[.], constant[_]]] + constant[.html]]]] call[name[self].links][name[cn]] assign[=] name[fn] call[name[self].print_class_details, parameter[name[fn], name[cn]]] call[name[self].create_title_page, parameter[name[fname]]]
keyword[def] identifier[create_html] ( identifier[self] , identifier[fname] , identifier[title] = literal[string] ): literal[string] identifier[self] . identifier[basedir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[fname] )) identifier[self] . identifier[filesdir] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fname] )[ literal[int] ]+ literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[filesdir] ): identifier[os] . identifier[mkdir] ( identifier[self] . identifier[filesdir] ) identifier[self] . identifier[filesdir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[self] . identifier[filesdir] ) identifier[self] . identifier[links] ={} identifier[self] . identifier[annotate] () identifier[self] . identifier[charts] ={} identifier[fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filesdir] , literal[string] ) identifier[self] . identifier[charts] [ literal[string] ]= identifier[self] . identifier[create_snapshot_chart] ( identifier[fn] ) keyword[for] identifier[fp] , identifier[idx] keyword[in] identifier[zip] ( identifier[self] . identifier[snapshots] , identifier[list] ( identifier[range] ( identifier[len] ( identifier[self] . identifier[snapshots] )))): identifier[fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filesdir] , literal[string] %( identifier[idx] )) identifier[self] . identifier[charts] [ identifier[fp] ]= identifier[self] . identifier[create_pie_chart] ( identifier[fp] , identifier[fn] ) keyword[for] identifier[cn] keyword[in] identifier[list] ( identifier[self] . identifier[index] . identifier[keys] ()): identifier[fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filesdir] , identifier[cn] . 
identifier[replace] ( literal[string] , literal[string] )+ literal[string] ) identifier[self] . identifier[charts] [ identifier[cn] ]= identifier[self] . identifier[create_lifetime_chart] ( identifier[cn] , identifier[fn] ) keyword[for] identifier[cn] keyword[in] identifier[list] ( identifier[self] . identifier[index] . identifier[keys] ()): identifier[fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filesdir] , identifier[cn] . identifier[replace] ( literal[string] , literal[string] )+ literal[string] ) identifier[self] . identifier[links] [ identifier[cn] ]= identifier[fn] identifier[self] . identifier[print_class_details] ( identifier[fn] , identifier[cn] ) identifier[self] . identifier[create_title_page] ( identifier[fname] , identifier[title] = identifier[title] )
def create_html(self, fname, title='ClassTracker Statistics'): """ Create HTML page `fname` and additional files in a directory derived from `fname`. """ # Create a folder to store the charts and additional HTML files. self.basedir = os.path.dirname(os.path.abspath(fname)) self.filesdir = os.path.splitext(fname)[0] + '_files' if not os.path.isdir(self.filesdir): os.mkdir(self.filesdir) # depends on [control=['if'], data=[]] self.filesdir = os.path.abspath(self.filesdir) self.links = {} # Annotate all snapshots in advance self.annotate() # Create charts. The tags to show the images are returned and stored in # the self.charts dictionary. This allows to return alternative text if # the chart creation framework is not available. self.charts = {} fn = os.path.join(self.filesdir, 'timespace.png') self.charts['snapshots'] = self.create_snapshot_chart(fn) for (fp, idx) in zip(self.snapshots, list(range(len(self.snapshots)))): fn = os.path.join(self.filesdir, 'fp%d.png' % idx) self.charts[fp] = self.create_pie_chart(fp, fn) # depends on [control=['for'], data=[]] for cn in list(self.index.keys()): fn = os.path.join(self.filesdir, cn.replace('.', '_') + '-lt.png') self.charts[cn] = self.create_lifetime_chart(cn, fn) # depends on [control=['for'], data=['cn']] # Create HTML pages first for each class and then the index page. for cn in list(self.index.keys()): fn = os.path.join(self.filesdir, cn.replace('.', '_') + '.html') self.links[cn] = fn self.print_class_details(fn, cn) # depends on [control=['for'], data=['cn']] self.create_title_page(fname, title=title)
def _get_resource_id(resource, name, region=None, key=None, keyid=None,
                     profile=None):
    '''
    Get an AWS id for a VPC resource by type and name.
    '''
    # Fast path: the id may already be cached for this resource/name pair.
    cached = _cache_id(name, sub_resource=resource, region=region,
                       key=key, keyid=keyid, profile=profile)
    if cached:
        return cached

    # Cache miss: perform the actual lookup; yields None when not found.
    found = _get_resource(resource, name=name, region=region,
                          key=key, keyid=keyid, profile=profile)
    return found.id if found else None
def function[_get_resource_id, parameter[resource, name, region, key, keyid, profile]]: constant[ Get an AWS id for a VPC resource by type and name. ] variable[_id] assign[=] call[name[_cache_id], parameter[name[name]]] if name[_id] begin[:] return[name[_id]] variable[r] assign[=] call[name[_get_resource], parameter[name[resource]]] if name[r] begin[:] return[name[r].id]
keyword[def] identifier[_get_resource_id] ( identifier[resource] , identifier[name] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] identifier[_id] = identifier[_cache_id] ( identifier[name] , identifier[sub_resource] = identifier[resource] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] identifier[_id] : keyword[return] identifier[_id] identifier[r] = identifier[_get_resource] ( identifier[resource] , identifier[name] = identifier[name] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] identifier[r] : keyword[return] identifier[r] . identifier[id]
def _get_resource_id(resource, name, region=None, key=None, keyid=None, profile=None): """ Get an AWS id for a VPC resource by type and name. """ _id = _cache_id(name, sub_resource=resource, region=region, key=key, keyid=keyid, profile=profile) if _id: return _id # depends on [control=['if'], data=[]] r = _get_resource(resource, name=name, region=region, key=key, keyid=keyid, profile=profile) if r: return r.id # depends on [control=['if'], data=[]]
def load_manifest(app, filename='manifest.json'):
    '''Load an assets json manifest'''
    # An absolute filename is used as-is; otherwise it is resolved inside
    # the application's package resources.
    path = (filename if os.path.isabs(filename)
            else pkg_resources.resource_filename(app, filename))
    with io.open(path, mode='r', encoding='utf8') as stream:
        manifest = json.load(stream)
    # Remember which manifest file backs this app.
    _registered_manifests[app] = path
    return manifest
def function[load_manifest, parameter[app, filename]]: constant[Load an assets json manifest] if call[name[os].path.isabs, parameter[name[filename]]] begin[:] variable[path] assign[=] name[filename] with call[name[io].open, parameter[name[path]]] begin[:] variable[data] assign[=] call[name[json].load, parameter[name[stream]]] call[name[_registered_manifests]][name[app]] assign[=] name[path] return[name[data]]
keyword[def] identifier[load_manifest] ( identifier[app] , identifier[filename] = literal[string] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ): identifier[path] = identifier[filename] keyword[else] : identifier[path] = identifier[pkg_resources] . identifier[resource_filename] ( identifier[app] , identifier[filename] ) keyword[with] identifier[io] . identifier[open] ( identifier[path] , identifier[mode] = literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[stream] : identifier[data] = identifier[json] . identifier[load] ( identifier[stream] ) identifier[_registered_manifests] [ identifier[app] ]= identifier[path] keyword[return] identifier[data]
def load_manifest(app, filename='manifest.json'): """Load an assets json manifest""" if os.path.isabs(filename): path = filename # depends on [control=['if'], data=[]] else: path = pkg_resources.resource_filename(app, filename) with io.open(path, mode='r', encoding='utf8') as stream: data = json.load(stream) # depends on [control=['with'], data=['stream']] _registered_manifests[app] = path return data
def compute_inverted(self):
    """
    Returns:
      inverted: bool
        True if the stored image is inverted.
    """
    # NOTE: a previous implementation decided this by round-tripping
    # (ra, dec) through the WCS header (sky2xy) and checking the pixel
    # offset; it was replaced by the static CCD lookup below.
    if self.ssos or self.obs.is_fake() or self.obs.ftype == 's':
        inverted = False
    else:
        # Direct boolean expression instead of `True if ... else False`.
        # NOTE(review): assumes get_ccd_num() is offset by one relative to
        # the indices stored in INVERTED_CCDS -- confirm against its source.
        inverted = self.get_ccd_num() - 1 in INVERTED_CCDS
    logger.debug("Got that {} is_inverted: {}".format(self.obs.rawname, inverted))
    return inverted
def function[compute_inverted, parameter[self]]: constant[ Returns: inverted: bool True if the stored image is inverted. ] if <ast.BoolOp object at 0x7da1b1a1cdc0> begin[:] variable[inverted] assign[=] constant[False] call[name[logger].debug, parameter[call[constant[Got that {} is_inverted: {}].format, parameter[name[self].obs.rawname, name[inverted]]]]] return[name[inverted]]
keyword[def] identifier[compute_inverted] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[ssos] keyword[or] identifier[self] . identifier[obs] . identifier[is_fake] () keyword[or] identifier[self] . identifier[obs] . identifier[ftype] == literal[string] : identifier[inverted] = keyword[False] keyword[else] : identifier[inverted] = keyword[True] keyword[if] identifier[self] . identifier[get_ccd_num] ()- literal[int] keyword[in] identifier[INVERTED_CCDS] keyword[else] keyword[False] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[obs] . identifier[rawname] , identifier[inverted] )) keyword[return] identifier[inverted]
def compute_inverted(self): """ Returns: inverted: bool True if the stored image is inverted. """ # astheader = storage.get_astheader(self.obs.expnum, self.obs.ccdnum, version=self.obs.ftype) # pvwcs = wcs.WCS(astheader) # (x, y) = pvwcs.sky2xy(self.ra, self.dec) # logger.debug("is_inverted: X,Y {},{} -> wcs X,Y {},{}".format(self.x, self.y, x, y)) # dr2 = ((x-self.x)**2 + (y-self.y)**2) # return dr2 > 2 if self.ssos or self.obs.is_fake() or self.obs.ftype == 's': inverted = False # depends on [control=['if'], data=[]] else: inverted = True if self.get_ccd_num() - 1 in INVERTED_CCDS else False logger.debug('Got that {} is_inverted: {}'.format(self.obs.rawname, inverted)) return inverted
def maybe_colored(msg, color, opt):
    """Maybe it will render in color maybe it will not!"""
    # Monochrome mode passes the message through untouched.
    return msg if opt.monochrome else colored(msg, color)
def function[maybe_colored, parameter[msg, color, opt]]: constant[Maybe it will render in color maybe it will not!] if name[opt].monochrome begin[:] return[name[msg]] return[call[name[colored], parameter[name[msg], name[color]]]]
keyword[def] identifier[maybe_colored] ( identifier[msg] , identifier[color] , identifier[opt] ): literal[string] keyword[if] identifier[opt] . identifier[monochrome] : keyword[return] identifier[msg] keyword[return] identifier[colored] ( identifier[msg] , identifier[color] )
def maybe_colored(msg, color, opt): """Maybe it will render in color maybe it will not!""" if opt.monochrome: return msg # depends on [control=['if'], data=[]] return colored(msg, color)
def hugoniot_t_single(rho, rho0, c0, s, gamma0, q, theta0, n, mass,
                      three_r=3. * constants.R, t_ref=300., c_v=0.):
    """
    internal function to calculate pressure along Hugoniot

    :param rho: density in g/cm^3
    :param rho0: density at 1 bar in g/cm^3
    :param c0: velocity at 1 bar in km/s
    :param s: slope of the velocity change
    :param gamma0: Gruneisen parameter at 1 bar
    :param q: logarithmic derivative of Gruneisen parameter
    :param theta0: Debye temperature in K
    :param n: number of elements in a chemical formula
    :param mass: molar mass in gram
    :param three_r: 3 times gas constant.
        Jamieson modified this value to compensate for mismatches
    :param t_ref: reference temperature, 300 K
    :param c_v: heat capacity, see Jamieson 1983 for detail
    :return: temperature along hugoniot
    """
    compression = 1. - rho0 / rho
    # No compression: we are at the reference state.
    if compression == 0.0:
        return 300.
    # [J/mol/K] / [g/mol] = [J/g/K]
    heat_per_gram = three_r / mass * n
    # Parameter pack for the ODE right-hand side; theta0 scaled to kK.
    params = [rho0, c0, s, gamma0, q, theta0 / 1.e3]
    solution = odeint(_dT_h_delta, t_ref / 1.e3, [0., compression],
                      args=(params, heat_per_gram, c_v), full_output=1)
    states = solution[0]
    # Temperature was integrated in kK; convert back to K.
    return np.squeeze(states[1]) * 1.e3
def function[hugoniot_t_single, parameter[rho, rho0, c0, s, gamma0, q, theta0, n, mass, three_r, t_ref, c_v]]: constant[ internal function to calculate pressure along Hugoniot :param rho: density in g/cm^3 :param rho0: density at 1 bar in g/cm^3 :param c0: velocity at 1 bar in km/s :param s: slope of the velocity change :param gamma0: Gruneisen parameter at 1 bar :param q: logarithmic derivative of Gruneisen parameter :param theta0: Debye temperature in K :param n: number of elements in a chemical formula :param mass: molar mass in gram :param three_r: 3 times gas constant. Jamieson modified this value to compensate for mismatches :param t_ref: reference temperature, 300 K :param c_v: heat capacity, see Jamieson 1983 for detail :return: temperature along hugoniot ] variable[eta] assign[=] binary_operation[constant[1.0] - binary_operation[name[rho0] / name[rho]]] if compare[name[eta] equal[==] constant[0.0]] begin[:] return[constant[300.0]] variable[threenk] assign[=] binary_operation[binary_operation[name[three_r] / name[mass]] * name[n]] variable[k] assign[=] list[[<ast.Name object at 0x7da18f09c2b0>, <ast.Name object at 0x7da18f09d780>, <ast.Name object at 0x7da18f09e110>, <ast.Name object at 0x7da18f09ffd0>, <ast.Name object at 0x7da18f09df30>, <ast.BinOp object at 0x7da18f09e050>]] variable[t_h] assign[=] call[name[odeint], parameter[name[_dT_h_delta], binary_operation[name[t_ref] / constant[1000.0]], list[[<ast.Constant object at 0x7da18f09ef20>, <ast.Name object at 0x7da18f09e6b0>]]]] variable[temp_h] assign[=] call[name[np].squeeze, parameter[call[call[name[t_h]][constant[0]]][constant[1]]]] return[binary_operation[name[temp_h] * constant[1000.0]]]
keyword[def] identifier[hugoniot_t_single] ( identifier[rho] , identifier[rho0] , identifier[c0] , identifier[s] , identifier[gamma0] , identifier[q] , identifier[theta0] , identifier[n] , identifier[mass] , identifier[three_r] = literal[int] * identifier[constants] . identifier[R] , identifier[t_ref] = literal[int] , identifier[c_v] = literal[int] ): literal[string] identifier[eta] = literal[int] - identifier[rho0] / identifier[rho] keyword[if] identifier[eta] == literal[int] : keyword[return] literal[int] identifier[threenk] = identifier[three_r] / identifier[mass] * identifier[n] identifier[k] =[ identifier[rho0] , identifier[c0] , identifier[s] , identifier[gamma0] , identifier[q] , identifier[theta0] / literal[int] ] identifier[t_h] = identifier[odeint] ( identifier[_dT_h_delta] , identifier[t_ref] / literal[int] ,[ literal[int] , identifier[eta] ], identifier[args] =( identifier[k] , identifier[threenk] , identifier[c_v] ), identifier[full_output] = literal[int] ) identifier[temp_h] = identifier[np] . identifier[squeeze] ( identifier[t_h] [ literal[int] ][ literal[int] ]) keyword[return] identifier[temp_h] * literal[int]
def hugoniot_t_single(rho, rho0, c0, s, gamma0, q, theta0, n, mass, three_r=3.0 * constants.R, t_ref=300.0, c_v=0.0): """ internal function to calculate pressure along Hugoniot :param rho: density in g/cm^3 :param rho0: density at 1 bar in g/cm^3 :param c0: velocity at 1 bar in km/s :param s: slope of the velocity change :param gamma0: Gruneisen parameter at 1 bar :param q: logarithmic derivative of Gruneisen parameter :param theta0: Debye temperature in K :param n: number of elements in a chemical formula :param mass: molar mass in gram :param three_r: 3 times gas constant. Jamieson modified this value to compensate for mismatches :param t_ref: reference temperature, 300 K :param c_v: heat capacity, see Jamieson 1983 for detail :return: temperature along hugoniot """ eta = 1.0 - rho0 / rho if eta == 0.0: return 300.0 # depends on [control=['if'], data=[]] threenk = three_r / mass * n # [J/mol/K] / [g/mol] = [J/g/K] k = [rho0, c0, s, gamma0, q, theta0 / 1000.0] t_h = odeint(_dT_h_delta, t_ref / 1000.0, [0.0, eta], args=(k, threenk, c_v), full_output=1) temp_h = np.squeeze(t_h[0][1]) return temp_h * 1000.0
def encode_positions(self,
                     positions: mx.sym.Symbol,
                     data: mx.sym.Symbol) -> mx.sym.Symbol:
    """
    :param positions: (batch_size,)
    :param data: (batch_size, num_embed)
    :return: (batch_size, num_embed)
    """
    # Look up a learned embedding for each position index.
    # (batch_size, source_seq_len, num_embed)
    embedded_positions = mx.sym.Embedding(data=positions,
                                          input_dim=self.max_seq_len,
                                          weight=self.embed_weight,
                                          output_dim=self.num_embed,
                                          name=self.prefix + "pos_embed")
    # Add the positional signal onto the data embedding.
    return mx.sym.broadcast_add(data, embedded_positions,
                                name="%s_add" % self.prefix)
def function[encode_positions, parameter[self, positions, data]]: constant[ :param positions: (batch_size,) :param data: (batch_size, num_embed) :return: (batch_size, num_embed) ] variable[pos_embedding] assign[=] call[name[mx].sym.Embedding, parameter[]] return[call[name[mx].sym.broadcast_add, parameter[name[data], name[pos_embedding]]]]
keyword[def] identifier[encode_positions] ( identifier[self] , identifier[positions] : identifier[mx] . identifier[sym] . identifier[Symbol] , identifier[data] : identifier[mx] . identifier[sym] . identifier[Symbol] )-> identifier[mx] . identifier[sym] . identifier[Symbol] : literal[string] identifier[pos_embedding] = identifier[mx] . identifier[sym] . identifier[Embedding] ( identifier[data] = identifier[positions] , identifier[input_dim] = identifier[self] . identifier[max_seq_len] , identifier[weight] = identifier[self] . identifier[embed_weight] , identifier[output_dim] = identifier[self] . identifier[num_embed] , identifier[name] = identifier[self] . identifier[prefix] + literal[string] ) keyword[return] identifier[mx] . identifier[sym] . identifier[broadcast_add] ( identifier[data] , identifier[pos_embedding] , identifier[name] = literal[string] % identifier[self] . identifier[prefix] )
def encode_positions(self, positions: mx.sym.Symbol, data: mx.sym.Symbol) -> mx.sym.Symbol: """ :param positions: (batch_size,) :param data: (batch_size, num_embed) :return: (batch_size, num_embed) """ # (batch_size, source_seq_len, num_embed) pos_embedding = mx.sym.Embedding(data=positions, input_dim=self.max_seq_len, weight=self.embed_weight, output_dim=self.num_embed, name=self.prefix + 'pos_embed') return mx.sym.broadcast_add(data, pos_embedding, name='%s_add' % self.prefix)
def force_recalculate(obj):
    '''
    Recalculate all ImageCountField and UserImageCountField fields in
    object ``obj``. This should be used if auto-updating of these fields
    was disabled for some reason.

    To disable auto-update when saving AttachedImage instance (for example
    when you need to save a lot of images and want to recalculate
    denormalised values only after all images are saved) use this pattern::

        image = AttachedImage(...)
        image.send_signal = False
        image.save()
    '''
    # The signal receiver only reads ``instance.content_object``, so a
    # minimal stand-in object is enough to trigger the recalculation.
    class Stub(object):
        content_object = obj

    image_saved.send(sender=obj.__class__, instance=Stub())
def function[force_recalculate, parameter[obj]]: constant[ Recalculate all ImageCountField and UserImageCountField fields in object ``obj``. This should be used if auto-updating of these fields was disabled for some reason. To disable auto-update when saving AttachedImage instance (for example when you need to save a lot of images and want to recalculate denormalised values only after all images are saved) use this pattern:: image = AttachedImage(...) image.send_signal = False image.save() ] class class[Stub, parameter[]] begin[:] variable[content_object] assign[=] name[obj] variable[img] assign[=] call[name[Stub], parameter[]] call[name[image_saved].send, parameter[]]
keyword[def] identifier[force_recalculate] ( identifier[obj] ): literal[string] keyword[class] identifier[Stub] ( identifier[object] ): identifier[content_object] = identifier[obj] identifier[img] = identifier[Stub] () identifier[image_saved] . identifier[send] ( identifier[sender] = identifier[obj] . identifier[__class__] , identifier[instance] = identifier[img] )
def force_recalculate(obj): """ Recalculate all ImageCountField and UserImageCountField fields in object ``obj``. This should be used if auto-updating of these fields was disabled for some reason. To disable auto-update when saving AttachedImage instance (for example when you need to save a lot of images and want to recalculate denormalised values only after all images are saved) use this pattern:: image = AttachedImage(...) image.send_signal = False image.save() """ class Stub(object): content_object = obj img = Stub() image_saved.send(sender=obj.__class__, instance=img)
def before(func):
    """
    Run a function before the handler is invoked, is passed the event &
    context and must return an event & context too.

    Usage::

        >>> # to create a reusable decorator
        >>> @before
        ... def print_request_id(event, context):
        ...     print(context.aws_request_id)
        ...     return event, context
        >>> @print_request_id
        ... def handler(event, context):
        ...     pass
        >>> class Context:
        ...     aws_request_id = 'ID!'
        >>> handler({}, Context())
        ID!
        >>> # or a one off
        >>> @before(lambda e, c: (e['body'], c))
        ... def handler(body, context):
        ...     return body
        >>> handler({'body': 'BOOODYY'}, object())
        'BOOODYY'
    """
    class BeforeDecorator(LambdaDecorator):
        def before(self, event, context):
            # Forward event/context to the user hook, which must return
            # the (event, context) pair to hand on to the handler.
            return func(event, context)

    return BeforeDecorator
def function[before, parameter[func]]: constant[ Run a function before the handler is invoked, is passed the event & context and must return an event & context too. Usage:: >>> # to create a reusable decorator >>> @before ... def print_request_id(event, context): ... print(context.aws_request_id) ... return event, context >>> @print_request_id ... def handler(event, context): ... pass >>> class Context: ... aws_request_id = 'ID!' >>> handler({}, Context()) ID! >>> # or a one off >>> @before(lambda e, c: (e['body'], c)) ... def handler(body, context): ... return body >>> handler({'body': 'BOOODYY'}, object()) 'BOOODYY' ] class class[BeforeDecorator, parameter[]] begin[:] def function[before, parameter[self, event, context]]: return[call[name[func], parameter[name[event], name[context]]]] return[name[BeforeDecorator]]
keyword[def] identifier[before] ( identifier[func] ): literal[string] keyword[class] identifier[BeforeDecorator] ( identifier[LambdaDecorator] ): keyword[def] identifier[before] ( identifier[self] , identifier[event] , identifier[context] ): keyword[return] identifier[func] ( identifier[event] , identifier[context] ) keyword[return] identifier[BeforeDecorator]
def before(func): """ Run a function before the handler is invoked, is passed the event & context and must return an event & context too. Usage:: >>> # to create a reusable decorator >>> @before ... def print_request_id(event, context): ... print(context.aws_request_id) ... return event, context >>> @print_request_id ... def handler(event, context): ... pass >>> class Context: ... aws_request_id = 'ID!' >>> handler({}, Context()) ID! >>> # or a one off >>> @before(lambda e, c: (e['body'], c)) ... def handler(body, context): ... return body >>> handler({'body': 'BOOODYY'}, object()) 'BOOODYY' """ class BeforeDecorator(LambdaDecorator): def before(self, event, context): return func(event, context) return BeforeDecorator
def fill_buffer(heap_data, i_chan):
    """Blocking function to populate data in the heap.

    This is run in an executor.

    :param heap_data: mapping holding the heap fields; its
        'correlator_output_data' entry must expose a 'VIS' array that
        supports slice assignment of a complex scalar (e.g. a numpy
        structured array field).
    :param i_chan: channel index, encoded into the imaginary part of
        the visibility data.
    """
    # Calculate the time count and fraction.
    # Use an aware UTC datetime: the former naive utcnow().timestamp()
    # is interpreted in *local* time and yields a wrong epoch value on
    # any host not running in UTC (and utcnow() is deprecated).
    now = datetime.datetime.now(datetime.timezone.utc)
    time_full = now.timestamp()
    time_count = int(time_full)
    # Sub-second remainder scaled to the full unsigned 32-bit range.
    time_fraction = int((time_full - time_count) * (2**32 - 1))
    # Seconds (with microsecond resolution) since UTC midnight.
    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    diff = now - midnight
    time_data = diff.seconds + 1e-6 * diff.microseconds

    # Write the data into the buffer.
    heap_data['visibility_timestamp_count'] = time_count
    heap_data['visibility_timestamp_fraction'] = time_fraction
    # A single slice assignment suffices; the former [:][:] chained a
    # redundant extra view.
    heap_data['correlator_output_data']['VIS'][:] = time_data + i_chan * 1j
def function[fill_buffer, parameter[heap_data, i_chan]]: constant[Blocking function to populate data in the heap. This is run in an executor. ] variable[now] assign[=] call[name[datetime].datetime.utcnow, parameter[]] variable[time_full] assign[=] call[name[now].timestamp, parameter[]] variable[time_count] assign[=] call[name[int], parameter[name[time_full]]] variable[time_fraction] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[time_full] - name[time_count]] * binary_operation[binary_operation[constant[2] ** constant[32]] - constant[1]]]]] variable[diff] assign[=] binary_operation[name[now] - call[name[now].replace, parameter[]]] variable[time_data] assign[=] binary_operation[name[diff].seconds + binary_operation[constant[1e-06] * name[diff].microseconds]] call[name[heap_data]][constant[visibility_timestamp_count]] assign[=] name[time_count] call[name[heap_data]][constant[visibility_timestamp_fraction]] assign[=] name[time_fraction] call[call[call[call[name[heap_data]][constant[correlator_output_data]]][constant[VIS]]][<ast.Slice object at 0x7da1b0535c90>]][<ast.Slice object at 0x7da1b05379d0>] assign[=] binary_operation[name[time_data] + binary_operation[name[i_chan] * constant[1j]]]
keyword[def] identifier[fill_buffer] ( identifier[heap_data] , identifier[i_chan] ): literal[string] identifier[now] = identifier[datetime] . identifier[datetime] . identifier[utcnow] () identifier[time_full] = identifier[now] . identifier[timestamp] () identifier[time_count] = identifier[int] ( identifier[time_full] ) identifier[time_fraction] = identifier[int] (( identifier[time_full] - identifier[time_count] )*( literal[int] ** literal[int] - literal[int] )) identifier[diff] = identifier[now] -( identifier[now] . identifier[replace] ( identifier[hour] = literal[int] , identifier[minute] = literal[int] , identifier[second] = literal[int] , identifier[microsecond] = literal[int] )) identifier[time_data] = identifier[diff] . identifier[seconds] + literal[int] * identifier[diff] . identifier[microseconds] identifier[heap_data] [ literal[string] ]= identifier[time_count] identifier[heap_data] [ literal[string] ]= identifier[time_fraction] identifier[heap_data] [ literal[string] ][ literal[string] ][:][:]= identifier[time_data] + identifier[i_chan] * literal[int]
def fill_buffer(heap_data, i_chan): """Blocking function to populate data in the heap. This is run in an executor. """ # Calculate the time count and fraction. now = datetime.datetime.utcnow() time_full = now.timestamp() time_count = int(time_full) time_fraction = int((time_full - time_count) * (2 ** 32 - 1)) diff = now - now.replace(hour=0, minute=0, second=0, microsecond=0) time_data = diff.seconds + 1e-06 * diff.microseconds # Write the data into the buffer. heap_data['visibility_timestamp_count'] = time_count heap_data['visibility_timestamp_fraction'] = time_fraction heap_data['correlator_output_data']['VIS'][:][:] = time_data + i_chan * 1j
def _freeze(self, final_text, err=False): """Stop spinner, compose last frame and 'freeze' it.""" if not final_text: final_text = "" target = self.stderr if err else self.stdout if target.closed: target = sys.stderr if err else sys.stdout text = to_text(final_text) last_frame = self._compose_out(text, mode="last") self._last_frame = decode_output(last_frame, target_stream=target) # Should be stopped here, otherwise prints after # self._freeze call will mess up the spinner self.stop() target.write(self._last_frame)
def function[_freeze, parameter[self, final_text, err]]: constant[Stop spinner, compose last frame and 'freeze' it.] if <ast.UnaryOp object at 0x7da18ede4760> begin[:] variable[final_text] assign[=] constant[] variable[target] assign[=] <ast.IfExp object at 0x7da18ede5540> if name[target].closed begin[:] variable[target] assign[=] <ast.IfExp object at 0x7da18ede7970> variable[text] assign[=] call[name[to_text], parameter[name[final_text]]] variable[last_frame] assign[=] call[name[self]._compose_out, parameter[name[text]]] name[self]._last_frame assign[=] call[name[decode_output], parameter[name[last_frame]]] call[name[self].stop, parameter[]] call[name[target].write, parameter[name[self]._last_frame]]
keyword[def] identifier[_freeze] ( identifier[self] , identifier[final_text] , identifier[err] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[final_text] : identifier[final_text] = literal[string] identifier[target] = identifier[self] . identifier[stderr] keyword[if] identifier[err] keyword[else] identifier[self] . identifier[stdout] keyword[if] identifier[target] . identifier[closed] : identifier[target] = identifier[sys] . identifier[stderr] keyword[if] identifier[err] keyword[else] identifier[sys] . identifier[stdout] identifier[text] = identifier[to_text] ( identifier[final_text] ) identifier[last_frame] = identifier[self] . identifier[_compose_out] ( identifier[text] , identifier[mode] = literal[string] ) identifier[self] . identifier[_last_frame] = identifier[decode_output] ( identifier[last_frame] , identifier[target_stream] = identifier[target] ) identifier[self] . identifier[stop] () identifier[target] . identifier[write] ( identifier[self] . identifier[_last_frame] )
def _freeze(self, final_text, err=False): """Stop spinner, compose last frame and 'freeze' it.""" if not final_text: final_text = '' # depends on [control=['if'], data=[]] target = self.stderr if err else self.stdout if target.closed: target = sys.stderr if err else sys.stdout # depends on [control=['if'], data=[]] text = to_text(final_text) last_frame = self._compose_out(text, mode='last') self._last_frame = decode_output(last_frame, target_stream=target) # Should be stopped here, otherwise prints after # self._freeze call will mess up the spinner self.stop() target.write(self._last_frame)
def setProperty(self, key, value): """ Sets the custom property for this item's key to the inputed value. If the widget has a column that matches the inputed key, then the value will be added to the tree widget as well. :param key | <str> value | <variant> """ if key == 'Name': self.setName(value) elif key == 'Start': self.setDateStart(value) elif key == 'End': self.setDateEnd(value) elif key == 'Calendar Days': self.setDuration(value) elif key == 'Time Start': self.setTimeStart(value) elif key == 'Time End': self.setTimeEnd(value) elif key == 'All Day': self.setAllDay(value) elif key == 'Workadys': pass else: self._properties[nativestring(key)] = value tree = self.treeWidget() if tree: col = tree.column(key) if col != -1: self.setData(col, Qt.EditRole, wrapVariant(value))
def function[setProperty, parameter[self, key, value]]: constant[ Sets the custom property for this item's key to the inputed value. If the widget has a column that matches the inputed key, then the value will be added to the tree widget as well. :param key | <str> value | <variant> ] if compare[name[key] equal[==] constant[Name]] begin[:] call[name[self].setName, parameter[name[value]]]
keyword[def] identifier[setProperty] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] keyword[if] identifier[key] == literal[string] : identifier[self] . identifier[setName] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setDateStart] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setDateEnd] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setDuration] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setTimeStart] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setTimeEnd] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : identifier[self] . identifier[setAllDay] ( identifier[value] ) keyword[elif] identifier[key] == literal[string] : keyword[pass] keyword[else] : identifier[self] . identifier[_properties] [ identifier[nativestring] ( identifier[key] )]= identifier[value] identifier[tree] = identifier[self] . identifier[treeWidget] () keyword[if] identifier[tree] : identifier[col] = identifier[tree] . identifier[column] ( identifier[key] ) keyword[if] identifier[col] !=- literal[int] : identifier[self] . identifier[setData] ( identifier[col] , identifier[Qt] . identifier[EditRole] , identifier[wrapVariant] ( identifier[value] ))
def setProperty(self, key, value): """ Sets the custom property for this item's key to the inputed value. If the widget has a column that matches the inputed key, then the value will be added to the tree widget as well. :param key | <str> value | <variant> """ if key == 'Name': self.setName(value) # depends on [control=['if'], data=[]] elif key == 'Start': self.setDateStart(value) # depends on [control=['if'], data=[]] elif key == 'End': self.setDateEnd(value) # depends on [control=['if'], data=[]] elif key == 'Calendar Days': self.setDuration(value) # depends on [control=['if'], data=[]] elif key == 'Time Start': self.setTimeStart(value) # depends on [control=['if'], data=[]] elif key == 'Time End': self.setTimeEnd(value) # depends on [control=['if'], data=[]] elif key == 'All Day': self.setAllDay(value) # depends on [control=['if'], data=[]] elif key == 'Workadys': pass # depends on [control=['if'], data=[]] else: self._properties[nativestring(key)] = value tree = self.treeWidget() if tree: col = tree.column(key) if col != -1: self.setData(col, Qt.EditRole, wrapVariant(value)) # depends on [control=['if'], data=['col']] # depends on [control=['if'], data=[]]
def get_parents_letters(self, goobj): """Get the letters representing all parent terms which are depth-01 GO terms.""" parents_all = set.union(self.go2parents[goobj.id]) parents_all.add(goobj.id) # print "{}({}) D{:02}".format(goobj.id, goobj.name, goobj.depth), parents_all parents_d1 = parents_all.intersection(self.gos_depth1) return [self.goone2ntletter[g].D1 for g in parents_d1]
def function[get_parents_letters, parameter[self, goobj]]: constant[Get the letters representing all parent terms which are depth-01 GO terms.] variable[parents_all] assign[=] call[name[set].union, parameter[call[name[self].go2parents][name[goobj].id]]] call[name[parents_all].add, parameter[name[goobj].id]] variable[parents_d1] assign[=] call[name[parents_all].intersection, parameter[name[self].gos_depth1]] return[<ast.ListComp object at 0x7da18bcca800>]
keyword[def] identifier[get_parents_letters] ( identifier[self] , identifier[goobj] ): literal[string] identifier[parents_all] = identifier[set] . identifier[union] ( identifier[self] . identifier[go2parents] [ identifier[goobj] . identifier[id] ]) identifier[parents_all] . identifier[add] ( identifier[goobj] . identifier[id] ) identifier[parents_d1] = identifier[parents_all] . identifier[intersection] ( identifier[self] . identifier[gos_depth1] ) keyword[return] [ identifier[self] . identifier[goone2ntletter] [ identifier[g] ]. identifier[D1] keyword[for] identifier[g] keyword[in] identifier[parents_d1] ]
def get_parents_letters(self, goobj): """Get the letters representing all parent terms which are depth-01 GO terms.""" parents_all = set.union(self.go2parents[goobj.id]) parents_all.add(goobj.id) # print "{}({}) D{:02}".format(goobj.id, goobj.name, goobj.depth), parents_all parents_d1 = parents_all.intersection(self.gos_depth1) return [self.goone2ntletter[g].D1 for g in parents_d1]
def _dict_key_priority(s): """Return priority for a given key object.""" if isinstance(s, Hook): return _priority(s._schema) - 0.5 if isinstance(s, Optional): return _priority(s._schema) + 0.5 return _priority(s)
def function[_dict_key_priority, parameter[s]]: constant[Return priority for a given key object.] if call[name[isinstance], parameter[name[s], name[Hook]]] begin[:] return[binary_operation[call[name[_priority], parameter[name[s]._schema]] - constant[0.5]]] if call[name[isinstance], parameter[name[s], name[Optional]]] begin[:] return[binary_operation[call[name[_priority], parameter[name[s]._schema]] + constant[0.5]]] return[call[name[_priority], parameter[name[s]]]]
keyword[def] identifier[_dict_key_priority] ( identifier[s] ): literal[string] keyword[if] identifier[isinstance] ( identifier[s] , identifier[Hook] ): keyword[return] identifier[_priority] ( identifier[s] . identifier[_schema] )- literal[int] keyword[if] identifier[isinstance] ( identifier[s] , identifier[Optional] ): keyword[return] identifier[_priority] ( identifier[s] . identifier[_schema] )+ literal[int] keyword[return] identifier[_priority] ( identifier[s] )
def _dict_key_priority(s): """Return priority for a given key object.""" if isinstance(s, Hook): return _priority(s._schema) - 0.5 # depends on [control=['if'], data=[]] if isinstance(s, Optional): return _priority(s._schema) + 0.5 # depends on [control=['if'], data=[]] return _priority(s)
def uniq(args): """ %prog uniq bedfile Remove overlapping features with higher scores. """ from jcvi.formats.sizes import Sizes p = OptionParser(uniq.__doc__) p.add_option("--sizes", help="Use sequence length as score") p.add_option("--mode", default="span", choices=("span", "score"), help="Pile mode") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args uniqbedfile = bedfile.split(".")[0] + ".uniq.bed" bed = Bed(bedfile) if opts.sizes: sizes = Sizes(opts.sizes).mapping ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \ for i, x in enumerate(bed)] else: if opts.mode == "span": ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \ for i, x in enumerate(bed)] else: ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \ for i, x in enumerate(bed)] selected, score = range_chain(ranges) selected = [x.id for x in selected] selected_ids = set(selected) selected = [bed[x] for x in selected] notselected = [x for i, x in enumerate(bed) if i not in selected_ids] newbed = Bed() newbed.extend(selected) newbed.print_to_file(uniqbedfile, sorted=True) if notselected: leftoverfile = bedfile.split(".")[0] + ".leftover.bed" leftoverbed = Bed() leftoverbed.extend(notselected) leftoverbed.print_to_file(leftoverfile, sorted=True) logging.debug("Imported: {0}, Exported: {1}".format(len(bed), len(newbed))) return uniqbedfile
def function[uniq, parameter[args]]: constant[ %prog uniq bedfile Remove overlapping features with higher scores. ] from relative_module[jcvi.formats.sizes] import module[Sizes] variable[p] assign[=] call[name[OptionParser], parameter[name[uniq].__doc__]] call[name[p].add_option, parameter[constant[--sizes]]] call[name[p].add_option, parameter[constant[--mode]]] <ast.Tuple object at 0x7da20c76d8d0> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c76d9f0>]] <ast.Tuple object at 0x7da20c76c490> assign[=] name[args] variable[uniqbedfile] assign[=] binary_operation[call[call[name[bedfile].split, parameter[constant[.]]]][constant[0]] + constant[.uniq.bed]] variable[bed] assign[=] call[name[Bed], parameter[name[bedfile]]] if name[opts].sizes begin[:] variable[sizes] assign[=] call[name[Sizes], parameter[name[opts].sizes]].mapping variable[ranges] assign[=] <ast.ListComp object at 0x7da20c76dff0> <ast.Tuple object at 0x7da20c76c610> assign[=] call[name[range_chain], parameter[name[ranges]]] variable[selected] assign[=] <ast.ListComp object at 0x7da20c76c730> variable[selected_ids] assign[=] call[name[set], parameter[name[selected]]] variable[selected] assign[=] <ast.ListComp object at 0x7da20c76c3d0> variable[notselected] assign[=] <ast.ListComp object at 0x7da20c76db10> variable[newbed] assign[=] call[name[Bed], parameter[]] call[name[newbed].extend, parameter[name[selected]]] call[name[newbed].print_to_file, parameter[name[uniqbedfile]]] if name[notselected] begin[:] variable[leftoverfile] assign[=] binary_operation[call[call[name[bedfile].split, parameter[constant[.]]]][constant[0]] + constant[.leftover.bed]] variable[leftoverbed] assign[=] call[name[Bed], parameter[]] call[name[leftoverbed].extend, parameter[name[notselected]]] call[name[leftoverbed].print_to_file, parameter[name[leftoverfile]]] 
call[name[logging].debug, parameter[call[constant[Imported: {0}, Exported: {1}].format, parameter[call[name[len], parameter[name[bed]]], call[name[len], parameter[name[newbed]]]]]]] return[name[uniqbedfile]]
keyword[def] identifier[uniq] ( identifier[args] ): literal[string] keyword[from] identifier[jcvi] . identifier[formats] . identifier[sizes] keyword[import] identifier[Sizes] identifier[p] = identifier[OptionParser] ( identifier[uniq] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[choices] =( literal[string] , literal[string] ), identifier[help] = literal[string] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[bedfile] ,= identifier[args] identifier[uniqbedfile] = identifier[bedfile] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] identifier[bed] = identifier[Bed] ( identifier[bedfile] ) keyword[if] identifier[opts] . identifier[sizes] : identifier[sizes] = identifier[Sizes] ( identifier[opts] . identifier[sizes] ). identifier[mapping] identifier[ranges] =[ identifier[Range] ( identifier[x] . identifier[seqid] , identifier[x] . identifier[start] , identifier[x] . identifier[end] , identifier[sizes] [ identifier[x] . identifier[accn] ], identifier[i] ) keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[bed] )] keyword[else] : keyword[if] identifier[opts] . identifier[mode] == literal[string] : identifier[ranges] =[ identifier[Range] ( identifier[x] . identifier[seqid] , identifier[x] . identifier[start] , identifier[x] . identifier[end] , identifier[x] . identifier[end] - identifier[x] . identifier[start] + literal[int] , identifier[i] ) keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[bed] )] keyword[else] : identifier[ranges] =[ identifier[Range] ( identifier[x] . 
identifier[seqid] , identifier[x] . identifier[start] , identifier[x] . identifier[end] , identifier[float] ( identifier[x] . identifier[score] ), identifier[i] ) keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[bed] )] identifier[selected] , identifier[score] = identifier[range_chain] ( identifier[ranges] ) identifier[selected] =[ identifier[x] . identifier[id] keyword[for] identifier[x] keyword[in] identifier[selected] ] identifier[selected_ids] = identifier[set] ( identifier[selected] ) identifier[selected] =[ identifier[bed] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[selected] ] identifier[notselected] =[ identifier[x] keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[bed] ) keyword[if] identifier[i] keyword[not] keyword[in] identifier[selected_ids] ] identifier[newbed] = identifier[Bed] () identifier[newbed] . identifier[extend] ( identifier[selected] ) identifier[newbed] . identifier[print_to_file] ( identifier[uniqbedfile] , identifier[sorted] = keyword[True] ) keyword[if] identifier[notselected] : identifier[leftoverfile] = identifier[bedfile] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] identifier[leftoverbed] = identifier[Bed] () identifier[leftoverbed] . identifier[extend] ( identifier[notselected] ) identifier[leftoverbed] . identifier[print_to_file] ( identifier[leftoverfile] , identifier[sorted] = keyword[True] ) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[bed] ), identifier[len] ( identifier[newbed] ))) keyword[return] identifier[uniqbedfile]
def uniq(args): """ %prog uniq bedfile Remove overlapping features with higher scores. """ from jcvi.formats.sizes import Sizes p = OptionParser(uniq.__doc__) p.add_option('--sizes', help='Use sequence length as score') p.add_option('--mode', default='span', choices=('span', 'score'), help='Pile mode') (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (bedfile,) = args uniqbedfile = bedfile.split('.')[0] + '.uniq.bed' bed = Bed(bedfile) if opts.sizes: sizes = Sizes(opts.sizes).mapping ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) for (i, x) in enumerate(bed)] # depends on [control=['if'], data=[]] elif opts.mode == 'span': ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) for (i, x) in enumerate(bed)] # depends on [control=['if'], data=[]] else: ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) for (i, x) in enumerate(bed)] (selected, score) = range_chain(ranges) selected = [x.id for x in selected] selected_ids = set(selected) selected = [bed[x] for x in selected] notselected = [x for (i, x) in enumerate(bed) if i not in selected_ids] newbed = Bed() newbed.extend(selected) newbed.print_to_file(uniqbedfile, sorted=True) if notselected: leftoverfile = bedfile.split('.')[0] + '.leftover.bed' leftoverbed = Bed() leftoverbed.extend(notselected) leftoverbed.print_to_file(leftoverfile, sorted=True) # depends on [control=['if'], data=[]] logging.debug('Imported: {0}, Exported: {1}'.format(len(bed), len(newbed))) return uniqbedfile
def sign(self, message): """Signs a message. Args: message: bytes, Message to be signed. Returns: string, The signature of the message for the given key. """ message = _helpers._to_bytes(message, encoding='utf-8') return rsa.pkcs1.sign(message, self._key, 'SHA-256')
def function[sign, parameter[self, message]]: constant[Signs a message. Args: message: bytes, Message to be signed. Returns: string, The signature of the message for the given key. ] variable[message] assign[=] call[name[_helpers]._to_bytes, parameter[name[message]]] return[call[name[rsa].pkcs1.sign, parameter[name[message], name[self]._key, constant[SHA-256]]]]
keyword[def] identifier[sign] ( identifier[self] , identifier[message] ): literal[string] identifier[message] = identifier[_helpers] . identifier[_to_bytes] ( identifier[message] , identifier[encoding] = literal[string] ) keyword[return] identifier[rsa] . identifier[pkcs1] . identifier[sign] ( identifier[message] , identifier[self] . identifier[_key] , literal[string] )
def sign(self, message): """Signs a message. Args: message: bytes, Message to be signed. Returns: string, The signature of the message for the given key. """ message = _helpers._to_bytes(message, encoding='utf-8') return rsa.pkcs1.sign(message, self._key, 'SHA-256')
def getStatus(self): """Returns the charger's charge status, as a string""" command = '$GS' status = self.sendCommand(command) return states[int(status[1])]
def function[getStatus, parameter[self]]: constant[Returns the charger's charge status, as a string] variable[command] assign[=] constant[$GS] variable[status] assign[=] call[name[self].sendCommand, parameter[name[command]]] return[call[name[states]][call[name[int], parameter[call[name[status]][constant[1]]]]]]
keyword[def] identifier[getStatus] ( identifier[self] ): literal[string] identifier[command] = literal[string] identifier[status] = identifier[self] . identifier[sendCommand] ( identifier[command] ) keyword[return] identifier[states] [ identifier[int] ( identifier[status] [ literal[int] ])]
def getStatus(self): """Returns the charger's charge status, as a string""" command = '$GS' status = self.sendCommand(command) return states[int(status[1])]
def get_parent(self): """Returns the parent :obj:`Gtk.TreeModelRow` or htis row or None""" parent_iter = self.model.iter_parent(self.iter) if parent_iter: return TreeModelRow(self.model, parent_iter)
def function[get_parent, parameter[self]]: constant[Returns the parent :obj:`Gtk.TreeModelRow` or htis row or None] variable[parent_iter] assign[=] call[name[self].model.iter_parent, parameter[name[self].iter]] if name[parent_iter] begin[:] return[call[name[TreeModelRow], parameter[name[self].model, name[parent_iter]]]]
keyword[def] identifier[get_parent] ( identifier[self] ): literal[string] identifier[parent_iter] = identifier[self] . identifier[model] . identifier[iter_parent] ( identifier[self] . identifier[iter] ) keyword[if] identifier[parent_iter] : keyword[return] identifier[TreeModelRow] ( identifier[self] . identifier[model] , identifier[parent_iter] )
def get_parent(self): """Returns the parent :obj:`Gtk.TreeModelRow` or htis row or None""" parent_iter = self.model.iter_parent(self.iter) if parent_iter: return TreeModelRow(self.model, parent_iter) # depends on [control=['if'], data=[]]
def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs): """A string module that builds a string. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipeforever pipe or an iterable of items conf : { 'part': [ {'value': '<img src="'}, {'subkey': 'img.src'}, {'value': '">'} ] } Returns ------- _OUTPUT : generator of joined strings """ splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs)) _OUTPUT = starmap(parse_result, splits) return _OUTPUT
def function[pipe_strconcat, parameter[context, _INPUT, conf]]: constant[A string module that builds a string. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipeforever pipe or an iterable of items conf : { 'part': [ {'value': '<img src="'}, {'subkey': 'img.src'}, {'value': '">'} ] } Returns ------- _OUTPUT : generator of joined strings ] variable[splits] assign[=] call[name[get_splits], parameter[name[_INPUT], call[name[conf]][constant[part]]]] variable[_OUTPUT] assign[=] call[name[starmap], parameter[name[parse_result], name[splits]]] return[name[_OUTPUT]]
keyword[def] identifier[pipe_strconcat] ( identifier[context] = keyword[None] , identifier[_INPUT] = keyword[None] , identifier[conf] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[splits] = identifier[get_splits] ( identifier[_INPUT] , identifier[conf] [ literal[string] ],** identifier[cdicts] ( identifier[opts] , identifier[kwargs] )) identifier[_OUTPUT] = identifier[starmap] ( identifier[parse_result] , identifier[splits] ) keyword[return] identifier[_OUTPUT]
def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs): """A string module that builds a string. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipeforever pipe or an iterable of items conf : { 'part': [ {'value': '<img src="'}, {'subkey': 'img.src'}, {'value': '">'} ] } Returns ------- _OUTPUT : generator of joined strings """ splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs)) _OUTPUT = starmap(parse_result, splits) return _OUTPUT
def _on_close(self, socket): """ Called when the connection was closed. """ self.logger.debug('Connection closed.') for subscription in self.subscriptions.values(): if subscription.state == 'subscribed': subscription.state = 'connection_pending'
def function[_on_close, parameter[self, socket]]: constant[ Called when the connection was closed. ] call[name[self].logger.debug, parameter[constant[Connection closed.]]] for taget[name[subscription]] in starred[call[name[self].subscriptions.values, parameter[]]] begin[:] if compare[name[subscription].state equal[==] constant[subscribed]] begin[:] name[subscription].state assign[=] constant[connection_pending]
keyword[def] identifier[_on_close] ( identifier[self] , identifier[socket] ): literal[string] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[for] identifier[subscription] keyword[in] identifier[self] . identifier[subscriptions] . identifier[values] (): keyword[if] identifier[subscription] . identifier[state] == literal[string] : identifier[subscription] . identifier[state] = literal[string]
def _on_close(self, socket): """ Called when the connection was closed. """ self.logger.debug('Connection closed.') for subscription in self.subscriptions.values(): if subscription.state == 'subscribed': subscription.state = 'connection_pending' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subscription']]
def get_grade_system_metadata(self): """Gets the metadata for a grading system. return: (osid.Metadata) - metadata for the grade system *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['grade_system']) metadata.update({'existing_id_values': self._my_map['gradeSystemId']}) return Metadata(**metadata)
def function[get_grade_system_metadata, parameter[self]]: constant[Gets the metadata for a grading system. return: (osid.Metadata) - metadata for the grade system *compliance: mandatory -- This method must be implemented.* ] variable[metadata] assign[=] call[name[dict], parameter[call[name[self]._mdata][constant[grade_system]]]] call[name[metadata].update, parameter[dictionary[[<ast.Constant object at 0x7da20c7cbe80>], [<ast.Subscript object at 0x7da20c7cafb0>]]]] return[call[name[Metadata], parameter[]]]
keyword[def] identifier[get_grade_system_metadata] ( identifier[self] ): literal[string] identifier[metadata] = identifier[dict] ( identifier[self] . identifier[_mdata] [ literal[string] ]) identifier[metadata] . identifier[update] ({ literal[string] : identifier[self] . identifier[_my_map] [ literal[string] ]}) keyword[return] identifier[Metadata] (** identifier[metadata] )
def get_grade_system_metadata(self): """Gets the metadata for a grading system. return: (osid.Metadata) - metadata for the grade system *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['grade_system']) metadata.update({'existing_id_values': self._my_map['gradeSystemId']}) return Metadata(**metadata)
def from_file(cls, fpath, position=1, file_id=None): """ Convience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa. """ if file_id is None: file_id = fpath with open(fpath) as f: code = f.read() file_content = str(code) file_metadata = FileMetadata(file_id, position) return cls(file_metadata, file_content)
def function[from_file, parameter[cls, fpath, position, file_id]]: constant[ Convience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa. ] if compare[name[file_id] is constant[None]] begin[:] variable[file_id] assign[=] name[fpath] with call[name[open], parameter[name[fpath]]] begin[:] variable[code] assign[=] call[name[f].read, parameter[]] variable[file_content] assign[=] call[name[str], parameter[name[code]]] variable[file_metadata] assign[=] call[name[FileMetadata], parameter[name[file_id], name[position]]] return[call[name[cls], parameter[name[file_metadata], name[file_content]]]]
keyword[def] identifier[from_file] ( identifier[cls] , identifier[fpath] , identifier[position] = literal[int] , identifier[file_id] = keyword[None] ): literal[string] keyword[if] identifier[file_id] keyword[is] keyword[None] : identifier[file_id] = identifier[fpath] keyword[with] identifier[open] ( identifier[fpath] ) keyword[as] identifier[f] : identifier[code] = identifier[f] . identifier[read] () identifier[file_content] = identifier[str] ( identifier[code] ) identifier[file_metadata] = identifier[FileMetadata] ( identifier[file_id] , identifier[position] ) keyword[return] identifier[cls] ( identifier[file_metadata] , identifier[file_content] )
def from_file(cls, fpath, position=1, file_id=None): """ Convience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa. """ if file_id is None: file_id = fpath # depends on [control=['if'], data=['file_id']] with open(fpath) as f: code = f.read() file_content = str(code) file_metadata = FileMetadata(file_id, position) return cls(file_metadata, file_content) # depends on [control=['with'], data=['f']]
def iterative_plane_errors(axes,covariance_matrix, **kwargs):
    """
    An iterative version of `pca.plane_errors`,
    which computes an error surface for a plane.

    axes: presumably a 3x3 matrix of principal axes (rows) from a PCA
        fit -- TODO confirm orientation convention against pca module.
    covariance_matrix: covariance whose diagonal supplies the
        per-axis standard deviations used as error magnitudes.
    kwargs:
        sheet -- 'upper', 'lower' or 'nominal' error sheet (default 'upper')
        level -- accepted but not used below; NOTE(review): confirm
            whether scaling by `level` was intended here.
        n -- number of points around the unit circle (default 100)

    Returns an (n, 2) array of (lon, lat) pairs in radians.
    """
    sheet = kwargs.pop('sheet','upper')
    level = kwargs.pop('level',1)
    n = kwargs.pop('n',100)
    # Standard deviations along each axis from the covariance diagonal.
    cov = N.sqrt(N.diagonal(covariance_matrix))
    u = N.linspace(0, 2*N.pi, n)
    # Sign of the out-of-plane offset selects the error sheet.
    scales = dict(upper=1,lower=-1,nominal=0)
    c1 = scales[sheet]
    c1 *= -1
    # We assume upper hemisphere
    if axes[2,2] < 0:
        c1 *= -1

    def sdot(a,b):
        # Plain dot product of two 3-vectors.
        return sum([i*j for i,j in zip(a,b)])

    def step_func(a):
        # Error displacement at angle `a` in the axis frame ...
        e = [N.cos(a)*cov[0], N.sin(a)*cov[1], c1*cov[2]]
        # ... rotated into the global frame via the axes matrix.
        d = [sdot(e,i) for i in axes.T]
        x,y,z = d[2],d[0],d[1]
        r = N.sqrt(x**2 + y**2 + z**2)
        lat = N.arcsin(z/r)
        lon = N.arctan2(y, x)
        return lon,lat
    # Get a bundle of vectors defining
    # a full rotation around the unit circle
    return N.array([step_func(i) for i in u])
def function[iterative_plane_errors, parameter[axes, covariance_matrix]]: constant[ An iterative version of `pca.plane_errors`, which computes an error surface for a plane. ] variable[sheet] assign[=] call[name[kwargs].pop, parameter[constant[sheet], constant[upper]]] variable[level] assign[=] call[name[kwargs].pop, parameter[constant[level], constant[1]]] variable[n] assign[=] call[name[kwargs].pop, parameter[constant[n], constant[100]]] variable[cov] assign[=] call[name[N].sqrt, parameter[call[name[N].diagonal, parameter[name[covariance_matrix]]]]] variable[u] assign[=] call[name[N].linspace, parameter[constant[0], binary_operation[constant[2] * name[N].pi], name[n]]] variable[scales] assign[=] call[name[dict], parameter[]] variable[c1] assign[=] call[name[scales]][name[sheet]] <ast.AugAssign object at 0x7da1b185c910> if compare[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b185ec20>, <ast.Constant object at 0x7da1b185c8b0>]]] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b185ece0> def function[sdot, parameter[a, b]]: return[call[name[sum], parameter[<ast.ListComp object at 0x7da1b185d480>]]] def function[step_func, parameter[a]]: variable[e] assign[=] list[[<ast.BinOp object at 0x7da1b185fee0>, <ast.BinOp object at 0x7da1b185d7b0>, <ast.BinOp object at 0x7da1b185e0e0>]] variable[d] assign[=] <ast.ListComp object at 0x7da1b185cd00> <ast.Tuple object at 0x7da1b185d720> assign[=] tuple[[<ast.Subscript object at 0x7da1b185ea40>, <ast.Subscript object at 0x7da1b185c5e0>, <ast.Subscript object at 0x7da1b185d060>]] variable[r] assign[=] call[name[N].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[x] ** constant[2]] + binary_operation[name[y] ** constant[2]]] + binary_operation[name[z] ** constant[2]]]]] variable[lat] assign[=] call[name[N].arcsin, parameter[binary_operation[name[z] / name[r]]]] variable[lon] assign[=] call[name[N].arctan2, parameter[name[y], name[x]]] return[tuple[[<ast.Name object at 0x7da1b185e5f0>, 
<ast.Name object at 0x7da1b185c6d0>]]] return[call[name[N].array, parameter[<ast.ListComp object at 0x7da1b18c0610>]]]
keyword[def] identifier[iterative_plane_errors] ( identifier[axes] , identifier[covariance_matrix] ,** identifier[kwargs] ): literal[string] identifier[sheet] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ) identifier[level] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] ) identifier[n] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] ) identifier[cov] = identifier[N] . identifier[sqrt] ( identifier[N] . identifier[diagonal] ( identifier[covariance_matrix] )) identifier[u] = identifier[N] . identifier[linspace] ( literal[int] , literal[int] * identifier[N] . identifier[pi] , identifier[n] ) identifier[scales] = identifier[dict] ( identifier[upper] = literal[int] , identifier[lower] =- literal[int] , identifier[nominal] = literal[int] ) identifier[c1] = identifier[scales] [ identifier[sheet] ] identifier[c1] *=- literal[int] keyword[if] identifier[axes] [ literal[int] , literal[int] ]< literal[int] : identifier[c1] *=- literal[int] keyword[def] identifier[sdot] ( identifier[a] , identifier[b] ): keyword[return] identifier[sum] ([ identifier[i] * identifier[j] keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] ( identifier[a] , identifier[b] )]) keyword[def] identifier[step_func] ( identifier[a] ): identifier[e] =[ identifier[N] . identifier[cos] ( identifier[a] )* identifier[cov] [ literal[int] ], identifier[N] . identifier[sin] ( identifier[a] )* identifier[cov] [ literal[int] ], identifier[c1] * identifier[cov] [ literal[int] ]] identifier[d] =[ identifier[sdot] ( identifier[e] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[axes] . identifier[T] ] identifier[x] , identifier[y] , identifier[z] = identifier[d] [ literal[int] ], identifier[d] [ literal[int] ], identifier[d] [ literal[int] ] identifier[r] = identifier[N] . 
identifier[sqrt] ( identifier[x] ** literal[int] + identifier[y] ** literal[int] + identifier[z] ** literal[int] ) identifier[lat] = identifier[N] . identifier[arcsin] ( identifier[z] / identifier[r] ) identifier[lon] = identifier[N] . identifier[arctan2] ( identifier[y] , identifier[x] ) keyword[return] identifier[lon] , identifier[lat] keyword[return] identifier[N] . identifier[array] ([ identifier[step_func] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[u] ])
def iterative_plane_errors(axes, covariance_matrix, **kwargs): """ An iterative version of `pca.plane_errors`, which computes an error surface for a plane. """ sheet = kwargs.pop('sheet', 'upper') level = kwargs.pop('level', 1) n = kwargs.pop('n', 100) cov = N.sqrt(N.diagonal(covariance_matrix)) u = N.linspace(0, 2 * N.pi, n) scales = dict(upper=1, lower=-1, nominal=0) c1 = scales[sheet] c1 *= -1 # We assume upper hemisphere if axes[2, 2] < 0: c1 *= -1 # depends on [control=['if'], data=[]] def sdot(a, b): return sum([i * j for (i, j) in zip(a, b)]) def step_func(a): e = [N.cos(a) * cov[0], N.sin(a) * cov[1], c1 * cov[2]] d = [sdot(e, i) for i in axes.T] (x, y, z) = (d[2], d[0], d[1]) r = N.sqrt(x ** 2 + y ** 2 + z ** 2) lat = N.arcsin(z / r) lon = N.arctan2(y, x) return (lon, lat) # Get a bundle of vectors defining # a full rotation around the unit circle return N.array([step_func(i) for i in u])
def _statsd_address(self): """Return a tuple of host and port for the statsd server to send stats to. :return: tuple(host, port) """ return (self.application.settings.get('statsd', {}).get('host', self.STATSD_HOST), self.application.settings.get('statsd', {}).get('port', self.STATSD_PORT))
def function[_statsd_address, parameter[self]]: constant[Return a tuple of host and port for the statsd server to send stats to. :return: tuple(host, port) ] return[tuple[[<ast.Call object at 0x7da204623910>, <ast.Call object at 0x7da2046217b0>]]]
keyword[def] identifier[_statsd_address] ( identifier[self] ): literal[string] keyword[return] ( identifier[self] . identifier[application] . identifier[settings] . identifier[get] ( literal[string] , {}). identifier[get] ( literal[string] , identifier[self] . identifier[STATSD_HOST] ), identifier[self] . identifier[application] . identifier[settings] . identifier[get] ( literal[string] , {}). identifier[get] ( literal[string] , identifier[self] . identifier[STATSD_PORT] ))
def _statsd_address(self): """Return a tuple of host and port for the statsd server to send stats to. :return: tuple(host, port) """ return (self.application.settings.get('statsd', {}).get('host', self.STATSD_HOST), self.application.settings.get('statsd', {}).get('port', self.STATSD_PORT))
def match_url(self, request):
    """Return True when request.path matches this item's url.

    The match is anchored with ``$`` when ``exact_url`` is set;
    otherwise a prefix match is enough.
    """
    if self.exact_url:
        pattern = "%s$" % (self.url,)
    else:
        pattern = "%s" % self.url
    return re.match(pattern, request.path) is not None
def function[match_url, parameter[self, request]]: constant[ match url determines if this is selected ] variable[matched] assign[=] constant[False] if name[self].exact_url begin[:] if call[name[re].match, parameter[binary_operation[constant[%s$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b050ab30>]]], name[request].path]] begin[:] variable[matched] assign[=] constant[True] return[name[matched]]
keyword[def] identifier[match_url] ( identifier[self] , identifier[request] ): literal[string] identifier[matched] = keyword[False] keyword[if] identifier[self] . identifier[exact_url] : keyword[if] identifier[re] . identifier[match] ( literal[string] %( identifier[self] . identifier[url] ,), identifier[request] . identifier[path] ): identifier[matched] = keyword[True] keyword[elif] identifier[re] . identifier[match] ( literal[string] % identifier[self] . identifier[url] , identifier[request] . identifier[path] ): identifier[matched] = keyword[True] keyword[return] identifier[matched]
def match_url(self, request): """ match url determines if this is selected """ matched = False if self.exact_url: if re.match('%s$' % (self.url,), request.path): matched = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif re.match('%s' % self.url, request.path): matched = True # depends on [control=['if'], data=[]] return matched
def returns_cumulative(returns, geometric=True, expanding=False):
    """Compute the cumulative return of a return series/frame.

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : bool, default True
        If True, geometrically link returns; otherwise sum them.
    expanding : bool, default False
        If True, return the expanding series/frame of returns;
        if False, return only the final value(s).
    """
    if geometric:
        linked = 1. + returns
        if expanding:
            return linked.cumprod() - 1.
        return linked.prod() - 1.
    if expanding:
        return returns.cumsum()
    return returns.sum()
def function[returns_cumulative, parameter[returns, geometric, expanding]]: constant[ return the cumulative return Parameters ---------- returns : DataFrame or Series geometric : bool, default is True If True, geometrically link returns expanding : bool default is False If True, return expanding series/frame of returns If False, return the final value(s) ] if name[expanding] begin[:] if name[geometric] begin[:] return[binary_operation[call[binary_operation[constant[1.0] + name[returns]].cumprod, parameter[]] - constant[1.0]]]
keyword[def] identifier[returns_cumulative] ( identifier[returns] , identifier[geometric] = keyword[True] , identifier[expanding] = keyword[False] ): literal[string] keyword[if] identifier[expanding] : keyword[if] identifier[geometric] : keyword[return] ( literal[int] + identifier[returns] ). identifier[cumprod] ()- literal[int] keyword[else] : keyword[return] identifier[returns] . identifier[cumsum] () keyword[else] : keyword[if] identifier[geometric] : keyword[return] ( literal[int] + identifier[returns] ). identifier[prod] ()- literal[int] keyword[else] : keyword[return] identifier[returns] . identifier[sum] ()
def returns_cumulative(returns, geometric=True, expanding=False): """ return the cumulative return Parameters ---------- returns : DataFrame or Series geometric : bool, default is True If True, geometrically link returns expanding : bool default is False If True, return expanding series/frame of returns If False, return the final value(s) """ if expanding: if geometric: return (1.0 + returns).cumprod() - 1.0 # depends on [control=['if'], data=[]] else: return returns.cumsum() # depends on [control=['if'], data=[]] elif geometric: return (1.0 + returns).prod() - 1.0 # depends on [control=['if'], data=[]] else: return returns.sum()
def _get_substitute_element(head, elt, ps):
    '''if elt matches a member of the head substitutionGroup, return
    the GED typecode.

    head -- ElementDeclaration typecode,
    elt -- the DOM element being parsed
    ps -- ParsedSoap Instance
    '''
    if isinstance(head, ElementDeclaration):
        return ElementDeclaration.getSubstitutionElement(head, elt, ps)
    # Non-ElementDeclaration heads never have a substitution group.
    return None
def function[_get_substitute_element, parameter[head, elt, ps]]: constant[if elt matches a member of the head substitutionGroup, return the GED typecode. head -- ElementDeclaration typecode, elt -- the DOM element being parsed ps -- ParsedSoap Instance ] if <ast.UnaryOp object at 0x7da20c6c62f0> begin[:] return[constant[None]] return[call[name[ElementDeclaration].getSubstitutionElement, parameter[name[head], name[elt], name[ps]]]]
keyword[def] identifier[_get_substitute_element] ( identifier[head] , identifier[elt] , identifier[ps] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[head] , identifier[ElementDeclaration] ): keyword[return] keyword[None] keyword[return] identifier[ElementDeclaration] . identifier[getSubstitutionElement] ( identifier[head] , identifier[elt] , identifier[ps] )
def _get_substitute_element(head, elt, ps): """if elt matches a member of the head substitutionGroup, return the GED typecode. head -- ElementDeclaration typecode, elt -- the DOM element being parsed ps -- ParsedSoap Instance """ if not isinstance(head, ElementDeclaration): return None # depends on [control=['if'], data=[]] return ElementDeclaration.getSubstitutionElement(head, elt, ps)
def channel_names(self, usecols=None):
    """Attempt to extract the channel names from the data file.

    Return a list with names. Return None on failed attempt.

    usecols: A list with columns to use. If present, the returned list
    will include only names for columns requested. It will align with
    the columns returned by numpys loadtxt by using the same keyword
    (usecols).

    Raises IndexError when usecols references a column index beyond
    the data column count.
    """
    # Search from [rts - 1] and up (last row before data). Split
    # respective row on datdel. Accept consecutive elements starting
    # with an alpha character after strip. If the count of elements
    # equals the data count on row rts + 1, accept it as the channel
    # names.
    if self.decdel == '.':
        datcnt = self.matches_p[self.rts]
    elif self.decdel == ',':
        datcnt = self.matches_c[self.rts]
    if usecols and max(usecols) >= datcnt:
        mess = ' Max column index is '
        raise IndexError(str(usecols) + mess + str(datcnt - 1))
    names = None
    if not self.rts:
        # Only data, no header rows at all.
        return None
    # From last row before data and up.
    for row in self.rows[self.rts - 1::-1]:
        # datdel might be None, (whitespace)
        splitlist = row.split(self.datdel)
        for i, word in enumerate(splitlist):
            if not word.strip().startswith(ALPHAS):
                break
            elif i + 1 == datcnt:
                # Accept this row as the header.
                names = [ch.strip() for ch in splitlist[:datcnt]]
                break
        if names:
            break
    # Bug fix: when no header row was found, names is still None and
    # indexing it would raise TypeError; honor the documented
    # "None on failed attempt" contract instead.
    if usecols and names:
        names = [names[i] for i in sorted(usecols)]
    return names
def function[channel_names, parameter[self, usecols]]: constant[Attempt to extract the channel names from the data file. Return a list with names. Return None on failed attempt. usecols: A list with columns to use. If present, the returned list will include only names for columns requested. It will align with the columns returned by numpys loadtxt by using the same keyword (usecols). ] if compare[name[self].decdel equal[==] constant[.]] begin[:] variable[datcnt] assign[=] call[name[self].matches_p][name[self].rts] if <ast.BoolOp object at 0x7da20c76d300> begin[:] variable[mess] assign[=] constant[ Max column index is ] <ast.Raise object at 0x7da20c76ca00> variable[names] assign[=] constant[None] if <ast.UnaryOp object at 0x7da20c76f2e0> begin[:] return[constant[None]] for taget[name[row]] in starred[call[name[self].rows][<ast.Slice object at 0x7da20c76cfd0>]] begin[:] variable[splitlist] assign[=] call[name[row].split, parameter[name[self].datdel]] for taget[tuple[[<ast.Name object at 0x7da20c76c340>, <ast.Name object at 0x7da20c76db10>]]] in starred[call[name[enumerate], parameter[name[splitlist]]]] begin[:] if <ast.UnaryOp object at 0x7da20c76dba0> begin[:] break if name[names] begin[:] break if name[usecols] begin[:] variable[names] assign[=] <ast.ListComp object at 0x7da20c76e230> return[name[names]]
keyword[def] identifier[channel_names] ( identifier[self] , identifier[usecols] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[decdel] == literal[string] : identifier[datcnt] = identifier[self] . identifier[matches_p] [ identifier[self] . identifier[rts] ] keyword[elif] identifier[self] . identifier[decdel] == literal[string] : identifier[datcnt] = identifier[self] . identifier[matches_c] [ identifier[self] . identifier[rts] ] keyword[if] identifier[usecols] keyword[and] identifier[max] ( identifier[usecols] )>= identifier[datcnt] : identifier[mess] = literal[string] keyword[raise] identifier[IndexError] ( identifier[str] ( identifier[usecols] )+ identifier[mess] + identifier[str] ( identifier[datcnt] - literal[int] )) identifier[names] = keyword[None] keyword[if] keyword[not] identifier[self] . identifier[rts] : keyword[return] keyword[None] keyword[for] identifier[row] keyword[in] identifier[self] . identifier[rows] [ identifier[self] . identifier[rts] - literal[int] ::- literal[int] ]: identifier[splitlist] = identifier[row] . identifier[split] ( identifier[self] . identifier[datdel] ) keyword[for] identifier[i] , identifier[word] keyword[in] identifier[enumerate] ( identifier[splitlist] ): keyword[if] keyword[not] identifier[word] . identifier[strip] (). identifier[startswith] ( identifier[ALPHAS] ): keyword[break] keyword[elif] identifier[i] + literal[int] == identifier[datcnt] : identifier[names] =[ identifier[ch] . identifier[strip] () keyword[for] identifier[ch] keyword[in] identifier[splitlist] [: identifier[datcnt] ]] keyword[break] keyword[if] identifier[names] : keyword[break] keyword[if] identifier[usecols] : identifier[names] =[ identifier[names] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[sorted] ( identifier[usecols] )] keyword[return] identifier[names]
def channel_names(self, usecols=None): """Attempt to extract the channel names from the data file. Return a list with names. Return None on failed attempt. usecols: A list with columns to use. If present, the returned list will include only names for columns requested. It will align with the columns returned by numpys loadtxt by using the same keyword (usecols). """ # Search from [rts - 1] and up (last row before data). Split respective # row on datdel. Accept consecutive elements starting with alphas # character after strip. If the count of elements equals the data count # on row rts + 1, accept it as the channel names. if self.decdel == '.': datcnt = self.matches_p[self.rts] # depends on [control=['if'], data=[]] elif self.decdel == ',': datcnt = self.matches_c[self.rts] # depends on [control=['if'], data=[]] if usecols and max(usecols) >= datcnt: mess = ' Max column index is ' raise IndexError(str(usecols) + mess + str(datcnt - 1)) # depends on [control=['if'], data=[]] names = None if not self.rts: # Only data. return None # depends on [control=['if'], data=[]] # From last row before data and up. for row in self.rows[self.rts - 1::-1]: # datdel might be None, (whitespace) splitlist = row.split(self.datdel) for (i, word) in enumerate(splitlist): if not word.strip().startswith(ALPHAS): break # depends on [control=['if'], data=[]] elif i + 1 == datcnt: # Accept names = [ch.strip() for ch in splitlist[:datcnt]] break # depends on [control=['if'], data=['datcnt']] # depends on [control=['for'], data=[]] if names: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] if usecols: names = [names[i] for i in sorted(usecols)] # depends on [control=['if'], data=[]] return names
def merge(
    left,
    right,
    how="inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    sort=False,
    suffixes=("_x", "_y"),
    copy=True,
    indicator=False,
    validate=None,
):
    """Database style join, where common columns in "on" are merged.

    Args:
        left: DataFrame.
        right: DataFrame.
        how: What type of join to use.
        on: The common column name(s) to join on. If None, and left_on and
            right_on are also None, will default to all commonly named
            columns.
        left_on: The column(s) on the left to use for the join.
        right_on: The column(s) on the right to use for the join.
        left_index: Use the index from the left as the join keys.
        right_index: Use the index from the right as the join keys.
        sort: Sort the join keys lexicographically in the result.
        suffixes: Add this suffix to the common names not in the "on".
        copy: Does nothing in our implementation
        indicator: Adds a column named _merge to the DataFrame with
            metadata from the merge about each row.
        validate: Checks if merge is a specific type.

    Returns:
        A merged Dataframe
    """
    if not isinstance(left, DataFrame):
        # The type check is on `left`, so report left's type; the old
        # message misleadingly printed type(right).
        raise ValueError(
            "can not merge DataFrame with instance of type {}".format(type(left))
        )
    # Delegate to the DataFrame method, which implements the join.
    return left.merge(
        right,
        how=how,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        sort=sort,
        suffixes=suffixes,
        copy=copy,
        indicator=indicator,
        validate=validate,
    )
def function[merge, parameter[left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate]]: constant[Database style join, where common columns in "on" are merged. Args: left: DataFrame. right: DataFrame. how: What type of join to use. on: The common column name(s) to join on. If None, and left_on and right_on are also None, will default to all commonly named columns. left_on: The column(s) on the left to use for the join. right_on: The column(s) on the right to use for the join. left_index: Use the index from the left as the join keys. right_index: Use the index from the right as the join keys. sort: Sort the join keys lexicographically in the result. suffixes: Add this suffix to the common names not in the "on". copy: Does nothing in our implementation indicator: Adds a column named _merge to the DataFrame with metadata from the merge about each row. validate: Checks if merge is a specific type. Returns: A merged Dataframe ] if <ast.UnaryOp object at 0x7da207f00fd0> begin[:] <ast.Raise object at 0x7da207f03a90> return[call[name[left].merge, parameter[name[right]]]]
keyword[def] identifier[merge] ( identifier[left] , identifier[right] , identifier[how] = literal[string] , identifier[on] = keyword[None] , identifier[left_on] = keyword[None] , identifier[right_on] = keyword[None] , identifier[left_index] = keyword[False] , identifier[right_index] = keyword[False] , identifier[sort] = keyword[False] , identifier[suffixes] =( literal[string] , literal[string] ), identifier[copy] = keyword[True] , identifier[indicator] = keyword[False] , identifier[validate] = keyword[None] , ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[left] , identifier[DataFrame] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[right] )) ) keyword[return] identifier[left] . identifier[merge] ( identifier[right] , identifier[how] = identifier[how] , identifier[on] = identifier[on] , identifier[left_on] = identifier[left_on] , identifier[right_on] = identifier[right_on] , identifier[left_index] = identifier[left_index] , identifier[right_index] = identifier[right_index] , identifier[sort] = identifier[sort] , identifier[suffixes] = identifier[suffixes] , identifier[copy] = identifier[copy] , identifier[indicator] = identifier[indicator] , identifier[validate] = identifier[validate] , )
def merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None): """Database style join, where common columns in "on" are merged. Args: left: DataFrame. right: DataFrame. how: What type of join to use. on: The common column name(s) to join on. If None, and left_on and right_on are also None, will default to all commonly named columns. left_on: The column(s) on the left to use for the join. right_on: The column(s) on the right to use for the join. left_index: Use the index from the left as the join keys. right_index: Use the index from the right as the join keys. sort: Sort the join keys lexicographically in the result. suffixes: Add this suffix to the common names not in the "on". copy: Does nothing in our implementation indicator: Adds a column named _merge to the DataFrame with metadata from the merge about each row. validate: Checks if merge is a specific type. Returns: A merged Dataframe """ if not isinstance(left, DataFrame): raise ValueError('can not merge DataFrame with instance of type {}'.format(type(right))) # depends on [control=['if'], data=[]] return left.merge(right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate)
def remove_event_detect(self, pin):
    """Disable edge detection on the given GPIO channel.

    Pin should be configured as an input (type IN).
    """
    gpio = self.mraa_gpio.Gpio(pin)
    self.mraa_gpio.Gpio.isrExit(gpio)
def function[remove_event_detect, parameter[self, pin]]: constant[Remove edge detection for a particular GPIO channel. Pin should be type IN. ] call[name[self].mraa_gpio.Gpio.isrExit, parameter[call[name[self].mraa_gpio.Gpio, parameter[name[pin]]]]]
keyword[def] identifier[remove_event_detect] ( identifier[self] , identifier[pin] ): literal[string] identifier[self] . identifier[mraa_gpio] . identifier[Gpio] . identifier[isrExit] ( identifier[self] . identifier[mraa_gpio] . identifier[Gpio] ( identifier[pin] ))
def remove_event_detect(self, pin): """Remove edge detection for a particular GPIO channel. Pin should be type IN. """ self.mraa_gpio.Gpio.isrExit(self.mraa_gpio.Gpio(pin))
def _send_packet_safe(self, cr, packet): """ Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet) """ # packet = bytearray(packet) packet[0] &= 0xF3 packet[0] |= self._curr_up << 3 | self._curr_down << 2 resp = cr.send_packet(packet) if resp and resp.ack and len(resp.data) and \ (resp.data[0] & 0x04) == (self._curr_down << 2): self._curr_down = 1 - self._curr_down if resp and resp.ack: self._curr_up = 1 - self._curr_up return resp
def function[_send_packet_safe, parameter[self, cr, packet]]: constant[ Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet) ] <ast.AugAssign object at 0x7da1b16284c0> <ast.AugAssign object at 0x7da1b16282b0> variable[resp] assign[=] call[name[cr].send_packet, parameter[name[packet]]] if <ast.BoolOp object at 0x7da1b162aa70> begin[:] name[self]._curr_down assign[=] binary_operation[constant[1] - name[self]._curr_down] if <ast.BoolOp object at 0x7da1b16294b0> begin[:] name[self]._curr_up assign[=] binary_operation[constant[1] - name[self]._curr_up] return[name[resp]]
keyword[def] identifier[_send_packet_safe] ( identifier[self] , identifier[cr] , identifier[packet] ): literal[string] identifier[packet] [ literal[int] ]&= literal[int] identifier[packet] [ literal[int] ]|= identifier[self] . identifier[_curr_up] << literal[int] | identifier[self] . identifier[_curr_down] << literal[int] identifier[resp] = identifier[cr] . identifier[send_packet] ( identifier[packet] ) keyword[if] identifier[resp] keyword[and] identifier[resp] . identifier[ack] keyword[and] identifier[len] ( identifier[resp] . identifier[data] ) keyword[and] ( identifier[resp] . identifier[data] [ literal[int] ]& literal[int] )==( identifier[self] . identifier[_curr_down] << literal[int] ): identifier[self] . identifier[_curr_down] = literal[int] - identifier[self] . identifier[_curr_down] keyword[if] identifier[resp] keyword[and] identifier[resp] . identifier[ack] : identifier[self] . identifier[_curr_up] = literal[int] - identifier[self] . identifier[_curr_up] keyword[return] identifier[resp]
def _send_packet_safe(self, cr, packet): """ Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet) """ # packet = bytearray(packet) packet[0] &= 243 packet[0] |= self._curr_up << 3 | self._curr_down << 2 resp = cr.send_packet(packet) if resp and resp.ack and len(resp.data) and (resp.data[0] & 4 == self._curr_down << 2): self._curr_down = 1 - self._curr_down # depends on [control=['if'], data=[]] if resp and resp.ack: self._curr_up = 1 - self._curr_up # depends on [control=['if'], data=[]] return resp
def get_surveys(self):
    """Fetch every survey available in the account.

    Args:
        None

    Returns:
        list: a list of all surveys
    """
    params = {
        'Request': 'getSurveys',
        'Format': 'JSON'
    }
    response = self._session.get(QUALTRICS_URL, params=params)
    body = response.json()
    return body['Result']['Surveys']
def function[get_surveys, parameter[self]]: constant[Gets all surveys in account Args: None Returns: list: a list of all surveys ] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da2044c0340>, <ast.Constant object at 0x7da2044c3d30>], [<ast.Constant object at 0x7da2044c22c0>, <ast.Constant object at 0x7da2044c2ef0>]] variable[r] assign[=] call[name[self]._session.get, parameter[name[QUALTRICS_URL]]] variable[output] assign[=] call[name[r].json, parameter[]] return[call[call[name[output]][constant[Result]]][constant[Surveys]]]
keyword[def] identifier[get_surveys] ( identifier[self] ): literal[string] identifier[payload] ={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[r] = identifier[self] . identifier[_session] . identifier[get] ( identifier[QUALTRICS_URL] , identifier[params] = identifier[payload] ) identifier[output] = identifier[r] . identifier[json] () keyword[return] identifier[output] [ literal[string] ][ literal[string] ]
def get_surveys(self): """Gets all surveys in account Args: None Returns: list: a list of all surveys """ payload = {'Request': 'getSurveys', 'Format': 'JSON'} r = self._session.get(QUALTRICS_URL, params=payload) output = r.json() return output['Result']['Surveys']
def purge_cache(self, object_type):
    """Drop every cached value for *object_type*.

    If no cache exists for object_type, nothing is done.
    """
    if object_type not in self.mapping:
        return
    cache = self.mapping[object_type]
    log.debug("Purging [{}] cache of {} values.".format(object_type, len(cache)))
    cache.purge()
def function[purge_cache, parameter[self, object_type]]: constant[ Purge the named cache of all values. If no cache exists for object_type, nothing is done ] if compare[name[object_type] in name[self].mapping] begin[:] variable[cache] assign[=] call[name[self].mapping][name[object_type]] call[name[log].debug, parameter[call[constant[Purging [{}] cache of {} values.].format, parameter[name[object_type], call[name[len], parameter[name[cache]]]]]]] call[name[cache].purge, parameter[]]
keyword[def] identifier[purge_cache] ( identifier[self] , identifier[object_type] ): literal[string] keyword[if] identifier[object_type] keyword[in] identifier[self] . identifier[mapping] : identifier[cache] = identifier[self] . identifier[mapping] [ identifier[object_type] ] identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[object_type] , identifier[len] ( identifier[cache] ))) identifier[cache] . identifier[purge] ()
def purge_cache(self, object_type): """ Purge the named cache of all values. If no cache exists for object_type, nothing is done """ if object_type in self.mapping: cache = self.mapping[object_type] log.debug('Purging [{}] cache of {} values.'.format(object_type, len(cache))) cache.purge() # depends on [control=['if'], data=['object_type']]
def statstable(args):
    """
    %prog statstable *.gff

    Print gene statistics table.
    """
    p = OptionParser(statstable.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    gff_files = args
    for metric in metrics:
        logging.debug("Parsing files in `{0}`..".format(metric))

        table = {}
        for gff_file in gff_files:
            # One pre-computed number file per input GFF, named after its prefix.
            pf = op.basename(gff_file).split(".")[0]
            numberfile = op.join(metric, pf + ".txt")
            # Use a context manager so the handle is closed deterministically
            # (the original leaked one open file per input per metric).
            with open(numberfile) as fp:
                ar = [int(line.strip()) for line in fp]
            # `summary` instead of `sum`: never shadow the builtin.
            summary = SummaryStats(ar).todict().items()
            keys, vals = zip(*summary)
            # Qualify each statistic name with the file prefix.
            keys = [(pf, k) for k in keys]
            table.update(dict(zip(keys, vals)))

        print(tabulate(table), file=sys.stderr)
def function[statstable, parameter[args]]: constant[ %prog statstable *.gff Print gene statistics table. ] variable[p] assign[=] call[name[OptionParser], parameter[name[statstable].__doc__]] <ast.Tuple object at 0x7da18bccb130> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18bcc8a00>]] variable[gff_files] assign[=] name[args] for taget[name[metric]] in starred[name[metrics]] begin[:] call[name[logging].debug, parameter[call[constant[Parsing files in `{0}`..].format, parameter[name[metric]]]]] variable[table] assign[=] dictionary[[], []] for taget[name[x]] in starred[name[gff_files]] begin[:] variable[pf] assign[=] call[call[call[name[op].basename, parameter[name[x]]].split, parameter[constant[.]]]][constant[0]] variable[numberfile] assign[=] call[name[op].join, parameter[name[metric], binary_operation[name[pf] + constant[.txt]]]] variable[ar] assign[=] <ast.ListComp object at 0x7da18fe92dd0> variable[sum] assign[=] call[call[call[name[SummaryStats], parameter[name[ar]]].todict, parameter[]].items, parameter[]] <ast.Tuple object at 0x7da18fe92ec0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18fe93100>]] variable[keys] assign[=] <ast.ListComp object at 0x7da18fe93760> call[name[table].update, parameter[call[name[dict], parameter[call[name[zip], parameter[name[keys], name[vals]]]]]]] call[name[print], parameter[call[name[tabulate], parameter[name[table]]]]]
keyword[def] identifier[statstable] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[statstable] . identifier[__doc__] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )< literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[gff_files] = identifier[args] keyword[for] identifier[metric] keyword[in] identifier[metrics] : identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[metric] )) identifier[table] ={} keyword[for] identifier[x] keyword[in] identifier[gff_files] : identifier[pf] = identifier[op] . identifier[basename] ( identifier[x] ). identifier[split] ( literal[string] )[ literal[int] ] identifier[numberfile] = identifier[op] . identifier[join] ( identifier[metric] , identifier[pf] + literal[string] ) identifier[ar] =[ identifier[int] ( identifier[x] . identifier[strip] ()) keyword[for] identifier[x] keyword[in] identifier[open] ( identifier[numberfile] )] identifier[sum] = identifier[SummaryStats] ( identifier[ar] ). identifier[todict] (). identifier[items] () identifier[keys] , identifier[vals] = identifier[zip] (* identifier[sum] ) identifier[keys] =[( identifier[pf] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[keys] ] identifier[table] . identifier[update] ( identifier[dict] ( identifier[zip] ( identifier[keys] , identifier[vals] ))) identifier[print] ( identifier[tabulate] ( identifier[table] ), identifier[file] = identifier[sys] . identifier[stderr] )
def statstable(args): """ %prog statstable *.gff Print gene statistics table. """ p = OptionParser(statstable.__doc__) (opts, args) = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] gff_files = args for metric in metrics: logging.debug('Parsing files in `{0}`..'.format(metric)) table = {} for x in gff_files: pf = op.basename(x).split('.')[0] numberfile = op.join(metric, pf + '.txt') ar = [int(x.strip()) for x in open(numberfile)] sum = SummaryStats(ar).todict().items() (keys, vals) = zip(*sum) keys = [(pf, x) for x in keys] table.update(dict(zip(keys, vals))) # depends on [control=['for'], data=['x']] print(tabulate(table), file=sys.stderr) # depends on [control=['for'], data=['metric']]
def get_payment_method(self, payment_method_id, **params):
    """https://developers.coinbase.com/api/v2#show-a-payment-method"""
    return self._make_api_object(
        self._get('v2', 'payment-methods', payment_method_id, params=params),
        PaymentMethod)
def function[get_payment_method, parameter[self, payment_method_id]]: constant[https://developers.coinbase.com/api/v2#show-a-payment-method] variable[response] assign[=] call[name[self]._get, parameter[constant[v2], constant[payment-methods], name[payment_method_id]]] return[call[name[self]._make_api_object, parameter[name[response], name[PaymentMethod]]]]
keyword[def] identifier[get_payment_method] ( identifier[self] , identifier[payment_method_id] ,** identifier[params] ): literal[string] identifier[response] = identifier[self] . identifier[_get] ( literal[string] , literal[string] , identifier[payment_method_id] , identifier[params] = identifier[params] ) keyword[return] identifier[self] . identifier[_make_api_object] ( identifier[response] , identifier[PaymentMethod] )
def get_payment_method(self, payment_method_id, **params): """https://developers.coinbase.com/api/v2#show-a-payment-method""" response = self._get('v2', 'payment-methods', payment_method_id, params=params) return self._make_api_object(response, PaymentMethod)
def getItalianAccentedVocal(vocal, acc_type="g"):
    """
    Return the given vowel with a grave ('g') or acute ('a') accent.
    """
    # Build the vocal -> {'g': grave, 'a': acute} table from three parallel
    # sequences instead of spelling out every inner dict.
    graves = u'\xe0\xe8\xec\xf2\xf9'
    acutes = u'\xe1\xe9\xed\xf3\xfa'
    by_vocal = {v: {'g': g, 'a': a}
                for v, g, a in zip('aeiou', graves, acutes)}
    # Same lookup order as the original: vowel first, then accent type.
    return by_vocal[vocal][acc_type]
def function[getItalianAccentedVocal, parameter[vocal, acc_type]]: constant[ It returns given vocal with grave or acute accent ] variable[vocals] assign[=] dictionary[[<ast.Constant object at 0x7da1b1528b50>, <ast.Constant object at 0x7da1b1528730>, <ast.Constant object at 0x7da1b1529870>, <ast.Constant object at 0x7da1b1528910>, <ast.Constant object at 0x7da1b1529d20>], [<ast.Dict object at 0x7da1b1528f70>, <ast.Dict object at 0x7da1b1528550>, <ast.Dict object at 0x7da1b1529690>, <ast.Dict object at 0x7da1b1529c90>, <ast.Dict object at 0x7da1b1528880>]] return[call[call[name[vocals]][name[vocal]]][name[acc_type]]]
keyword[def] identifier[getItalianAccentedVocal] ( identifier[vocal] , identifier[acc_type] = literal[string] ): literal[string] identifier[vocals] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }, literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }, literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }, literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }, literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }} keyword[return] identifier[vocals] [ identifier[vocal] ][ identifier[acc_type] ]
def getItalianAccentedVocal(vocal, acc_type='g'): """ It returns given vocal with grave or acute accent """ vocals = {'a': {'g': u'à', 'a': u'á'}, 'e': {'g': u'è', 'a': u'é'}, 'i': {'g': u'ì', 'a': u'í'}, 'o': {'g': u'ò', 'a': u'ó'}, 'u': {'g': u'ù', 'a': u'ú'}} return vocals[vocal][acc_type]
def set_generator_validation_nb(self, number):
    """
    Set ``self.nb_val_samples``, used by ``model.fit`` when the input is a
    generator.

    The value is rounded up to a whole number of batches; with Keras >= 2
    it is then expressed in batches (steps) instead of samples.

    :param number: requested number of validation samples
    :return: None
    """
    remainder = number % self.get_batch_size()
    self.nb_val_samples = number
    if remainder > 0:
        # Pad up to the next full batch.
        self.nb_val_samples += self.get_batch_size() - remainder

    import keras
    if keras.__version__[0] != '1':
        # Keras 2+ expects validation_steps (batches), not samples.
        self.nb_val_samples //= self.get_batch_size()
def function[set_generator_validation_nb, parameter[self, number]]: constant[ sets self.nb_val_samples which is used in model.fit if input is a generator :param number: :return: ] name[self].nb_val_samples assign[=] name[number] variable[diff_to_batch] assign[=] binary_operation[name[number] <ast.Mod object at 0x7da2590d6920> call[name[self].get_batch_size, parameter[]]] if compare[name[diff_to_batch] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da2041d8f70> import module[keras] if compare[constant[1] not_equal[!=] call[name[keras].__version__][constant[0]]] begin[:] name[self].nb_val_samples assign[=] binary_operation[name[self].nb_val_samples <ast.FloorDiv object at 0x7da2590d6bc0> call[name[self].get_batch_size, parameter[]]]
keyword[def] identifier[set_generator_validation_nb] ( identifier[self] , identifier[number] ): literal[string] identifier[self] . identifier[nb_val_samples] = identifier[number] identifier[diff_to_batch] = identifier[number] % identifier[self] . identifier[get_batch_size] () keyword[if] identifier[diff_to_batch] > literal[int] : identifier[self] . identifier[nb_val_samples] += identifier[self] . identifier[get_batch_size] ()- identifier[diff_to_batch] keyword[import] identifier[keras] keyword[if] literal[string] != identifier[keras] . identifier[__version__] [ literal[int] ]: identifier[self] . identifier[nb_val_samples] = identifier[self] . identifier[nb_val_samples] // identifier[self] . identifier[get_batch_size] ()
def set_generator_validation_nb(self, number): """ sets self.nb_val_samples which is used in model.fit if input is a generator :param number: :return: """ self.nb_val_samples = number diff_to_batch = number % self.get_batch_size() if diff_to_batch > 0: self.nb_val_samples += self.get_batch_size() - diff_to_batch # depends on [control=['if'], data=['diff_to_batch']] import keras if '1' != keras.__version__[0]: self.nb_val_samples = self.nb_val_samples // self.get_batch_size() # depends on [control=['if'], data=[]]
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series """ # Transform latent variables parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1) sample = np.arange(start=rand_int, stop=rand_int+mini_batch) sampled_data = self.data[sample] Y = np.array(sampled_data[self.max_lag:]) X = np.ones(Y.shape[0]) scores = np.zeros(Y.shape[0]) lmda = np.ones(Y.shape[0])*parm[0] theta = np.ones(Y.shape[0])*parm[-1] # Loop over time series for t in range(0,Y.shape[0]): if t < self.max_lag: lmda[t] = parm[0]/(1-np.sum(parm[1:(self.p+1)])) theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t])*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0)) else: # Loop over GARCH terms for p_term in range(0,self.p): lmda[t] += parm[1+p_term]*lmda[t-p_term-1] # Loop over Score terms for q_term in range(0,self.q): lmda[t] += parm[1+self.p+q_term]*scores[t-q_term-1] if self.leverage is True: lmda[t] += parm[-4]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1) theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t]/2.0)*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0)) if (Y[t]-theta[t])>=0: scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3], 2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0 else: scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3],-2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0 return lmda, Y, 
scores, theta
def function[_mb_model, parameter[self, beta, mini_batch]]: constant[ Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series ] variable[parm] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da20e9621a0>]] variable[rand_int] assign[=] call[name[np].random.randint, parameter[]] variable[sample] assign[=] call[name[np].arange, parameter[]] variable[sampled_data] assign[=] call[name[self].data][name[sample]] variable[Y] assign[=] call[name[np].array, parameter[call[name[sampled_data]][<ast.Slice object at 0x7da20e961720>]]] variable[X] assign[=] call[name[np].ones, parameter[call[name[Y].shape][constant[0]]]] variable[scores] assign[=] call[name[np].zeros, parameter[call[name[Y].shape][constant[0]]]] variable[lmda] assign[=] binary_operation[call[name[np].ones, parameter[call[name[Y].shape][constant[0]]]] * call[name[parm]][constant[0]]] variable[theta] assign[=] binary_operation[call[name[np].ones, parameter[call[name[Y].shape][constant[0]]]] * call[name[parm]][<ast.UnaryOp object at 0x7da20e960d00>]] for taget[name[t]] in starred[call[name[range], parameter[constant[0], call[name[Y].shape][constant[0]]]]] begin[:] if compare[name[t] less[<] name[self].max_lag] begin[:] call[name[lmda]][name[t]] assign[=] binary_operation[call[name[parm]][constant[0]] / binary_operation[constant[1] - call[name[np].sum, parameter[call[name[parm]][<ast.Slice object at 0x7da20e962020>]]]]] <ast.AugAssign object at 0x7da20e961600> if compare[binary_operation[call[name[Y]][name[t]] - call[name[theta]][name[t]]] greater_or_equal[>=] constant[0]] begin[:] call[name[scores]][name[t]] assign[=] 
binary_operation[binary_operation[binary_operation[binary_operation[call[name[parm]][<ast.UnaryOp object at 0x7da18f58fe20>] + constant[1.0]] * call[name[np].power, parameter[binary_operation[call[name[Y]][name[t]] - call[name[theta]][name[t]]], constant[2]]]] / call[name[float], parameter[binary_operation[binary_operation[binary_operation[call[name[np].power, parameter[call[name[parm]][<ast.UnaryOp object at 0x7da18f58d480>], constant[2]]] * call[name[parm]][<ast.UnaryOp object at 0x7da18f58ce80>]] * call[name[np].exp, parameter[call[name[lmda]][name[t]]]]] + call[name[np].power, parameter[binary_operation[call[name[Y]][name[t]] - call[name[theta]][name[t]]], constant[2]]]]]]] - constant[1.0]] return[tuple[[<ast.Name object at 0x7da18f58c910>, <ast.Name object at 0x7da18f58fdf0>, <ast.Name object at 0x7da18f58f0d0>, <ast.Name object at 0x7da18f58ec20>]]]
keyword[def] identifier[_mb_model] ( identifier[self] , identifier[beta] , identifier[mini_batch] ): literal[string] identifier[parm] = identifier[np] . identifier[array] ([ identifier[self] . identifier[latent_variables] . identifier[z_list] [ identifier[k] ]. identifier[prior] . identifier[transform] ( identifier[beta] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[beta] . identifier[shape] [ literal[int] ])]) identifier[rand_int] = identifier[np] . identifier[random] . identifier[randint] ( identifier[low] = literal[int] , identifier[high] = identifier[self] . identifier[data_length] - identifier[mini_batch] + literal[int] ) identifier[sample] = identifier[np] . identifier[arange] ( identifier[start] = identifier[rand_int] , identifier[stop] = identifier[rand_int] + identifier[mini_batch] ) identifier[sampled_data] = identifier[self] . identifier[data] [ identifier[sample] ] identifier[Y] = identifier[np] . identifier[array] ( identifier[sampled_data] [ identifier[self] . identifier[max_lag] :]) identifier[X] = identifier[np] . identifier[ones] ( identifier[Y] . identifier[shape] [ literal[int] ]) identifier[scores] = identifier[np] . identifier[zeros] ( identifier[Y] . identifier[shape] [ literal[int] ]) identifier[lmda] = identifier[np] . identifier[ones] ( identifier[Y] . identifier[shape] [ literal[int] ])* identifier[parm] [ literal[int] ] identifier[theta] = identifier[np] . identifier[ones] ( identifier[Y] . identifier[shape] [ literal[int] ])* identifier[parm] [- literal[int] ] keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , identifier[Y] . identifier[shape] [ literal[int] ]): keyword[if] identifier[t] < identifier[self] . identifier[max_lag] : identifier[lmda] [ identifier[t] ]= identifier[parm] [ literal[int] ]/( literal[int] - identifier[np] . identifier[sum] ( identifier[parm] [ literal[int] :( identifier[self] . 
identifier[p] + literal[int] )])) identifier[theta] [ identifier[t] ]+=( identifier[parm] [- literal[int] ]-( literal[int] / identifier[parm] [- literal[int] ]))* identifier[np] . identifier[exp] ( identifier[lmda] [ identifier[t] ])*( identifier[np] . identifier[sqrt] ( identifier[parm] [- literal[int] ])* identifier[sp] . identifier[gamma] (( identifier[parm] [- literal[int] ]- literal[int] )/ literal[int] ))/( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[pi] )* identifier[sp] . identifier[gamma] ( identifier[parm] [- literal[int] ]/ literal[int] )) keyword[else] : keyword[for] identifier[p_term] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[p] ): identifier[lmda] [ identifier[t] ]+= identifier[parm] [ literal[int] + identifier[p_term] ]* identifier[lmda] [ identifier[t] - identifier[p_term] - literal[int] ] keyword[for] identifier[q_term] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[q] ): identifier[lmda] [ identifier[t] ]+= identifier[parm] [ literal[int] + identifier[self] . identifier[p] + identifier[q_term] ]* identifier[scores] [ identifier[t] - identifier[q_term] - literal[int] ] keyword[if] identifier[self] . identifier[leverage] keyword[is] keyword[True] : identifier[lmda] [ identifier[t] ]+= identifier[parm] [- literal[int] ]* identifier[np] . identifier[sign] (-( identifier[Y] [ identifier[t] - literal[int] ]- identifier[theta] [ identifier[t] - literal[int] ]))*( identifier[scores] [ identifier[t] - literal[int] ]+ literal[int] ) identifier[theta] [ identifier[t] ]+=( identifier[parm] [- literal[int] ]-( literal[int] / identifier[parm] [- literal[int] ]))* identifier[np] . identifier[exp] ( identifier[lmda] [ identifier[t] ]/ literal[int] )*( identifier[np] . identifier[sqrt] ( identifier[parm] [- literal[int] ])* identifier[sp] . identifier[gamma] (( identifier[parm] [- literal[int] ]- literal[int] )/ literal[int] ))/( identifier[np] . 
identifier[sqrt] ( identifier[np] . identifier[pi] )* identifier[sp] . identifier[gamma] ( identifier[parm] [- literal[int] ]/ literal[int] )) keyword[if] ( identifier[Y] [ identifier[t] ]- identifier[theta] [ identifier[t] ])>= literal[int] : identifier[scores] [ identifier[t] ]=((( identifier[parm] [- literal[int] ]+ literal[int] )* identifier[np] . identifier[power] ( identifier[Y] [ identifier[t] ]- identifier[theta] [ identifier[t] ], literal[int] ))/ identifier[float] ( identifier[np] . identifier[power] ( identifier[parm] [- literal[int] ], literal[int] )* identifier[parm] [- literal[int] ]* identifier[np] . identifier[exp] ( identifier[lmda] [ identifier[t] ])+ identifier[np] . identifier[power] ( identifier[Y] [ identifier[t] ]- identifier[theta] [ identifier[t] ], literal[int] )))- literal[int] keyword[else] : identifier[scores] [ identifier[t] ]=((( identifier[parm] [- literal[int] ]+ literal[int] )* identifier[np] . identifier[power] ( identifier[Y] [ identifier[t] ]- identifier[theta] [ identifier[t] ], literal[int] ))/ identifier[float] ( identifier[np] . identifier[power] ( identifier[parm] [- literal[int] ],- literal[int] )* identifier[parm] [- literal[int] ]* identifier[np] . identifier[exp] ( identifier[lmda] [ identifier[t] ])+ identifier[np] . identifier[power] ( identifier[Y] [ identifier[t] ]- identifier[theta] [ identifier[t] ], literal[int] )))- literal[int] keyword[return] identifier[lmda] , identifier[Y] , identifier[scores] , identifier[theta]
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series """ # Transform latent variables parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) rand_int = np.random.randint(low=0, high=self.data_length - mini_batch + 1) sample = np.arange(start=rand_int, stop=rand_int + mini_batch) sampled_data = self.data[sample] Y = np.array(sampled_data[self.max_lag:]) X = np.ones(Y.shape[0]) scores = np.zeros(Y.shape[0]) lmda = np.ones(Y.shape[0]) * parm[0] theta = np.ones(Y.shape[0]) * parm[-1] # Loop over time series for t in range(0, Y.shape[0]): if t < self.max_lag: lmda[t] = parm[0] / (1 - np.sum(parm[1:self.p + 1])) theta[t] += (parm[-3] - 1.0 / parm[-3]) * np.exp(lmda[t]) * (np.sqrt(parm[-2]) * sp.gamma((parm[-2] - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(parm[-2] / 2.0)) # depends on [control=['if'], data=['t']] else: # Loop over GARCH terms for p_term in range(0, self.p): lmda[t] += parm[1 + p_term] * lmda[t - p_term - 1] # depends on [control=['for'], data=['p_term']] # Loop over Score terms for q_term in range(0, self.q): lmda[t] += parm[1 + self.p + q_term] * scores[t - q_term - 1] # depends on [control=['for'], data=['q_term']] if self.leverage is True: lmda[t] += parm[-4] * np.sign(-(Y[t - 1] - theta[t - 1])) * (scores[t - 1] + 1) # depends on [control=['if'], data=[]] theta[t] += (parm[-3] - 1.0 / parm[-3]) * np.exp(lmda[t] / 2.0) * (np.sqrt(parm[-2]) * sp.gamma((parm[-2] - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(parm[-2] / 2.0)) if Y[t] - theta[t] >= 0: scores[t] = (parm[-2] + 1.0) * np.power(Y[t] - 
theta[t], 2) / float(np.power(parm[-3], 2) * parm[-2] * np.exp(lmda[t]) + np.power(Y[t] - theta[t], 2)) - 1.0 # depends on [control=['if'], data=[]] else: scores[t] = (parm[-2] + 1.0) * np.power(Y[t] - theta[t], 2) / float(np.power(parm[-3], -2) * parm[-2] * np.exp(lmda[t]) + np.power(Y[t] - theta[t], 2)) - 1.0 # depends on [control=['for'], data=['t']] return (lmda, Y, scores, theta)
def p_procedure_def(t):
    """procedure_def : proc_return ID LPAREN proc_firstarg type_specifier_list RPAREN EQUALS constant SEMI"""
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule — the parser generator reads it, so it must not be edited casually.
    global name_dict
    tid = t[2]            # procedure identifier token (ID)
    value = t[8]          # the constant assigned to the procedure — presumably its procedure number, TODO confirm
    lineno = t.lineno(1)  # source line of the production, for diagnostics
    # Only register the name if it has not been declared before; id_unique is
    # expected to report the duplicate itself (can't tell from here).
    if id_unique(tid, 'procedure', lineno):
        name_dict[tid] = const_info(tid, value, lineno)
def function[p_procedure_def, parameter[t]]: constant[procedure_def : proc_return ID LPAREN proc_firstarg type_specifier_list RPAREN EQUALS constant SEMI] <ast.Global object at 0x7da1b170dbd0> variable[tid] assign[=] call[name[t]][constant[2]] variable[value] assign[=] call[name[t]][constant[8]] variable[lineno] assign[=] call[name[t].lineno, parameter[constant[1]]] if call[name[id_unique], parameter[name[tid], constant[procedure], name[lineno]]] begin[:] call[name[name_dict]][name[tid]] assign[=] call[name[const_info], parameter[name[tid], name[value], name[lineno]]]
keyword[def] identifier[p_procedure_def] ( identifier[t] ): literal[string] keyword[global] identifier[name_dict] identifier[tid] = identifier[t] [ literal[int] ] identifier[value] = identifier[t] [ literal[int] ] identifier[lineno] = identifier[t] . identifier[lineno] ( literal[int] ) keyword[if] identifier[id_unique] ( identifier[tid] , literal[string] , identifier[lineno] ): identifier[name_dict] [ identifier[tid] ]= identifier[const_info] ( identifier[tid] , identifier[value] , identifier[lineno] )
def p_procedure_def(t): """procedure_def : proc_return ID LPAREN proc_firstarg type_specifier_list RPAREN EQUALS constant SEMI""" global name_dict tid = t[2] value = t[8] lineno = t.lineno(1) if id_unique(tid, 'procedure', lineno): name_dict[tid] = const_info(tid, value, lineno) # depends on [control=['if'], data=[]]
def resize_hess(self, func):
    """
    Removes rows/columns with indices corresponding to fixed parameters from
    the output of func. func has to return the Hessian of a scalar function.

    :param func: Hessian function to be wrapped. Is assumed to be the
        Hessian of a scalar function. May be ``None``.
    :return: Wrapped function returning the Hessian restricted to the free
        parameters only, or ``None`` when ``func`` is ``None``.
    """
    if func is None:
        return None

    @wraps(func)
    def resized(*args, **kwargs):
        out = func(*args, **kwargs)
        # Make two dimensional, corresponding to a scalar function.
        out = np.atleast_2d(np.squeeze(out))
        mask = [p not in self._fixed_params for p in self.parameters]
        # np.ix_ keeps the full free-parameter submatrix (including cross
        # derivatives). Plain ``out[mask, mask]`` with boolean masks returns
        # only the diagonal, which is not a valid reduced Hessian.
        return np.atleast_2d(out[np.ix_(mask, mask)])
    return resized
def function[resize_hess, parameter[self, func]]: constant[ Removes values with identical indices to fixed parameters from the output of func. func has to return the Hessian of a scalar function. :param func: Hessian function to be wrapped. Is assumed to be the Hessian of a scalar function. :return: Hessian corresponding to free parameters only. ] if compare[name[func] is constant[None]] begin[:] return[constant[None]] def function[resized, parameter[]]: variable[out] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b2346b30>]] variable[out] assign[=] call[name[np].atleast_2d, parameter[call[name[np].squeeze, parameter[name[out]]]]] variable[mask] assign[=] <ast.ListComp object at 0x7da1b2347550> return[call[name[np].atleast_2d, parameter[call[name[out]][tuple[[<ast.Name object at 0x7da1b2344a00>, <ast.Name object at 0x7da1b2346140>]]]]]] return[name[resized]]
keyword[def] identifier[resize_hess] ( identifier[self] , identifier[func] ): literal[string] keyword[if] identifier[func] keyword[is] keyword[None] : keyword[return] keyword[None] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[resized] (* identifier[args] ,** identifier[kwargs] ): identifier[out] = identifier[func] (* identifier[args] ,** identifier[kwargs] ) identifier[out] = identifier[np] . identifier[atleast_2d] ( identifier[np] . identifier[squeeze] ( identifier[out] )) identifier[mask] =[ identifier[p] keyword[not] keyword[in] identifier[self] . identifier[_fixed_params] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[parameters] ] keyword[return] identifier[np] . identifier[atleast_2d] ( identifier[out] [ identifier[mask] , identifier[mask] ]) keyword[return] identifier[resized]
def resize_hess(self, func): """ Removes values with identical indices to fixed parameters from the output of func. func has to return the Hessian of a scalar function. :param func: Hessian function to be wrapped. Is assumed to be the Hessian of a scalar function. :return: Hessian corresponding to free parameters only. """ if func is None: return None # depends on [control=['if'], data=[]] @wraps(func) def resized(*args, **kwargs): out = func(*args, **kwargs) # Make two dimensional, corresponding to a scalar function. out = np.atleast_2d(np.squeeze(out)) mask = [p not in self._fixed_params for p in self.parameters] return np.atleast_2d(out[mask, mask]) return resized
def calculate_check_digit(gtin):
    """Given a GTIN (8-14) or SSCC, calculate its appropriate check digit"""
    # GS1 scheme: walking from the rightmost digit, weights alternate 3, 1, 3, ...
    total = 0
    for position, char in enumerate(reversed(gtin)):
        weight = 3 if position % 2 == 0 else 1
        total += int(char) * weight
    # Distance from the weighted sum up to the next multiple of ten.
    rounded_up = int(math.ceil(total / 10.0) * 10)
    return rounded_up - total
def function[calculate_check_digit, parameter[gtin]]: constant[Given a GTIN (8-14) or SSCC, calculate its appropriate check digit] variable[reverse_gtin] assign[=] call[name[gtin]][<ast.Slice object at 0x7da20e9b3fd0>] variable[total] assign[=] constant[0] variable[count] assign[=] constant[0] for taget[name[char]] in starred[name[reverse_gtin]] begin[:] variable[digit] assign[=] call[name[int], parameter[name[char]]] if compare[binary_operation[name[count] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:] variable[digit] assign[=] binary_operation[name[digit] * constant[3]] variable[total] assign[=] binary_operation[name[total] + name[digit]] variable[count] assign[=] binary_operation[name[count] + constant[1]] variable[nearest_multiple_of_ten] assign[=] call[name[int], parameter[binary_operation[call[name[math].ceil, parameter[binary_operation[name[total] / constant[10.0]]]] * constant[10]]]] return[binary_operation[name[nearest_multiple_of_ten] - name[total]]]
keyword[def] identifier[calculate_check_digit] ( identifier[gtin] ): literal[string] identifier[reverse_gtin] = identifier[gtin] [::- literal[int] ] identifier[total] = literal[int] identifier[count] = literal[int] keyword[for] identifier[char] keyword[in] identifier[reverse_gtin] : identifier[digit] = identifier[int] ( identifier[char] ) keyword[if] identifier[count] % literal[int] == literal[int] : identifier[digit] = identifier[digit] * literal[int] identifier[total] = identifier[total] + identifier[digit] identifier[count] = identifier[count] + literal[int] identifier[nearest_multiple_of_ten] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[total] / literal[int] )* literal[int] ) keyword[return] identifier[nearest_multiple_of_ten] - identifier[total]
def calculate_check_digit(gtin): """Given a GTIN (8-14) or SSCC, calculate its appropriate check digit""" reverse_gtin = gtin[::-1] total = 0 count = 0 for char in reverse_gtin: digit = int(char) if count % 2 == 0: digit = digit * 3 # depends on [control=['if'], data=[]] total = total + digit count = count + 1 # depends on [control=['for'], data=['char']] nearest_multiple_of_ten = int(math.ceil(total / 10.0) * 10) return nearest_multiple_of_ten - total
def scrolldown(self, window_name, object_name):
    """
    Scroll a vertical scroll bar to its maximum (bottom) position.

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string

    @return: 1 on success.
    @rtype: integer
    """
    if self.verifyscrollbarvertical(window_name, object_name):
        return self.setmax(window_name, object_name)
    raise LdtpServerException('Object not vertical scrollbar')
def function[scrolldown, parameter[self, window_name, object_name]]: constant[ Scroll down @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer ] if <ast.UnaryOp object at 0x7da18f09c6d0> begin[:] <ast.Raise object at 0x7da18f09e2f0> return[call[name[self].setmax, parameter[name[window_name], name[object_name]]]]
keyword[def] identifier[scrolldown] ( identifier[self] , identifier[window_name] , identifier[object_name] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[verifyscrollbarvertical] ( identifier[window_name] , identifier[object_name] ): keyword[raise] identifier[LdtpServerException] ( literal[string] ) keyword[return] identifier[self] . identifier[setmax] ( identifier[window_name] , identifier[object_name] )
def scrolldown(self, window_name, object_name): """ Scroll down @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ if not self.verifyscrollbarvertical(window_name, object_name): raise LdtpServerException('Object not vertical scrollbar') # depends on [control=['if'], data=[]] return self.setmax(window_name, object_name)
def aggregate_key(self, aggregate_key): """ Aggregate values from key and put them into the top-level dictionary """ aggregation = self.data_dict[aggregate_key] # List of dictionaries of numpy arrays/scalars # Aggregate sub batch data data_dict_keys = {y for x in aggregation for y in x.keys()} for key in data_dict_keys: # Just average all the statistics from the loss function stacked = np.stack([d[key] for d in aggregation], axis=0) self.data_dict[key] = np.mean(stacked, axis=0)
def function[aggregate_key, parameter[self, aggregate_key]]: constant[ Aggregate values from key and put them into the top-level dictionary ] variable[aggregation] assign[=] call[name[self].data_dict][name[aggregate_key]] variable[data_dict_keys] assign[=] <ast.SetComp object at 0x7da1b1601de0> for taget[name[key]] in starred[name[data_dict_keys]] begin[:] variable[stacked] assign[=] call[name[np].stack, parameter[<ast.ListComp object at 0x7da1b1600f10>]] call[name[self].data_dict][name[key]] assign[=] call[name[np].mean, parameter[name[stacked]]]
keyword[def] identifier[aggregate_key] ( identifier[self] , identifier[aggregate_key] ): literal[string] identifier[aggregation] = identifier[self] . identifier[data_dict] [ identifier[aggregate_key] ] identifier[data_dict_keys] ={ identifier[y] keyword[for] identifier[x] keyword[in] identifier[aggregation] keyword[for] identifier[y] keyword[in] identifier[x] . identifier[keys] ()} keyword[for] identifier[key] keyword[in] identifier[data_dict_keys] : identifier[stacked] = identifier[np] . identifier[stack] ([ identifier[d] [ identifier[key] ] keyword[for] identifier[d] keyword[in] identifier[aggregation] ], identifier[axis] = literal[int] ) identifier[self] . identifier[data_dict] [ identifier[key] ]= identifier[np] . identifier[mean] ( identifier[stacked] , identifier[axis] = literal[int] )
def aggregate_key(self, aggregate_key): """ Aggregate values from key and put them into the top-level dictionary """ aggregation = self.data_dict[aggregate_key] # List of dictionaries of numpy arrays/scalars # Aggregate sub batch data data_dict_keys = {y for x in aggregation for y in x.keys()} for key in data_dict_keys: # Just average all the statistics from the loss function stacked = np.stack([d[key] for d in aggregation], axis=0) self.data_dict[key] = np.mean(stacked, axis=0) # depends on [control=['for'], data=['key']]
def from_clauses(self, clauses): """ This methods copies a list of clauses into a CNF object. :param clauses: a list of clauses. :type clauses: list(list(int)) Example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [1, -2], [5]]) >>> print cnf.clauses [[-1, 2], [1, -2], [5]] >>> print cnf.nv 5 """ self.clauses = copy.deepcopy(clauses) for cl in self.clauses: self.nv = max([abs(l) for l in cl] + [self.nv])
def function[from_clauses, parameter[self, clauses]]: constant[ This methods copies a list of clauses into a CNF object. :param clauses: a list of clauses. :type clauses: list(list(int)) Example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [1, -2], [5]]) >>> print cnf.clauses [[-1, 2], [1, -2], [5]] >>> print cnf.nv 5 ] name[self].clauses assign[=] call[name[copy].deepcopy, parameter[name[clauses]]] for taget[name[cl]] in starred[name[self].clauses] begin[:] name[self].nv assign[=] call[name[max], parameter[binary_operation[<ast.ListComp object at 0x7da20e954e50> + list[[<ast.Attribute object at 0x7da20e957e20>]]]]]
keyword[def] identifier[from_clauses] ( identifier[self] , identifier[clauses] ): literal[string] identifier[self] . identifier[clauses] = identifier[copy] . identifier[deepcopy] ( identifier[clauses] ) keyword[for] identifier[cl] keyword[in] identifier[self] . identifier[clauses] : identifier[self] . identifier[nv] = identifier[max] ([ identifier[abs] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[cl] ]+[ identifier[self] . identifier[nv] ])
def from_clauses(self, clauses): """ This methods copies a list of clauses into a CNF object. :param clauses: a list of clauses. :type clauses: list(list(int)) Example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [1, -2], [5]]) >>> print cnf.clauses [[-1, 2], [1, -2], [5]] >>> print cnf.nv 5 """ self.clauses = copy.deepcopy(clauses) for cl in self.clauses: self.nv = max([abs(l) for l in cl] + [self.nv]) # depends on [control=['for'], data=['cl']]
def validate_access_token(self, client_key, token, request): """Validates access token is available for client.""" log.debug('Validate access token %r for %r', token, client_key) tok = request.access_token or self._tokengetter( client_key=client_key, token=token, ) if tok: request.access_token = tok return True return False
def function[validate_access_token, parameter[self, client_key, token, request]]: constant[Validates access token is available for client.] call[name[log].debug, parameter[constant[Validate access token %r for %r], name[token], name[client_key]]] variable[tok] assign[=] <ast.BoolOp object at 0x7da1b0259e40> if name[tok] begin[:] name[request].access_token assign[=] name[tok] return[constant[True]] return[constant[False]]
keyword[def] identifier[validate_access_token] ( identifier[self] , identifier[client_key] , identifier[token] , identifier[request] ): literal[string] identifier[log] . identifier[debug] ( literal[string] , identifier[token] , identifier[client_key] ) identifier[tok] = identifier[request] . identifier[access_token] keyword[or] identifier[self] . identifier[_tokengetter] ( identifier[client_key] = identifier[client_key] , identifier[token] = identifier[token] , ) keyword[if] identifier[tok] : identifier[request] . identifier[access_token] = identifier[tok] keyword[return] keyword[True] keyword[return] keyword[False]
def validate_access_token(self, client_key, token, request): """Validates access token is available for client.""" log.debug('Validate access token %r for %r', token, client_key) tok = request.access_token or self._tokengetter(client_key=client_key, token=token) if tok: request.access_token = tok return True # depends on [control=['if'], data=[]] return False
def rotate(name, pattern=None, conf_file=default_conf, **kwargs): ''' Set up pattern for logging. name : string alias for entryname pattern : string alias for log_file conf_file : string optional path to alternative configuration file kwargs : boolean|string|int optional additional flags and parameters .. note:: ``name`` and ``pattern`` were kept for backwards compatibility reasons. ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias for ``log_file``. These aliases will only be used if the ``entryname`` and ``log_file`` arguments are not passed. For a full list of arguments see ```logadm.show_args```. CLI Example: .. code-block:: bash salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7 salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700' ''' ## cleanup kwargs kwargs = salt.utils.args.clean_kwargs(**kwargs) ## inject name into kwargs if 'entryname' not in kwargs and name and not name.startswith('/'): kwargs['entryname'] = name ## inject pattern into kwargs if 'log_file' not in kwargs: if pattern and pattern.startswith('/'): kwargs['log_file'] = pattern # NOTE: for backwards compatibility check if name is a path elif name and name.startswith('/'): kwargs['log_file'] = name ## build command log.debug("logadm.rotate - kwargs: %s", kwargs) command = "logadm -f {}".format(conf_file) for arg, val in kwargs.items(): if arg in option_toggles.values() and val: command = "{} {}".format( command, _arg2opt(arg), ) elif arg in option_flags.values(): command = "{} {} {}".format( command, _arg2opt(arg), _quote_args(six.text_type(val)) ) elif arg != 'log_file': log.warning("Unknown argument %s, don't know how to map this!", arg) if 'log_file' in kwargs: # NOTE: except from ```man logadm``` # If no log file name is provided on a logadm command line, the entry # name is assumed to be the same as the log file name. 
For example, # the following two lines achieve the same thing, keeping two copies # of rotated log files: # # % logadm -C2 -w mylog /my/really/long/log/file/name # % logadm -C2 -w /my/really/long/log/file/name if 'entryname' not in kwargs: command = "{} -w {}".format(command, _quote_args(kwargs['log_file'])) else: command = "{} {}".format(command, _quote_args(kwargs['log_file'])) log.debug("logadm.rotate - command: %s", command) result = __salt__['cmd.run_all'](command, python_shell=False) if result['retcode'] != 0: return dict(Error='Failed in adding log', Output=result['stderr']) return dict(Result='Success')
def function[rotate, parameter[name, pattern, conf_file]]: constant[ Set up pattern for logging. name : string alias for entryname pattern : string alias for log_file conf_file : string optional path to alternative configuration file kwargs : boolean|string|int optional additional flags and parameters .. note:: ``name`` and ``pattern`` were kept for backwards compatibility reasons. ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias for ``log_file``. These aliases will only be used if the ``entryname`` and ``log_file`` arguments are not passed. For a full list of arguments see ```logadm.show_args```. CLI Example: .. code-block:: bash salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7 salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700' ] variable[kwargs] assign[=] call[name[salt].utils.args.clean_kwargs, parameter[]] if <ast.BoolOp object at 0x7da204347f10> begin[:] call[name[kwargs]][constant[entryname]] assign[=] name[name] if compare[constant[log_file] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] if <ast.BoolOp object at 0x7da204345990> begin[:] call[name[kwargs]][constant[log_file]] assign[=] name[pattern] call[name[log].debug, parameter[constant[logadm.rotate - kwargs: %s], name[kwargs]]] variable[command] assign[=] call[constant[logadm -f {}].format, parameter[name[conf_file]]] for taget[tuple[[<ast.Name object at 0x7da20c6a8a30>, <ast.Name object at 0x7da20c6aac50>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da20c6abfa0> begin[:] variable[command] assign[=] call[constant[{} {}].format, parameter[name[command], call[name[_arg2opt], parameter[name[arg]]]]] if compare[constant[log_file] in name[kwargs]] begin[:] if compare[constant[entryname] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] variable[command] assign[=] call[constant[{} -w {}].format, parameter[name[command], call[name[_quote_args], 
parameter[call[name[kwargs]][constant[log_file]]]]]] call[name[log].debug, parameter[constant[logadm.rotate - command: %s], name[command]]] variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[command]]] if compare[call[name[result]][constant[retcode]] not_equal[!=] constant[0]] begin[:] return[call[name[dict], parameter[]]] return[call[name[dict], parameter[]]]
keyword[def] identifier[rotate] ( identifier[name] , identifier[pattern] = keyword[None] , identifier[conf_file] = identifier[default_conf] ,** identifier[kwargs] ): literal[string] identifier[kwargs] = identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[kwargs] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] keyword[and] identifier[name] keyword[and] keyword[not] identifier[name] . identifier[startswith] ( literal[string] ): identifier[kwargs] [ literal[string] ]= identifier[name] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : keyword[if] identifier[pattern] keyword[and] identifier[pattern] . identifier[startswith] ( literal[string] ): identifier[kwargs] [ literal[string] ]= identifier[pattern] keyword[elif] identifier[name] keyword[and] identifier[name] . identifier[startswith] ( literal[string] ): identifier[kwargs] [ literal[string] ]= identifier[name] identifier[log] . identifier[debug] ( literal[string] , identifier[kwargs] ) identifier[command] = literal[string] . identifier[format] ( identifier[conf_file] ) keyword[for] identifier[arg] , identifier[val] keyword[in] identifier[kwargs] . identifier[items] (): keyword[if] identifier[arg] keyword[in] identifier[option_toggles] . identifier[values] () keyword[and] identifier[val] : identifier[command] = literal[string] . identifier[format] ( identifier[command] , identifier[_arg2opt] ( identifier[arg] ), ) keyword[elif] identifier[arg] keyword[in] identifier[option_flags] . identifier[values] (): identifier[command] = literal[string] . identifier[format] ( identifier[command] , identifier[_arg2opt] ( identifier[arg] ), identifier[_quote_args] ( identifier[six] . identifier[text_type] ( identifier[val] )) ) keyword[elif] identifier[arg] != literal[string] : identifier[log] . 
identifier[warning] ( literal[string] , identifier[arg] ) keyword[if] literal[string] keyword[in] identifier[kwargs] : keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[command] = literal[string] . identifier[format] ( identifier[command] , identifier[_quote_args] ( identifier[kwargs] [ literal[string] ])) keyword[else] : identifier[command] = literal[string] . identifier[format] ( identifier[command] , identifier[_quote_args] ( identifier[kwargs] [ literal[string] ])) identifier[log] . identifier[debug] ( literal[string] , identifier[command] ) identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[command] , identifier[python_shell] = keyword[False] ) keyword[if] identifier[result] [ literal[string] ]!= literal[int] : keyword[return] identifier[dict] ( identifier[Error] = literal[string] , identifier[Output] = identifier[result] [ literal[string] ]) keyword[return] identifier[dict] ( identifier[Result] = literal[string] )
def rotate(name, pattern=None, conf_file=default_conf, **kwargs): """ Set up pattern for logging. name : string alias for entryname pattern : string alias for log_file conf_file : string optional path to alternative configuration file kwargs : boolean|string|int optional additional flags and parameters .. note:: ``name`` and ``pattern`` were kept for backwards compatibility reasons. ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias for ``log_file``. These aliases will only be used if the ``entryname`` and ``log_file`` arguments are not passed. For a full list of arguments see ```logadm.show_args```. CLI Example: .. code-block:: bash salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7 salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700' """ ## cleanup kwargs kwargs = salt.utils.args.clean_kwargs(**kwargs) ## inject name into kwargs if 'entryname' not in kwargs and name and (not name.startswith('/')): kwargs['entryname'] = name # depends on [control=['if'], data=[]] ## inject pattern into kwargs if 'log_file' not in kwargs: if pattern and pattern.startswith('/'): kwargs['log_file'] = pattern # depends on [control=['if'], data=[]] # NOTE: for backwards compatibility check if name is a path elif name and name.startswith('/'): kwargs['log_file'] = name # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']] ## build command log.debug('logadm.rotate - kwargs: %s', kwargs) command = 'logadm -f {}'.format(conf_file) for (arg, val) in kwargs.items(): if arg in option_toggles.values() and val: command = '{} {}'.format(command, _arg2opt(arg)) # depends on [control=['if'], data=[]] elif arg in option_flags.values(): command = '{} {} {}'.format(command, _arg2opt(arg), _quote_args(six.text_type(val))) # depends on [control=['if'], data=['arg']] elif arg != 'log_file': log.warning("Unknown argument %s, don't know how to map this!", arg) # depends on 
[control=['if'], data=['arg']] # depends on [control=['for'], data=[]] if 'log_file' in kwargs: # NOTE: except from ```man logadm``` # If no log file name is provided on a logadm command line, the entry # name is assumed to be the same as the log file name. For example, # the following two lines achieve the same thing, keeping two copies # of rotated log files: # # % logadm -C2 -w mylog /my/really/long/log/file/name # % logadm -C2 -w /my/really/long/log/file/name if 'entryname' not in kwargs: command = '{} -w {}'.format(command, _quote_args(kwargs['log_file'])) # depends on [control=['if'], data=['kwargs']] else: command = '{} {}'.format(command, _quote_args(kwargs['log_file'])) # depends on [control=['if'], data=['kwargs']] log.debug('logadm.rotate - command: %s', command) result = __salt__['cmd.run_all'](command, python_shell=False) if result['retcode'] != 0: return dict(Error='Failed in adding log', Output=result['stderr']) # depends on [control=['if'], data=[]] return dict(Result='Success')
def update_file(self, content): """ It will convert json content to json string and update into file. Return: Boolean True/False """ updated_content = json.dumps(content) file_obj = open(self.file, 'r+') file_obj.write(str(updated_content)) file_obj.close() return True
def function[update_file, parameter[self, content]]: constant[ It will convert json content to json string and update into file. Return: Boolean True/False ] variable[updated_content] assign[=] call[name[json].dumps, parameter[name[content]]] variable[file_obj] assign[=] call[name[open], parameter[name[self].file, constant[r+]]] call[name[file_obj].write, parameter[call[name[str], parameter[name[updated_content]]]]] call[name[file_obj].close, parameter[]] return[constant[True]]
keyword[def] identifier[update_file] ( identifier[self] , identifier[content] ): literal[string] identifier[updated_content] = identifier[json] . identifier[dumps] ( identifier[content] ) identifier[file_obj] = identifier[open] ( identifier[self] . identifier[file] , literal[string] ) identifier[file_obj] . identifier[write] ( identifier[str] ( identifier[updated_content] )) identifier[file_obj] . identifier[close] () keyword[return] keyword[True]
def update_file(self, content): """ It will convert json content to json string and update into file. Return: Boolean True/False """ updated_content = json.dumps(content) file_obj = open(self.file, 'r+') file_obj.write(str(updated_content)) file_obj.close() return True
def xor_nonlocal_decompose(qubits: Iterable[raw_types.Qid], onto_qubit: raw_types.Qid ) -> Iterable[raw_types.Operation]: """Decomposition ignores connectivity.""" for qubit in qubits: if qubit != onto_qubit: yield common_gates.CNOT(qubit, onto_qubit)
def function[xor_nonlocal_decompose, parameter[qubits, onto_qubit]]: constant[Decomposition ignores connectivity.] for taget[name[qubit]] in starred[name[qubits]] begin[:] if compare[name[qubit] not_equal[!=] name[onto_qubit]] begin[:] <ast.Yield object at 0x7da1b1c3c640>
keyword[def] identifier[xor_nonlocal_decompose] ( identifier[qubits] : identifier[Iterable] [ identifier[raw_types] . identifier[Qid] ], identifier[onto_qubit] : identifier[raw_types] . identifier[Qid] )-> identifier[Iterable] [ identifier[raw_types] . identifier[Operation] ]: literal[string] keyword[for] identifier[qubit] keyword[in] identifier[qubits] : keyword[if] identifier[qubit] != identifier[onto_qubit] : keyword[yield] identifier[common_gates] . identifier[CNOT] ( identifier[qubit] , identifier[onto_qubit] )
def xor_nonlocal_decompose(qubits: Iterable[raw_types.Qid], onto_qubit: raw_types.Qid) -> Iterable[raw_types.Operation]: """Decomposition ignores connectivity.""" for qubit in qubits: if qubit != onto_qubit: yield common_gates.CNOT(qubit, onto_qubit) # depends on [control=['if'], data=['qubit', 'onto_qubit']] # depends on [control=['for'], data=['qubit']]
def find_common_type(types): """ Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type """ if len(types) == 0: raise ValueError('no types given') first = types[0] # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) # => object if all(is_dtype_equal(first, t) for t in types[1:]): return first if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype)) for t in types): return np.object # take lowest unit if all(is_datetime64_dtype(t) for t in types): return np.dtype('datetime64[ns]') if all(is_timedelta64_dtype(t) for t in types): return np.dtype('timedelta64[ns]') # don't mix bool / int or float or complex # this is different from numpy, which casts bool with float/int as int has_bools = any(is_bool_dtype(t) for t in types) if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.object return np.find_common_type(types, [])
def function[find_common_type, parameter[types]]: constant[ Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type ] if compare[call[name[len], parameter[name[types]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da20c991720> variable[first] assign[=] call[name[types]][constant[0]] if call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c993610>]] begin[:] return[name[first]] if call[name[any], parameter[<ast.GeneratorExp object at 0x7da20c991480>]] begin[:] return[name[np].object] if call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c990460>]] begin[:] return[call[name[np].dtype, parameter[constant[datetime64[ns]]]]] if call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c990dc0>]] begin[:] return[call[name[np].dtype, parameter[constant[timedelta64[ns]]]]] variable[has_bools] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da20c992230>]] if name[has_bools] begin[:] for taget[name[t]] in starred[name[types]] begin[:] if <ast.BoolOp object at 0x7da1b206afe0> begin[:] return[name[np].object] return[call[name[np].find_common_type, parameter[name[types], list[[]]]]]
keyword[def] identifier[find_common_type] ( identifier[types] ): literal[string] keyword[if] identifier[len] ( identifier[types] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[first] = identifier[types] [ literal[int] ] keyword[if] identifier[all] ( identifier[is_dtype_equal] ( identifier[first] , identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[types] [ literal[int] :]): keyword[return] identifier[first] keyword[if] identifier[any] ( identifier[isinstance] ( identifier[t] ,( identifier[PandasExtensionDtype] , identifier[ExtensionDtype] )) keyword[for] identifier[t] keyword[in] identifier[types] ): keyword[return] identifier[np] . identifier[object] keyword[if] identifier[all] ( identifier[is_datetime64_dtype] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[types] ): keyword[return] identifier[np] . identifier[dtype] ( literal[string] ) keyword[if] identifier[all] ( identifier[is_timedelta64_dtype] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[types] ): keyword[return] identifier[np] . identifier[dtype] ( literal[string] ) identifier[has_bools] = identifier[any] ( identifier[is_bool_dtype] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[types] ) keyword[if] identifier[has_bools] : keyword[for] identifier[t] keyword[in] identifier[types] : keyword[if] identifier[is_integer_dtype] ( identifier[t] ) keyword[or] identifier[is_float_dtype] ( identifier[t] ) keyword[or] identifier[is_complex_dtype] ( identifier[t] ): keyword[return] identifier[np] . identifier[object] keyword[return] identifier[np] . identifier[find_common_type] ( identifier[types] ,[])
def find_common_type(types): """ Find a common data type among the given dtypes. Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type """ if len(types) == 0: raise ValueError('no types given') # depends on [control=['if'], data=[]] first = types[0] # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) # => object if all((is_dtype_equal(first, t) for t in types[1:])): return first # depends on [control=['if'], data=[]] if any((isinstance(t, (PandasExtensionDtype, ExtensionDtype)) for t in types)): return np.object # depends on [control=['if'], data=[]] # take lowest unit if all((is_datetime64_dtype(t) for t in types)): return np.dtype('datetime64[ns]') # depends on [control=['if'], data=[]] if all((is_timedelta64_dtype(t) for t in types)): return np.dtype('timedelta64[ns]') # depends on [control=['if'], data=[]] # don't mix bool / int or float or complex # this is different from numpy, which casts bool with float/int as int has_bools = any((is_bool_dtype(t) for t in types)) if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.object # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] # depends on [control=['if'], data=[]] return np.find_common_type(types, [])
def capture_video(self, length): """ Capture a video. This always writes to the memory card, since internal RAM is likely to run out of space very quickly. Currently this only works with Nikon cameras. :param length: Length of the video to capture in seconds. :type length: int :return: Video file :rtype: :py:class:`File` """ with self.capture_video_context() as ctx: time.sleep(length) return ctx.videofile
def function[capture_video, parameter[self, length]]: constant[ Capture a video. This always writes to the memory card, since internal RAM is likely to run out of space very quickly. Currently this only works with Nikon cameras. :param length: Length of the video to capture in seconds. :type length: int :return: Video file :rtype: :py:class:`File` ] with call[name[self].capture_video_context, parameter[]] begin[:] call[name[time].sleep, parameter[name[length]]] return[name[ctx].videofile]
keyword[def] identifier[capture_video] ( identifier[self] , identifier[length] ): literal[string] keyword[with] identifier[self] . identifier[capture_video_context] () keyword[as] identifier[ctx] : identifier[time] . identifier[sleep] ( identifier[length] ) keyword[return] identifier[ctx] . identifier[videofile]
def capture_video(self, length): """ Capture a video. This always writes to the memory card, since internal RAM is likely to run out of space very quickly. Currently this only works with Nikon cameras. :param length: Length of the video to capture in seconds. :type length: int :return: Video file :rtype: :py:class:`File` """ with self.capture_video_context() as ctx: time.sleep(length) # depends on [control=['with'], data=[]] return ctx.videofile
def show_history(self, status=None, nids=None, full_history=False, metadata=False): """ Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental) """ nrows, ncols = get_terminal_size() works_done = [] # Loop on the tasks and show the history of the work is not in works_done for task in self.iflat_tasks(status=status, nids=nids): work = task.work if work not in works_done: works_done.append(work) if work.history or full_history: cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts) print(work.history.to_string(metadata=metadata)) if task.history or full_history: cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts) print(task.history.to_string(metadata=metadata)) # Print the history of the flow. if self.history or full_history: cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts) print(self.history.to_string(metadata=metadata))
def function[show_history, parameter[self, status, nids, full_history, metadata]]: constant[ Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental) ] <ast.Tuple object at 0x7da2047eae60> assign[=] call[name[get_terminal_size], parameter[]] variable[works_done] assign[=] list[[]] for taget[name[task]] in starred[call[name[self].iflat_tasks, parameter[]]] begin[:] variable[work] assign[=] name[task].work if compare[name[work] <ast.NotIn object at 0x7da2590d7190> name[works_done]] begin[:] call[name[works_done].append, parameter[name[work]]] if <ast.BoolOp object at 0x7da18f00d8d0> begin[:] call[name[cprint], parameter[call[name[make_banner], parameter[call[name[str], parameter[name[work]]]]]]] call[name[print], parameter[call[name[work].history.to_string, parameter[]]]] if <ast.BoolOp object at 0x7da18f00fca0> begin[:] call[name[cprint], parameter[call[name[make_banner], parameter[call[name[str], parameter[name[task]]]]]]] call[name[print], parameter[call[name[task].history.to_string, parameter[]]]] if <ast.BoolOp object at 0x7da2047eb310> begin[:] call[name[cprint], parameter[call[name[make_banner], parameter[call[name[str], parameter[name[self]]]]]]] call[name[print], parameter[call[name[self].history.to_string, parameter[]]]]
keyword[def] identifier[show_history] ( identifier[self] , identifier[status] = keyword[None] , identifier[nids] = keyword[None] , identifier[full_history] = keyword[False] , identifier[metadata] = keyword[False] ): literal[string] identifier[nrows] , identifier[ncols] = identifier[get_terminal_size] () identifier[works_done] =[] keyword[for] identifier[task] keyword[in] identifier[self] . identifier[iflat_tasks] ( identifier[status] = identifier[status] , identifier[nids] = identifier[nids] ): identifier[work] = identifier[task] . identifier[work] keyword[if] identifier[work] keyword[not] keyword[in] identifier[works_done] : identifier[works_done] . identifier[append] ( identifier[work] ) keyword[if] identifier[work] . identifier[history] keyword[or] identifier[full_history] : identifier[cprint] ( identifier[make_banner] ( identifier[str] ( identifier[work] ), identifier[width] = identifier[ncols] , identifier[mark] = literal[string] ),** identifier[work] . identifier[status] . identifier[color_opts] ) identifier[print] ( identifier[work] . identifier[history] . identifier[to_string] ( identifier[metadata] = identifier[metadata] )) keyword[if] identifier[task] . identifier[history] keyword[or] identifier[full_history] : identifier[cprint] ( identifier[make_banner] ( identifier[str] ( identifier[task] ), identifier[width] = identifier[ncols] , identifier[mark] = literal[string] ),** identifier[task] . identifier[status] . identifier[color_opts] ) identifier[print] ( identifier[task] . identifier[history] . identifier[to_string] ( identifier[metadata] = identifier[metadata] )) keyword[if] identifier[self] . identifier[history] keyword[or] identifier[full_history] : identifier[cprint] ( identifier[make_banner] ( identifier[str] ( identifier[self] ), identifier[width] = identifier[ncols] , identifier[mark] = literal[string] ),** identifier[self] . identifier[status] . identifier[color_opts] ) identifier[print] ( identifier[self] . identifier[history] . 
identifier[to_string] ( identifier[metadata] = identifier[metadata] ))
def show_history(self, status=None, nids=None, full_history=False, metadata=False): """ Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental) """ (nrows, ncols) = get_terminal_size() works_done = [] # Loop on the tasks and show the history of the work is not in works_done for task in self.iflat_tasks(status=status, nids=nids): work = task.work if work not in works_done: works_done.append(work) if work.history or full_history: cprint(make_banner(str(work), width=ncols, mark='='), **work.status.color_opts) print(work.history.to_string(metadata=metadata)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['work', 'works_done']] if task.history or full_history: cprint(make_banner(str(task), width=ncols, mark='='), **task.status.color_opts) print(task.history.to_string(metadata=metadata)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']] # Print the history of the flow. if self.history or full_history: cprint(make_banner(str(self), width=ncols, mark='='), **self.status.color_opts) print(self.history.to_string(metadata=metadata)) # depends on [control=['if'], data=[]]
def _parse_and_format(self): """ Parse and format the results returned by the ACS Zeropoint Calculator. Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in the response. Format the results into an astropy.table.QTable with corresponding units and assign it to the zpt_table attribute. """ soup = BeautifulSoup(self._response.read(), 'html.parser') # Grab all elements in the table returned by the ZPT calc. td = soup.find_all('td') # Remove the units attached to PHOTFLAM and PHOTPLAM column names. td = [val.text.split(' ')[0] for val in td] # Turn the single list into a 2-D numpy array data = np.reshape(td, (int(len(td) / self._block_size), self._block_size)) # Create the QTable, note that sometimes self._response will be empty # even though the return was successful; hence the try/except to catch # any potential index errors. Provide the user with a message and # set the zpt_table to None. try: tab = QTable(data[1:, :], names=data[0], dtype=[str, float, float, float, float, float]) except IndexError as e: msg = ('{}\n{}\n There was an issue parsing the request. ' 'Try resubmitting the query. If this issue persists, please ' 'submit a ticket to the Help Desk at' 'https://stsci.service-now.com/hst' .format(e, self._msg_div)) LOG.info(msg) self._zpt_table = None else: # If and only if no exception was raised, attach the units to each # column of the QTable. Note we skip the FILTER column because # Quantity objects in astropy must be numerical (i.e. not str) for col in tab.colnames: if col.lower() == 'filter': continue tab[col].unit = self._data_units[col] self._zpt_table = tab
def function[_parse_and_format, parameter[self]]: constant[ Parse and format the results returned by the ACS Zeropoint Calculator. Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in the response. Format the results into an astropy.table.QTable with corresponding units and assign it to the zpt_table attribute. ] variable[soup] assign[=] call[name[BeautifulSoup], parameter[call[name[self]._response.read, parameter[]], constant[html.parser]]] variable[td] assign[=] call[name[soup].find_all, parameter[constant[td]]] variable[td] assign[=] <ast.ListComp object at 0x7da1b10c6020> variable[data] assign[=] call[name[np].reshape, parameter[name[td], tuple[[<ast.Call object at 0x7da1b10c6e00>, <ast.Attribute object at 0x7da1b10c4430>]]]] <ast.Try object at 0x7da1b10c7a30>
keyword[def] identifier[_parse_and_format] ( identifier[self] ): literal[string] identifier[soup] = identifier[BeautifulSoup] ( identifier[self] . identifier[_response] . identifier[read] (), literal[string] ) identifier[td] = identifier[soup] . identifier[find_all] ( literal[string] ) identifier[td] =[ identifier[val] . identifier[text] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[val] keyword[in] identifier[td] ] identifier[data] = identifier[np] . identifier[reshape] ( identifier[td] , ( identifier[int] ( identifier[len] ( identifier[td] )/ identifier[self] . identifier[_block_size] ), identifier[self] . identifier[_block_size] )) keyword[try] : identifier[tab] = identifier[QTable] ( identifier[data] [ literal[int] :,:], identifier[names] = identifier[data] [ literal[int] ], identifier[dtype] =[ identifier[str] , identifier[float] , identifier[float] , identifier[float] , identifier[float] , identifier[float] ]) keyword[except] identifier[IndexError] keyword[as] identifier[e] : identifier[msg] =( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[e] , identifier[self] . identifier[_msg_div] )) identifier[LOG] . identifier[info] ( identifier[msg] ) identifier[self] . identifier[_zpt_table] = keyword[None] keyword[else] : keyword[for] identifier[col] keyword[in] identifier[tab] . identifier[colnames] : keyword[if] identifier[col] . identifier[lower] ()== literal[string] : keyword[continue] identifier[tab] [ identifier[col] ]. identifier[unit] = identifier[self] . identifier[_data_units] [ identifier[col] ] identifier[self] . identifier[_zpt_table] = identifier[tab]
def _parse_and_format(self): """ Parse and format the results returned by the ACS Zeropoint Calculator. Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in the response. Format the results into an astropy.table.QTable with corresponding units and assign it to the zpt_table attribute. """ soup = BeautifulSoup(self._response.read(), 'html.parser') # Grab all elements in the table returned by the ZPT calc. td = soup.find_all('td') # Remove the units attached to PHOTFLAM and PHOTPLAM column names. td = [val.text.split(' ')[0] for val in td] # Turn the single list into a 2-D numpy array data = np.reshape(td, (int(len(td) / self._block_size), self._block_size)) # Create the QTable, note that sometimes self._response will be empty # even though the return was successful; hence the try/except to catch # any potential index errors. Provide the user with a message and # set the zpt_table to None. try: tab = QTable(data[1:, :], names=data[0], dtype=[str, float, float, float, float, float]) # depends on [control=['try'], data=[]] except IndexError as e: msg = '{}\n{}\n There was an issue parsing the request. Try resubmitting the query. If this issue persists, please submit a ticket to the Help Desk athttps://stsci.service-now.com/hst'.format(e, self._msg_div) LOG.info(msg) self._zpt_table = None # depends on [control=['except'], data=['e']] else: # If and only if no exception was raised, attach the units to each # column of the QTable. Note we skip the FILTER column because # Quantity objects in astropy must be numerical (i.e. not str) for col in tab.colnames: if col.lower() == 'filter': continue # depends on [control=['if'], data=[]] tab[col].unit = self._data_units[col] # depends on [control=['for'], data=['col']] self._zpt_table = tab
def _encode_timestamp(name, value, dummy0, dummy1): """Encode bson.timestamp.Timestamp.""" return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
def function[_encode_timestamp, parameter[name, value, dummy0, dummy1]]: constant[Encode bson.timestamp.Timestamp.] return[binary_operation[binary_operation[constant[b'\x11'] + name[name]] + call[name[_PACK_TIMESTAMP], parameter[name[value].inc, name[value].time]]]]
keyword[def] identifier[_encode_timestamp] ( identifier[name] , identifier[value] , identifier[dummy0] , identifier[dummy1] ): literal[string] keyword[return] literal[string] + identifier[name] + identifier[_PACK_TIMESTAMP] ( identifier[value] . identifier[inc] , identifier[value] . identifier[time] )
def _encode_timestamp(name, value, dummy0, dummy1): """Encode bson.timestamp.Timestamp.""" return b'\x11' + name + _PACK_TIMESTAMP(value.inc, value.time)
def GRUCell(units): """Builds a traditional GRU cell with dense internal transformations. Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 Args: units: Number of hidden units. Returns: A Stax model representing a traditional GRU RNN cell. """ return GeneralGRUCell( candidate_transform=lambda: core.Dense(units=units), memory_transform=combinators.Identity, gate_nonlinearity=core.Sigmoid, candidate_nonlinearity=core.Tanh)
def function[GRUCell, parameter[units]]: constant[Builds a traditional GRU cell with dense internal transformations. Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 Args: units: Number of hidden units. Returns: A Stax model representing a traditional GRU RNN cell. ] return[call[name[GeneralGRUCell], parameter[]]]
keyword[def] identifier[GRUCell] ( identifier[units] ): literal[string] keyword[return] identifier[GeneralGRUCell] ( identifier[candidate_transform] = keyword[lambda] : identifier[core] . identifier[Dense] ( identifier[units] = identifier[units] ), identifier[memory_transform] = identifier[combinators] . identifier[Identity] , identifier[gate_nonlinearity] = identifier[core] . identifier[Sigmoid] , identifier[candidate_nonlinearity] = identifier[core] . identifier[Tanh] )
def GRUCell(units): """Builds a traditional GRU cell with dense internal transformations. Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 Args: units: Number of hidden units. Returns: A Stax model representing a traditional GRU RNN cell. """ return GeneralGRUCell(candidate_transform=lambda : core.Dense(units=units), memory_transform=combinators.Identity, gate_nonlinearity=core.Sigmoid, candidate_nonlinearity=core.Tanh)
def rename_multireddit(self, current_name, new_name, *args, **kwargs): """Rename a Multireddit. :param current_name: The name of the multireddit to rename :param new_name: The new name to assign to this multireddit The additional parameters are passed directly into :meth:`~praw.__init__.BaseReddit.request_json` """ current_path = self.MULTI_PATH.format(self.user.name, current_name) new_path = self.MULTI_PATH.format(self.user.name, new_name) data = {'from': current_path, 'to': new_path} return self.request_json(self.config['multireddit_rename'], data=data, *args, **kwargs)
def function[rename_multireddit, parameter[self, current_name, new_name]]: constant[Rename a Multireddit. :param current_name: The name of the multireddit to rename :param new_name: The new name to assign to this multireddit The additional parameters are passed directly into :meth:`~praw.__init__.BaseReddit.request_json` ] variable[current_path] assign[=] call[name[self].MULTI_PATH.format, parameter[name[self].user.name, name[current_name]]] variable[new_path] assign[=] call[name[self].MULTI_PATH.format, parameter[name[self].user.name, name[new_name]]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b23472b0>, <ast.Constant object at 0x7da1b2347bb0>], [<ast.Name object at 0x7da1b2344f10>, <ast.Name object at 0x7da1b2344190>]] return[call[name[self].request_json, parameter[call[name[self].config][constant[multireddit_rename]], <ast.Starred object at 0x7da1b2344640>]]]
keyword[def] identifier[rename_multireddit] ( identifier[self] , identifier[current_name] , identifier[new_name] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[current_path] = identifier[self] . identifier[MULTI_PATH] . identifier[format] ( identifier[self] . identifier[user] . identifier[name] , identifier[current_name] ) identifier[new_path] = identifier[self] . identifier[MULTI_PATH] . identifier[format] ( identifier[self] . identifier[user] . identifier[name] , identifier[new_name] ) identifier[data] ={ literal[string] : identifier[current_path] , literal[string] : identifier[new_path] } keyword[return] identifier[self] . identifier[request_json] ( identifier[self] . identifier[config] [ literal[string] ], identifier[data] = identifier[data] , * identifier[args] ,** identifier[kwargs] )
def rename_multireddit(self, current_name, new_name, *args, **kwargs): """Rename a Multireddit. :param current_name: The name of the multireddit to rename :param new_name: The new name to assign to this multireddit The additional parameters are passed directly into :meth:`~praw.__init__.BaseReddit.request_json` """ current_path = self.MULTI_PATH.format(self.user.name, current_name) new_path = self.MULTI_PATH.format(self.user.name, new_name) data = {'from': current_path, 'to': new_path} return self.request_json(self.config['multireddit_rename'], *args, data=data, **kwargs)
def get_healthcheck(self, service_id, version_number, name): """Get the healthcheck for a particular service and version.""" content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name)) return FastlyHealthCheck(self, content)
def function[get_healthcheck, parameter[self, service_id, version_number, name]]: constant[Get the healthcheck for a particular service and version.] variable[content] assign[=] call[name[self]._fetch, parameter[binary_operation[constant[/service/%s/version/%d/healthcheck/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b11390f0>, <ast.Name object at 0x7da1b113b220>, <ast.Name object at 0x7da1b113ad10>]]]]] return[call[name[FastlyHealthCheck], parameter[name[self], name[content]]]]
keyword[def] identifier[get_healthcheck] ( identifier[self] , identifier[service_id] , identifier[version_number] , identifier[name] ): literal[string] identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] %( identifier[service_id] , identifier[version_number] , identifier[name] )) keyword[return] identifier[FastlyHealthCheck] ( identifier[self] , identifier[content] )
def get_healthcheck(self, service_id, version_number, name): """Get the healthcheck for a particular service and version.""" content = self._fetch('/service/%s/version/%d/healthcheck/%s' % (service_id, version_number, name)) return FastlyHealthCheck(self, content)
def fetch(dataset="mi", datadir=datadir): """Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters ---------- dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: "eeg" ... EEG signals "triggers" ... Trigger latencies "labels" ... Class labels "fs" ... Sample rate "locations" ... Channel locations """ if dataset not in datasets: raise ValueError("Example data '{}' not available.".format(dataset)) else: files = datasets[dataset]["files"] url = datasets[dataset]["url"] md5 = datasets[dataset]["md5"] if not isdir(datadir): makedirs(datadir) data = [] for n, filename in enumerate(files): fullfile = join(datadir, filename) if not isfile(fullfile): with open(fullfile, "wb") as f: response = get(join(url, filename)) f.write(response.content) with open(fullfile, "rb") as f: # check if MD5 of downloaded file matches original hash hash = hashlib.md5(f.read()).hexdigest() if hash != md5[n]: raise MD5MismatchError("MD5 hash of {} does not match {}.".format(fullfile, md5[n])) data.append(convert(dataset, loadmat(fullfile))) return data
def function[fetch, parameter[dataset, datadir]]: constant[Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters ---------- dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: "eeg" ... EEG signals "triggers" ... Trigger latencies "labels" ... Class labels "fs" ... Sample rate "locations" ... Channel locations ] if compare[name[dataset] <ast.NotIn object at 0x7da2590d7190> name[datasets]] begin[:] <ast.Raise object at 0x7da1b26caa70> if <ast.UnaryOp object at 0x7da1b26c9e10> begin[:] call[name[makedirs], parameter[name[datadir]]] variable[data] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b26c8970>, <ast.Name object at 0x7da1b26cb310>]]] in starred[call[name[enumerate], parameter[name[files]]]] begin[:] variable[fullfile] assign[=] call[name[join], parameter[name[datadir], name[filename]]] if <ast.UnaryOp object at 0x7da1b26c9120> begin[:] with call[name[open], parameter[name[fullfile], constant[wb]]] begin[:] variable[response] assign[=] call[name[get], parameter[call[name[join], parameter[name[url], name[filename]]]]] call[name[f].write, parameter[name[response].content]] with call[name[open], parameter[name[fullfile], constant[rb]]] begin[:] variable[hash] assign[=] call[call[name[hashlib].md5, parameter[call[name[f].read, parameter[]]]].hexdigest, parameter[]] if compare[name[hash] not_equal[!=] call[name[md5]][name[n]]] begin[:] <ast.Raise object at 0x7da1b265fe20> call[name[data].append, parameter[call[name[convert], parameter[name[dataset], call[name[loadmat], 
parameter[name[fullfile]]]]]]] return[name[data]]
keyword[def] identifier[fetch] ( identifier[dataset] = literal[string] , identifier[datadir] = identifier[datadir] ): literal[string] keyword[if] identifier[dataset] keyword[not] keyword[in] identifier[datasets] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[dataset] )) keyword[else] : identifier[files] = identifier[datasets] [ identifier[dataset] ][ literal[string] ] identifier[url] = identifier[datasets] [ identifier[dataset] ][ literal[string] ] identifier[md5] = identifier[datasets] [ identifier[dataset] ][ literal[string] ] keyword[if] keyword[not] identifier[isdir] ( identifier[datadir] ): identifier[makedirs] ( identifier[datadir] ) identifier[data] =[] keyword[for] identifier[n] , identifier[filename] keyword[in] identifier[enumerate] ( identifier[files] ): identifier[fullfile] = identifier[join] ( identifier[datadir] , identifier[filename] ) keyword[if] keyword[not] identifier[isfile] ( identifier[fullfile] ): keyword[with] identifier[open] ( identifier[fullfile] , literal[string] ) keyword[as] identifier[f] : identifier[response] = identifier[get] ( identifier[join] ( identifier[url] , identifier[filename] )) identifier[f] . identifier[write] ( identifier[response] . identifier[content] ) keyword[with] identifier[open] ( identifier[fullfile] , literal[string] ) keyword[as] identifier[f] : identifier[hash] = identifier[hashlib] . identifier[md5] ( identifier[f] . identifier[read] ()). identifier[hexdigest] () keyword[if] identifier[hash] != identifier[md5] [ identifier[n] ]: keyword[raise] identifier[MD5MismatchError] ( literal[string] . identifier[format] ( identifier[fullfile] , identifier[md5] [ identifier[n] ])) identifier[data] . identifier[append] ( identifier[convert] ( identifier[dataset] , identifier[loadmat] ( identifier[fullfile] ))) keyword[return] identifier[data]
def fetch(dataset='mi', datadir=datadir): """Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters ---------- dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: "eeg" ... EEG signals "triggers" ... Trigger latencies "labels" ... Class labels "fs" ... Sample rate "locations" ... Channel locations """ if dataset not in datasets: raise ValueError("Example data '{}' not available.".format(dataset)) # depends on [control=['if'], data=['dataset']] else: files = datasets[dataset]['files'] url = datasets[dataset]['url'] md5 = datasets[dataset]['md5'] if not isdir(datadir): makedirs(datadir) # depends on [control=['if'], data=[]] data = [] for (n, filename) in enumerate(files): fullfile = join(datadir, filename) if not isfile(fullfile): with open(fullfile, 'wb') as f: response = get(join(url, filename)) f.write(response.content) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] with open(fullfile, 'rb') as f: # check if MD5 of downloaded file matches original hash hash = hashlib.md5(f.read()).hexdigest() # depends on [control=['with'], data=['f']] if hash != md5[n]: raise MD5MismatchError('MD5 hash of {} does not match {}.'.format(fullfile, md5[n])) # depends on [control=['if'], data=[]] data.append(convert(dataset, loadmat(fullfile))) # depends on [control=['for'], data=[]] return data
def _found_barcode(self, record, sample, barcode=None): """Hook called when barcode is found""" assert record.id == self.current_record['sequence_name'] self.current_record['sample'] = sample
def function[_found_barcode, parameter[self, record, sample, barcode]]: constant[Hook called when barcode is found] assert[compare[name[record].id equal[==] call[name[self].current_record][constant[sequence_name]]]] call[name[self].current_record][constant[sample]] assign[=] name[sample]
keyword[def] identifier[_found_barcode] ( identifier[self] , identifier[record] , identifier[sample] , identifier[barcode] = keyword[None] ): literal[string] keyword[assert] identifier[record] . identifier[id] == identifier[self] . identifier[current_record] [ literal[string] ] identifier[self] . identifier[current_record] [ literal[string] ]= identifier[sample]
def _found_barcode(self, record, sample, barcode=None): """Hook called when barcode is found""" assert record.id == self.current_record['sequence_name'] self.current_record['sample'] = sample
def search(self, origin, backend_name, category, archived_after): """Search archives. Get the archives which store data based on the given parameters. These parameters define which the origin was (`origin`), how data was fetched (`backend_name`) and data type ('category'). Only those archives created on or after `archived_after` will be returned. The method returns a list with the file paths to those archives. The list is sorted by the date of creation of each archive. :param origin: data origin :param backend_name: backed used to fetch data :param category: type of the items fetched by the backend :param archived_after: get archives created on or after this date :returns: a list with archive names which match the search criteria """ archives = self._search_archives(origin, backend_name, category, archived_after) archives = [(fp, date) for fp, date in archives] archives = [fp for fp, _ in sorted(archives, key=lambda x: x[1])] return archives
def function[search, parameter[self, origin, backend_name, category, archived_after]]: constant[Search archives. Get the archives which store data based on the given parameters. These parameters define which the origin was (`origin`), how data was fetched (`backend_name`) and data type ('category'). Only those archives created on or after `archived_after` will be returned. The method returns a list with the file paths to those archives. The list is sorted by the date of creation of each archive. :param origin: data origin :param backend_name: backed used to fetch data :param category: type of the items fetched by the backend :param archived_after: get archives created on or after this date :returns: a list with archive names which match the search criteria ] variable[archives] assign[=] call[name[self]._search_archives, parameter[name[origin], name[backend_name], name[category], name[archived_after]]] variable[archives] assign[=] <ast.ListComp object at 0x7da1b020e920> variable[archives] assign[=] <ast.ListComp object at 0x7da1b020f0a0> return[name[archives]]
keyword[def] identifier[search] ( identifier[self] , identifier[origin] , identifier[backend_name] , identifier[category] , identifier[archived_after] ): literal[string] identifier[archives] = identifier[self] . identifier[_search_archives] ( identifier[origin] , identifier[backend_name] , identifier[category] , identifier[archived_after] ) identifier[archives] =[( identifier[fp] , identifier[date] ) keyword[for] identifier[fp] , identifier[date] keyword[in] identifier[archives] ] identifier[archives] =[ identifier[fp] keyword[for] identifier[fp] , identifier[_] keyword[in] identifier[sorted] ( identifier[archives] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])] keyword[return] identifier[archives]
def search(self, origin, backend_name, category, archived_after): """Search archives. Get the archives which store data based on the given parameters. These parameters define which the origin was (`origin`), how data was fetched (`backend_name`) and data type ('category'). Only those archives created on or after `archived_after` will be returned. The method returns a list with the file paths to those archives. The list is sorted by the date of creation of each archive. :param origin: data origin :param backend_name: backed used to fetch data :param category: type of the items fetched by the backend :param archived_after: get archives created on or after this date :returns: a list with archive names which match the search criteria """ archives = self._search_archives(origin, backend_name, category, archived_after) archives = [(fp, date) for (fp, date) in archives] archives = [fp for (fp, _) in sorted(archives, key=lambda x: x[1])] return archives
def ovsdb_server_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(ovsdb_server, "name") name_key.text = kwargs.pop('name') port = ET.SubElement(ovsdb_server, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[ovsdb_server_port, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[ovsdb_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ovsdb-server]]] variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[ovsdb_server], constant[name]]] name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]] variable[port] assign[=] call[name[ET].SubElement, parameter[name[ovsdb_server], constant[port]]] name[port].text assign[=] call[name[kwargs].pop, parameter[constant[port]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[ovsdb_server_port] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[ovsdb_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[ovsdb_server] , literal[string] ) identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[port] = identifier[ET] . identifier[SubElement] ( identifier[ovsdb_server] , literal[string] ) identifier[port] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def ovsdb_server_port(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') ovsdb_server = ET.SubElement(config, 'ovsdb-server', xmlns='urn:brocade.com:mgmt:brocade-tunnels') name_key = ET.SubElement(ovsdb_server, 'name') name_key.text = kwargs.pop('name') port = ET.SubElement(ovsdb_server, 'port') port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
def get_method_list(): ''' Include manual methods by default ''' methods = [str(_('Cash')),str(_('Check')),str(_('Bank/Debit Card')),str(_('Other'))] methods += ExpenseItem.objects.order_by().values_list('paymentMethod',flat=True).distinct() methods += RevenueItem.objects.order_by().values_list('paymentMethod',flat=True).distinct() methods_list = list(set(methods)) if None in methods_list: methods_list.remove(None) return methods_list
def function[get_method_list, parameter[]]: constant[ Include manual methods by default ] variable[methods] assign[=] list[[<ast.Call object at 0x7da1b159b9d0>, <ast.Call object at 0x7da1b159a410>, <ast.Call object at 0x7da1b159bdf0>, <ast.Call object at 0x7da1b1599810>]] <ast.AugAssign object at 0x7da1b159a6b0> <ast.AugAssign object at 0x7da1b15997b0> variable[methods_list] assign[=] call[name[list], parameter[call[name[set], parameter[name[methods]]]]] if compare[constant[None] in name[methods_list]] begin[:] call[name[methods_list].remove, parameter[constant[None]]] return[name[methods_list]]
keyword[def] identifier[get_method_list] (): literal[string] identifier[methods] =[ identifier[str] ( identifier[_] ( literal[string] )), identifier[str] ( identifier[_] ( literal[string] )), identifier[str] ( identifier[_] ( literal[string] )), identifier[str] ( identifier[_] ( literal[string] ))] identifier[methods] += identifier[ExpenseItem] . identifier[objects] . identifier[order_by] (). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ). identifier[distinct] () identifier[methods] += identifier[RevenueItem] . identifier[objects] . identifier[order_by] (). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ). identifier[distinct] () identifier[methods_list] = identifier[list] ( identifier[set] ( identifier[methods] )) keyword[if] keyword[None] keyword[in] identifier[methods_list] : identifier[methods_list] . identifier[remove] ( keyword[None] ) keyword[return] identifier[methods_list]
def get_method_list(): """ Include manual methods by default """ methods = [str(_('Cash')), str(_('Check')), str(_('Bank/Debit Card')), str(_('Other'))] methods += ExpenseItem.objects.order_by().values_list('paymentMethod', flat=True).distinct() methods += RevenueItem.objects.order_by().values_list('paymentMethod', flat=True).distinct() methods_list = list(set(methods)) if None in methods_list: methods_list.remove(None) # depends on [control=['if'], data=['methods_list']] return methods_list
def post(self, *args, **kwargs): """Create the SSH file & then return the normal get method...""" existing_ssh = models.SSHConfig.objects.all() if existing_ssh.exists(): return self.get_view() remote_user = self.request.POST.get('remote_user', 'root') create_ssh_config(remote_user=remote_user) return self.get_view()
def function[post, parameter[self]]: constant[Create the SSH file & then return the normal get method...] variable[existing_ssh] assign[=] call[name[models].SSHConfig.objects.all, parameter[]] if call[name[existing_ssh].exists, parameter[]] begin[:] return[call[name[self].get_view, parameter[]]] variable[remote_user] assign[=] call[name[self].request.POST.get, parameter[constant[remote_user], constant[root]]] call[name[create_ssh_config], parameter[]] return[call[name[self].get_view, parameter[]]]
keyword[def] identifier[post] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[existing_ssh] = identifier[models] . identifier[SSHConfig] . identifier[objects] . identifier[all] () keyword[if] identifier[existing_ssh] . identifier[exists] (): keyword[return] identifier[self] . identifier[get_view] () identifier[remote_user] = identifier[self] . identifier[request] . identifier[POST] . identifier[get] ( literal[string] , literal[string] ) identifier[create_ssh_config] ( identifier[remote_user] = identifier[remote_user] ) keyword[return] identifier[self] . identifier[get_view] ()
def post(self, *args, **kwargs): """Create the SSH file & then return the normal get method...""" existing_ssh = models.SSHConfig.objects.all() if existing_ssh.exists(): return self.get_view() # depends on [control=['if'], data=[]] remote_user = self.request.POST.get('remote_user', 'root') create_ssh_config(remote_user=remote_user) return self.get_view()
def get_node(self,obj,cls=type(None),hist={}): """ Returns the node corresponding to obj. If does not already exist then it will create it. """ #~ ident = getattr(obj,'ident',obj) if obj in self.modules and is_module(obj,cls): return self.modules[obj] elif obj in self.submodules and is_submodule(obj,cls): return self.submodules[obj] elif obj in self.types and is_type(obj,cls): return self.types[obj] elif obj in self.procedures and is_proc(obj,cls): return self.procedures[obj] elif obj in self.programs and is_program(obj,cls): return self.programs[obj] elif obj in self.sourcefiles and is_sourcefile(obj,cls): return self.sourcefiles[obj] elif obj in self.blockdata and is_blockdata(obj,cls): return self.blockdata[obj] else: self.register(obj,cls,hist) return self.get_node(obj,cls,hist)
def function[get_node, parameter[self, obj, cls, hist]]: constant[ Returns the node corresponding to obj. If does not already exist then it will create it. ] if <ast.BoolOp object at 0x7da18f58f580> begin[:] return[call[name[self].modules][name[obj]]]
keyword[def] identifier[get_node] ( identifier[self] , identifier[obj] , identifier[cls] = identifier[type] ( keyword[None] ), identifier[hist] ={}): literal[string] keyword[if] identifier[obj] keyword[in] identifier[self] . identifier[modules] keyword[and] identifier[is_module] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[modules] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[submodules] keyword[and] identifier[is_submodule] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[submodules] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[types] keyword[and] identifier[is_type] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[types] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[procedures] keyword[and] identifier[is_proc] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[procedures] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[programs] keyword[and] identifier[is_program] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[programs] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[sourcefiles] keyword[and] identifier[is_sourcefile] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[sourcefiles] [ identifier[obj] ] keyword[elif] identifier[obj] keyword[in] identifier[self] . identifier[blockdata] keyword[and] identifier[is_blockdata] ( identifier[obj] , identifier[cls] ): keyword[return] identifier[self] . identifier[blockdata] [ identifier[obj] ] keyword[else] : identifier[self] . identifier[register] ( identifier[obj] , identifier[cls] , identifier[hist] ) keyword[return] identifier[self] . 
identifier[get_node] ( identifier[obj] , identifier[cls] , identifier[hist] )
def get_node(self, obj, cls=type(None), hist={}): """ Returns the node corresponding to obj. If does not already exist then it will create it. """ #~ ident = getattr(obj,'ident',obj) if obj in self.modules and is_module(obj, cls): return self.modules[obj] # depends on [control=['if'], data=[]] elif obj in self.submodules and is_submodule(obj, cls): return self.submodules[obj] # depends on [control=['if'], data=[]] elif obj in self.types and is_type(obj, cls): return self.types[obj] # depends on [control=['if'], data=[]] elif obj in self.procedures and is_proc(obj, cls): return self.procedures[obj] # depends on [control=['if'], data=[]] elif obj in self.programs and is_program(obj, cls): return self.programs[obj] # depends on [control=['if'], data=[]] elif obj in self.sourcefiles and is_sourcefile(obj, cls): return self.sourcefiles[obj] # depends on [control=['if'], data=[]] elif obj in self.blockdata and is_blockdata(obj, cls): return self.blockdata[obj] # depends on [control=['if'], data=[]] else: self.register(obj, cls, hist) return self.get_node(obj, cls, hist)
def dq_argument(self) -> str: """Parse double-quoted argument. Raises: EndOfInput: If past the end of input. """ def escape(): self._escape = True return 1 self._escape = False # any escaped chars? self.offset += 1 start = self.offset self.dfa([ { # state 0: argument "": lambda: 0, '"': lambda: -1, "\\": escape }, { # state 1: after escape "": lambda: 0 }]) self._arg += (self.unescape(self.input[start:self.offset]) if self._escape else self.input[start:self.offset]) self.offset += 1
def function[dq_argument, parameter[self]]: constant[Parse double-quoted argument. Raises: EndOfInput: If past the end of input. ] def function[escape, parameter[]]: name[self]._escape assign[=] constant[True] return[constant[1]] name[self]._escape assign[=] constant[False] <ast.AugAssign object at 0x7da1b0490070> variable[start] assign[=] name[self].offset call[name[self].dfa, parameter[list[[<ast.Dict object at 0x7da1b0492050>, <ast.Dict object at 0x7da1b04903a0>]]]] <ast.AugAssign object at 0x7da1b0492ec0> <ast.AugAssign object at 0x7da1b0490610>
keyword[def] identifier[dq_argument] ( identifier[self] )-> identifier[str] : literal[string] keyword[def] identifier[escape] (): identifier[self] . identifier[_escape] = keyword[True] keyword[return] literal[int] identifier[self] . identifier[_escape] = keyword[False] identifier[self] . identifier[offset] += literal[int] identifier[start] = identifier[self] . identifier[offset] identifier[self] . identifier[dfa] ([ { literal[string] : keyword[lambda] : literal[int] , literal[string] : keyword[lambda] :- literal[int] , literal[string] : identifier[escape] }, { literal[string] : keyword[lambda] : literal[int] }]) identifier[self] . identifier[_arg] +=( identifier[self] . identifier[unescape] ( identifier[self] . identifier[input] [ identifier[start] : identifier[self] . identifier[offset] ]) keyword[if] identifier[self] . identifier[_escape] keyword[else] identifier[self] . identifier[input] [ identifier[start] : identifier[self] . identifier[offset] ]) identifier[self] . identifier[offset] += literal[int]
def dq_argument(self) -> str: """Parse double-quoted argument. Raises: EndOfInput: If past the end of input. """ def escape(): self._escape = True return 1 self._escape = False # any escaped chars? self.offset += 1 start = self.offset # state 0: argument # state 1: after escape self.dfa([{'': lambda : 0, '"': lambda : -1, '\\': escape}, {'': lambda : 0}]) self._arg += self.unescape(self.input[start:self.offset]) if self._escape else self.input[start:self.offset] self.offset += 1
def crunch_dir(name, n=50): """Puts "..." in the middle of a directory name if lengh > n.""" if len(name) > n + 3: name = "..." + name[-n:] return name
def function[crunch_dir, parameter[name, n]]: constant[Puts "..." in the middle of a directory name if lengh > n.] if compare[call[name[len], parameter[name[name]]] greater[>] binary_operation[name[n] + constant[3]]] begin[:] variable[name] assign[=] binary_operation[constant[...] + call[name[name]][<ast.Slice object at 0x7da18f721e10>]] return[name[name]]
keyword[def] identifier[crunch_dir] ( identifier[name] , identifier[n] = literal[int] ): literal[string] keyword[if] identifier[len] ( identifier[name] )> identifier[n] + literal[int] : identifier[name] = literal[string] + identifier[name] [- identifier[n] :] keyword[return] identifier[name]
def crunch_dir(name, n=50): """Puts "..." in the middle of a directory name if lengh > n.""" if len(name) > n + 3: name = '...' + name[-n:] # depends on [control=['if'], data=[]] return name
def time(self, t=None): """Set/get actor's absolute time.""" if t is None: return self._time self._time = t return self
def function[time, parameter[self, t]]: constant[Set/get actor's absolute time.] if compare[name[t] is constant[None]] begin[:] return[name[self]._time] name[self]._time assign[=] name[t] return[name[self]]
keyword[def] identifier[time] ( identifier[self] , identifier[t] = keyword[None] ): literal[string] keyword[if] identifier[t] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[_time] identifier[self] . identifier[_time] = identifier[t] keyword[return] identifier[self]
def time(self, t=None): """Set/get actor's absolute time.""" if t is None: return self._time # depends on [control=['if'], data=[]] self._time = t return self
def get_proficiencies(self): """Gets all ``Proficiencies``. return: (osid.learning.ProficiencyList) - a list of ``Proficiencies`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('learning', collection='Proficiency', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.ProficiencyList(result, runtime=self._runtime, proxy=self._proxy)
def function[get_proficiencies, parameter[self]]: constant[Gets all ``Proficiencies``. return: (osid.learning.ProficiencyList) - a list of ``Proficiencies`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ] variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[learning]]] variable[result] assign[=] call[call[name[collection].find, parameter[call[name[self]._view_filter, parameter[]]]].sort, parameter[constant[_id], name[DESCENDING]]] return[call[name[objects].ProficiencyList, parameter[name[result]]]]
keyword[def] identifier[get_proficiencies] ( identifier[self] ): literal[string] identifier[collection] = identifier[JSONClientValidated] ( literal[string] , identifier[collection] = literal[string] , identifier[runtime] = identifier[self] . identifier[_runtime] ) identifier[result] = identifier[collection] . identifier[find] ( identifier[self] . identifier[_view_filter] ()). identifier[sort] ( literal[string] , identifier[DESCENDING] ) keyword[return] identifier[objects] . identifier[ProficiencyList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] )
def get_proficiencies(self): """Gets all ``Proficiencies``. return: (osid.learning.ProficiencyList) - a list of ``Proficiencies`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('learning', collection='Proficiency', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.ProficiencyList(result, runtime=self._runtime, proxy=self._proxy)
def supports_file(cls, meta): """Check if the loader has a supported file extension""" path = Path(meta.path) for ext in cls.file_extensions: if path.suffixes[:len(ext)] == ext: return True return False
def function[supports_file, parameter[cls, meta]]: constant[Check if the loader has a supported file extension] variable[path] assign[=] call[name[Path], parameter[name[meta].path]] for taget[name[ext]] in starred[name[cls].file_extensions] begin[:] if compare[call[name[path].suffixes][<ast.Slice object at 0x7da18f58f700>] equal[==] name[ext]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[supports_file] ( identifier[cls] , identifier[meta] ): literal[string] identifier[path] = identifier[Path] ( identifier[meta] . identifier[path] ) keyword[for] identifier[ext] keyword[in] identifier[cls] . identifier[file_extensions] : keyword[if] identifier[path] . identifier[suffixes] [: identifier[len] ( identifier[ext] )]== identifier[ext] : keyword[return] keyword[True] keyword[return] keyword[False]
def supports_file(cls, meta): """Check if the loader has a supported file extension""" path = Path(meta.path) for ext in cls.file_extensions: if path.suffixes[:len(ext)] == ext: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ext']] return False
def is_ip_addr_list(value, min=None, max=None): """ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor = Validator() >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. """ return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def function[is_ip_addr_list, parameter[value, min, max]]: constant[ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor = Validator() >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. ] return[<ast.ListComp object at 0x7da1b0e49240>]
keyword[def] identifier[is_ip_addr_list] ( identifier[value] , identifier[min] = keyword[None] , identifier[max] = keyword[None] ): literal[string] keyword[return] [ identifier[is_ip_addr] ( identifier[mem] ) keyword[for] identifier[mem] keyword[in] identifier[is_list] ( identifier[value] , identifier[min] , identifier[max] )]
def is_ip_addr_list(value, min=None, max=None): """ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor = Validator() >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. """ return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def _mfpts(tprob, populations, sinks, lag_time): """ Gets the Mean First Passage Time (MFPT) for all states to a *set* of sinks. Parameters ---------- tprob : np.ndarray Transition matrix populations : np.ndarray, (n_states,) MSM populations sinks : array_like, int, optional Indices of the sink states. There are two use-cases: - None [default] : All MFPTs will be calculated, and the result is a matrix of the MFPT from state i to state j. This uses the fundamental matrix formalism. - list of ints or int : Only the MFPTs into these sink states will be computed. The result is a vector, with entry i corresponding to the average time it takes to first get to *any* sink state from state i lag_time : float, optional Lag time for the model. The MFPT will be reported in whatever units are given here. Default is (1) which is in units of the lag time of the MSM. Returns ------- mfpts : np.ndarray, float MFPT in time units of lag_time, which depends on the input value of sinks: - If sinks is None, then mfpts's shape is (n_states, n_states). Where mfpts[i, j] is the mean first passage time to state j from state i. - If sinks contains one or more states, then mfpts's shape is (n_states,). Where mfpts[i] is the mean first passage time from state i to any state in sinks. References ---------- .. [1] Grinstead, C. M. and Snell, J. L. Introduction to Probability. American Mathematical Soc., 1998. 
As of November 2014, this chapter was available for free online: http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf """ n_states = np.shape(populations)[0] if sinks is None: # Use Thm 11.16 in [1] limiting_matrix = np.vstack([populations] * n_states) # Fundamental matrix fund_matrix = scipy.linalg.inv(np.eye(n_states) - tprob + limiting_matrix) # mfpt[i,j] = (fund_matrix[j,j] - fund_matrix[i,j]) / populations[j] mfpts = fund_matrix * -1 for j in xrange(n_states): mfpts[:, j] += fund_matrix[j, j] mfpts[:, j] /= populations[j] mfpts *= lag_time else: # See section 11.5, and use Thm 11.5 # Turn our ergodic MSM into an absorbing one (all sink # states are absorbing). Then calculate the mean time # to absorption. # Note: we are slightly modifying the description in # 11.5 so that we also get the mfpts[sink] = 0.0 sinks = np.array(sinks, dtype=int).reshape((-1,)) absorb_tprob = copy.copy(tprob) for state in sinks: absorb_tprob[state, :] = 0.0 absorb_tprob[state, state] = 2.0 # note it has to be 2 because we subtract # the identity below. lhs = np.eye(n_states) - absorb_tprob rhs = np.ones(n_states) for state in sinks: rhs[state] = 0.0 mfpts = lag_time * np.linalg.solve(lhs, rhs) return mfpts
def function[_mfpts, parameter[tprob, populations, sinks, lag_time]]: constant[ Gets the Mean First Passage Time (MFPT) for all states to a *set* of sinks. Parameters ---------- tprob : np.ndarray Transition matrix populations : np.ndarray, (n_states,) MSM populations sinks : array_like, int, optional Indices of the sink states. There are two use-cases: - None [default] : All MFPTs will be calculated, and the result is a matrix of the MFPT from state i to state j. This uses the fundamental matrix formalism. - list of ints or int : Only the MFPTs into these sink states will be computed. The result is a vector, with entry i corresponding to the average time it takes to first get to *any* sink state from state i lag_time : float, optional Lag time for the model. The MFPT will be reported in whatever units are given here. Default is (1) which is in units of the lag time of the MSM. Returns ------- mfpts : np.ndarray, float MFPT in time units of lag_time, which depends on the input value of sinks: - If sinks is None, then mfpts's shape is (n_states, n_states). Where mfpts[i, j] is the mean first passage time to state j from state i. - If sinks contains one or more states, then mfpts's shape is (n_states,). Where mfpts[i] is the mean first passage time from state i to any state in sinks. References ---------- .. [1] Grinstead, C. M. and Snell, J. L. Introduction to Probability. American Mathematical Soc., 1998. 
As of November 2014, this chapter was available for free online: http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf ] variable[n_states] assign[=] call[call[name[np].shape, parameter[name[populations]]]][constant[0]] if compare[name[sinks] is constant[None]] begin[:] variable[limiting_matrix] assign[=] call[name[np].vstack, parameter[binary_operation[list[[<ast.Name object at 0x7da1b07ae770>]] * name[n_states]]]] variable[fund_matrix] assign[=] call[name[scipy].linalg.inv, parameter[binary_operation[binary_operation[call[name[np].eye, parameter[name[n_states]]] - name[tprob]] + name[limiting_matrix]]]] variable[mfpts] assign[=] binary_operation[name[fund_matrix] * <ast.UnaryOp object at 0x7da1b07ad450>] for taget[name[j]] in starred[call[name[xrange], parameter[name[n_states]]]] begin[:] <ast.AugAssign object at 0x7da1b07aebf0> <ast.AugAssign object at 0x7da1b07af8b0> <ast.AugAssign object at 0x7da1b07ad330> return[name[mfpts]]
keyword[def] identifier[_mfpts] ( identifier[tprob] , identifier[populations] , identifier[sinks] , identifier[lag_time] ): literal[string] identifier[n_states] = identifier[np] . identifier[shape] ( identifier[populations] )[ literal[int] ] keyword[if] identifier[sinks] keyword[is] keyword[None] : identifier[limiting_matrix] = identifier[np] . identifier[vstack] ([ identifier[populations] ]* identifier[n_states] ) identifier[fund_matrix] = identifier[scipy] . identifier[linalg] . identifier[inv] ( identifier[np] . identifier[eye] ( identifier[n_states] )- identifier[tprob] + identifier[limiting_matrix] ) identifier[mfpts] = identifier[fund_matrix] *- literal[int] keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[n_states] ): identifier[mfpts] [:, identifier[j] ]+= identifier[fund_matrix] [ identifier[j] , identifier[j] ] identifier[mfpts] [:, identifier[j] ]/= identifier[populations] [ identifier[j] ] identifier[mfpts] *= identifier[lag_time] keyword[else] : identifier[sinks] = identifier[np] . identifier[array] ( identifier[sinks] , identifier[dtype] = identifier[int] ). identifier[reshape] ((- literal[int] ,)) identifier[absorb_tprob] = identifier[copy] . identifier[copy] ( identifier[tprob] ) keyword[for] identifier[state] keyword[in] identifier[sinks] : identifier[absorb_tprob] [ identifier[state] ,:]= literal[int] identifier[absorb_tprob] [ identifier[state] , identifier[state] ]= literal[int] identifier[lhs] = identifier[np] . identifier[eye] ( identifier[n_states] )- identifier[absorb_tprob] identifier[rhs] = identifier[np] . identifier[ones] ( identifier[n_states] ) keyword[for] identifier[state] keyword[in] identifier[sinks] : identifier[rhs] [ identifier[state] ]= literal[int] identifier[mfpts] = identifier[lag_time] * identifier[np] . identifier[linalg] . identifier[solve] ( identifier[lhs] , identifier[rhs] ) keyword[return] identifier[mfpts]
def _mfpts(tprob, populations, sinks, lag_time): """ Gets the Mean First Passage Time (MFPT) for all states to a *set* of sinks. Parameters ---------- tprob : np.ndarray Transition matrix populations : np.ndarray, (n_states,) MSM populations sinks : array_like, int, optional Indices of the sink states. There are two use-cases: - None [default] : All MFPTs will be calculated, and the result is a matrix of the MFPT from state i to state j. This uses the fundamental matrix formalism. - list of ints or int : Only the MFPTs into these sink states will be computed. The result is a vector, with entry i corresponding to the average time it takes to first get to *any* sink state from state i lag_time : float, optional Lag time for the model. The MFPT will be reported in whatever units are given here. Default is (1) which is in units of the lag time of the MSM. Returns ------- mfpts : np.ndarray, float MFPT in time units of lag_time, which depends on the input value of sinks: - If sinks is None, then mfpts's shape is (n_states, n_states). Where mfpts[i, j] is the mean first passage time to state j from state i. - If sinks contains one or more states, then mfpts's shape is (n_states,). Where mfpts[i] is the mean first passage time from state i to any state in sinks. References ---------- .. [1] Grinstead, C. M. and Snell, J. L. Introduction to Probability. American Mathematical Soc., 1998. 
As of November 2014, this chapter was available for free online: http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf """ n_states = np.shape(populations)[0] if sinks is None: # Use Thm 11.16 in [1] limiting_matrix = np.vstack([populations] * n_states) # Fundamental matrix fund_matrix = scipy.linalg.inv(np.eye(n_states) - tprob + limiting_matrix) # mfpt[i,j] = (fund_matrix[j,j] - fund_matrix[i,j]) / populations[j] mfpts = fund_matrix * -1 for j in xrange(n_states): mfpts[:, j] += fund_matrix[j, j] mfpts[:, j] /= populations[j] # depends on [control=['for'], data=['j']] mfpts *= lag_time # depends on [control=['if'], data=[]] else: # See section 11.5, and use Thm 11.5 # Turn our ergodic MSM into an absorbing one (all sink # states are absorbing). Then calculate the mean time # to absorption. # Note: we are slightly modifying the description in # 11.5 so that we also get the mfpts[sink] = 0.0 sinks = np.array(sinks, dtype=int).reshape((-1,)) absorb_tprob = copy.copy(tprob) for state in sinks: absorb_tprob[state, :] = 0.0 absorb_tprob[state, state] = 2.0 # depends on [control=['for'], data=['state']] # note it has to be 2 because we subtract # the identity below. lhs = np.eye(n_states) - absorb_tprob rhs = np.ones(n_states) for state in sinks: rhs[state] = 0.0 # depends on [control=['for'], data=['state']] mfpts = lag_time * np.linalg.solve(lhs, rhs) return mfpts
def open_url(self, url, stale_after, parse_as_html = True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute(''' select content from cache where url = ? and date > ? 
''', (url, _date_to_sqlite_str(threshold_date)) ) row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert (not retry_run) or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = ('Function {f} claims we have been banned, ' 'it was called with an element parsed from url ' '(downloaded, not from cache): {u}' .format(f = self.does_show_ban, u = url)) _LOGGER.error(message) _LOGGER.info('Deleting url %s from the cache (if it exists) ' 'because it triggered ban page cache poisoning ' 'exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) if downloaded: raise BannedException(message) else: return self.open_url(url, stale_after, retry_run = True) else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring( tree, pretty_print = True, encoding = 'utf-8' ) else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute(''' insert or replace into cache (url, date, content) values (?, ?, ?) ''', ( str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store) ) ) return tree
def function[open_url, parameter[self, url, stale_after, parse_as_html]]: constant[ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. ] call[name[_LOGGER].info, parameter[constant[open_url() received url: %s], name[url]]] variable[today] assign[=] call[name[datetime].date.today, parameter[]] variable[threshold_date] assign[=] binary_operation[name[today] - call[name[datetime].timedelta, parameter[name[stale_after]]]] variable[downloaded] assign[=] constant[False] with call[name[self]._get_conn, parameter[]] begin[:] variable[rs] assign[=] call[name[conn].execute, parameter[constant[ select content from cache where url = ? and date > ? 
], tuple[[<ast.Name object at 0x7da18f09e620>, <ast.Call object at 0x7da18f09e9b0>]]]] variable[row] assign[=] call[name[rs].fetchone, parameter[]] variable[retry_run] assign[=] call[name[kwargs].get, parameter[constant[retry_run], constant[False]]] assert[<ast.BoolOp object at 0x7da1b0ae1a50>] if compare[name[row] is constant[None]] begin[:] variable[file_obj] assign[=] call[call[name[self]._download, parameter[name[url]]].get_file_obj, parameter[]] variable[downloaded] assign[=] constant[True] if name[parse_as_html] begin[:] variable[tree] assign[=] call[name[lxml].html.parse, parameter[name[file_obj]]] call[name[tree].getroot, parameter[]].url assign[=] name[url] variable[appears_to_be_banned] assign[=] constant[False] if call[name[self].does_show_ban, parameter[call[name[tree].getroot, parameter[]]]] begin[:] variable[appears_to_be_banned] assign[=] constant[True] if name[downloaded] begin[:] variable[message] assign[=] call[constant[Function {f} claims we have been banned, it was called with an element parsed from url (downloaded, not from cache): {u}].format, parameter[]] call[name[_LOGGER].error, parameter[name[message]]] call[name[_LOGGER].info, parameter[constant[Deleting url %s from the cache (if it exists) because it triggered ban page cache poisoning exception], name[url]]] with call[name[self]._get_conn, parameter[]] begin[:] call[name[conn].execute, parameter[constant[delete from cache where url = ?], list[[<ast.Call object at 0x7da1b0ac9780>]]]] if name[downloaded] begin[:] <ast.Raise object at 0x7da1b0ac9450> if name[downloaded] begin[:] if name[parse_as_html] begin[:] call[call[name[tree].getroot, parameter[]].make_links_absolute, parameter[call[name[tree].getroot, parameter[]].base_url]] variable[to_store] assign[=] call[name[lxml].html.tostring, parameter[name[tree]]] variable[to_store] assign[=] call[name[zlib].compress, parameter[name[to_store], constant[8]]] with call[name[self]._get_conn, parameter[]] begin[:] call[name[conn].execute, 
parameter[constant[ insert or replace into cache (url, date, content) values (?, ?, ?) ], tuple[[<ast.Call object at 0x7da1b0ac9720>, <ast.Call object at 0x7da1b0ac9540>, <ast.Call object at 0x7da1b0acb9d0>]]]] return[name[tree]]
keyword[def] identifier[open_url] ( identifier[self] , identifier[url] , identifier[stale_after] , identifier[parse_as_html] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[url] ) identifier[today] = identifier[datetime] . identifier[date] . identifier[today] () identifier[threshold_date] = identifier[today] - identifier[datetime] . identifier[timedelta] ( identifier[stale_after] ) identifier[downloaded] = keyword[False] keyword[with] identifier[self] . identifier[_get_conn] () keyword[as] identifier[conn] : identifier[rs] = identifier[conn] . identifier[execute] ( literal[string] , ( identifier[url] , identifier[_date_to_sqlite_str] ( identifier[threshold_date] )) ) identifier[row] = identifier[rs] . identifier[fetchone] () identifier[retry_run] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) keyword[assert] ( keyword[not] identifier[retry_run] ) keyword[or] ( identifier[retry_run] keyword[and] identifier[row] keyword[is] keyword[None] ) keyword[if] identifier[row] keyword[is] keyword[None] : identifier[file_obj] = identifier[self] . identifier[_download] ( identifier[url] ). identifier[get_file_obj] () identifier[downloaded] = keyword[True] keyword[else] : identifier[file_obj] = identifier[cStringIO] . identifier[StringIO] ( identifier[zlib] . identifier[decompress] ( identifier[row] [ literal[int] ])) keyword[if] identifier[parse_as_html] : identifier[tree] = identifier[lxml] . identifier[html] . identifier[parse] ( identifier[file_obj] ) identifier[tree] . identifier[getroot] (). identifier[url] = identifier[url] identifier[appears_to_be_banned] = keyword[False] keyword[if] identifier[self] . identifier[does_show_ban] ( identifier[tree] . identifier[getroot] ()): identifier[appears_to_be_banned] = keyword[True] keyword[if] identifier[downloaded] : identifier[message] =( literal[string] literal[string] literal[string] . 
identifier[format] ( identifier[f] = identifier[self] . identifier[does_show_ban] , identifier[u] = identifier[url] )) identifier[_LOGGER] . identifier[error] ( identifier[message] ) identifier[_LOGGER] . identifier[info] ( literal[string] literal[string] literal[string] , identifier[url] ) keyword[with] identifier[self] . identifier[_get_conn] () keyword[as] identifier[conn] : identifier[conn] . identifier[execute] ( literal[string] ,[ identifier[str] ( identifier[url] )]) keyword[if] identifier[downloaded] : keyword[raise] identifier[BannedException] ( identifier[message] ) keyword[else] : keyword[return] identifier[self] . identifier[open_url] ( identifier[url] , identifier[stale_after] , identifier[retry_run] = keyword[True] ) keyword[else] : identifier[tree] = identifier[file_obj] . identifier[read] () keyword[if] identifier[downloaded] : keyword[if] identifier[parse_as_html] : identifier[tree] . identifier[getroot] (). identifier[make_links_absolute] ( identifier[tree] . identifier[getroot] (). identifier[base_url] ) identifier[to_store] = identifier[lxml] . identifier[html] . identifier[tostring] ( identifier[tree] , identifier[pretty_print] = keyword[True] , identifier[encoding] = literal[string] ) keyword[else] : identifier[to_store] = identifier[tree] identifier[to_store] = identifier[zlib] . identifier[compress] ( identifier[to_store] , literal[int] ) keyword[with] identifier[self] . identifier[_get_conn] () keyword[as] identifier[conn] : identifier[conn] . identifier[execute] ( literal[string] , ( identifier[str] ( identifier[url] ), identifier[_date_to_sqlite_str] ( identifier[today] ), identifier[sqlite3] . identifier[Binary] ( identifier[to_store] ) ) ) keyword[return] identifier[tree]
def open_url(self, url, stale_after, parse_as_html=True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute('\n\t\t\t\tselect content\n\t\t\t\tfrom cache\n\t\t\t\twhere url = ?\n\t\t\t\tand date > ?\n\t\t\t\t', (url, _date_to_sqlite_str(threshold_date))) # depends on [control=['with'], data=['conn']] row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert not retry_run or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True # depends on [control=['if'], data=[]] else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = 'Function {f} claims we have been banned, it was called with an element parsed from url (downloaded, not from cache): {u}'.format(f=self.does_show_ban, u=url) _LOGGER.error(message) # depends on [control=['if'], 
data=[]] _LOGGER.info('Deleting url %s from the cache (if it exists) because it triggered ban page cache poisoning exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) # depends on [control=['with'], data=['conn']] if downloaded: raise BannedException(message) # depends on [control=['if'], data=[]] else: return self.open_url(url, stale_after, retry_run=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring(tree, pretty_print=True, encoding='utf-8') # depends on [control=['if'], data=[]] else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute('\n\t\t\t\t\tinsert or replace \n\t\t\t\t\tinto cache\n\t\t\t\t\t(url, date, content)\n\t\t\t\t\tvalues\n\t\t\t\t\t(?, ?, ?)\n\t\t\t\t\t', (str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store))) # depends on [control=['with'], data=['conn']] # depends on [control=['if'], data=[]] return tree
def add_tags_to_bookmark(self, bookmark_id, tags): """ Add tags to to a bookmark. The identified bookmark must belong to the current user. :param bookmark_id: ID of the bookmark to delete. :param tags: Comma separated tags to be applied. """ url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id)) params = dict(tags=tags) return self.post(url, params)
def function[add_tags_to_bookmark, parameter[self, bookmark_id, tags]]: constant[ Add tags to to a bookmark. The identified bookmark must belong to the current user. :param bookmark_id: ID of the bookmark to delete. :param tags: Comma separated tags to be applied. ] variable[url] assign[=] call[name[self]._generate_url, parameter[call[constant[bookmarks/{0}/tags].format, parameter[name[bookmark_id]]]]] variable[params] assign[=] call[name[dict], parameter[]] return[call[name[self].post, parameter[name[url], name[params]]]]
keyword[def] identifier[add_tags_to_bookmark] ( identifier[self] , identifier[bookmark_id] , identifier[tags] ): literal[string] identifier[url] = identifier[self] . identifier[_generate_url] ( literal[string] . identifier[format] ( identifier[bookmark_id] )) identifier[params] = identifier[dict] ( identifier[tags] = identifier[tags] ) keyword[return] identifier[self] . identifier[post] ( identifier[url] , identifier[params] )
def add_tags_to_bookmark(self, bookmark_id, tags): """ Add tags to to a bookmark. The identified bookmark must belong to the current user. :param bookmark_id: ID of the bookmark to delete. :param tags: Comma separated tags to be applied. """ url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id)) params = dict(tags=tags) return self.post(url, params)
def _ParseMRUListValue(self, registry_key): """Parses the MRUList value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. Returns: mrulist_entries: MRUList entries or None if not available. """ mrulist_value = registry_key.GetValueByName('MRUList') # The key exists but does not contain a value named "MRUList". if not mrulist_value: return None mrulist_entries_map = self._GetDataTypeMap('mrulist_entries') context = dtfabric_data_maps.DataTypeMapContext(values={ 'data_size': len(mrulist_value.data)}) return self._ReadStructureFromByteStream( mrulist_value.data, 0, mrulist_entries_map, context=context)
def function[_ParseMRUListValue, parameter[self, registry_key]]: constant[Parses the MRUList value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. Returns: mrulist_entries: MRUList entries or None if not available. ] variable[mrulist_value] assign[=] call[name[registry_key].GetValueByName, parameter[constant[MRUList]]] if <ast.UnaryOp object at 0x7da207f9bfa0> begin[:] return[constant[None]] variable[mrulist_entries_map] assign[=] call[name[self]._GetDataTypeMap, parameter[constant[mrulist_entries]]] variable[context] assign[=] call[name[dtfabric_data_maps].DataTypeMapContext, parameter[]] return[call[name[self]._ReadStructureFromByteStream, parameter[name[mrulist_value].data, constant[0], name[mrulist_entries_map]]]]
keyword[def] identifier[_ParseMRUListValue] ( identifier[self] , identifier[registry_key] ): literal[string] identifier[mrulist_value] = identifier[registry_key] . identifier[GetValueByName] ( literal[string] ) keyword[if] keyword[not] identifier[mrulist_value] : keyword[return] keyword[None] identifier[mrulist_entries_map] = identifier[self] . identifier[_GetDataTypeMap] ( literal[string] ) identifier[context] = identifier[dtfabric_data_maps] . identifier[DataTypeMapContext] ( identifier[values] ={ literal[string] : identifier[len] ( identifier[mrulist_value] . identifier[data] )}) keyword[return] identifier[self] . identifier[_ReadStructureFromByteStream] ( identifier[mrulist_value] . identifier[data] , literal[int] , identifier[mrulist_entries_map] , identifier[context] = identifier[context] )
def _ParseMRUListValue(self, registry_key): """Parses the MRUList value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. Returns: mrulist_entries: MRUList entries or None if not available. """ mrulist_value = registry_key.GetValueByName('MRUList') # The key exists but does not contain a value named "MRUList". if not mrulist_value: return None # depends on [control=['if'], data=[]] mrulist_entries_map = self._GetDataTypeMap('mrulist_entries') context = dtfabric_data_maps.DataTypeMapContext(values={'data_size': len(mrulist_value.data)}) return self._ReadStructureFromByteStream(mrulist_value.data, 0, mrulist_entries_map, context=context)
def merge_ticket(self, ticket_id, into_id): """ Merge ticket into another (undocumented API feature). :param ticket_id: ID of ticket to be merged :param into: ID of destination ticket :returns: ``True`` Operation was successful ``False`` Either origin or destination ticket does not exist or user does not have ModifyTicket permission. """ msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id), str(into_id))) state = msg.split('\n')[2] return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None
def function[merge_ticket, parameter[self, ticket_id, into_id]]: constant[ Merge ticket into another (undocumented API feature). :param ticket_id: ID of ticket to be merged :param into: ID of destination ticket :returns: ``True`` Operation was successful ``False`` Either origin or destination ticket does not exist or user does not have ModifyTicket permission. ] variable[msg] assign[=] call[name[self].__request, parameter[call[constant[ticket/{}/merge/{}].format, parameter[call[name[str], parameter[name[ticket_id]]], call[name[str], parameter[name[into_id]]]]]]] variable[state] assign[=] call[call[name[msg].split, parameter[constant[ ]]]][constant[2]] return[compare[call[call[name[self].RE_PATTERNS][constant[merge_successful_pattern]].match, parameter[name[state]]] is_not constant[None]]]
keyword[def] identifier[merge_ticket] ( identifier[self] , identifier[ticket_id] , identifier[into_id] ): literal[string] identifier[msg] = identifier[self] . identifier[__request] ( literal[string] . identifier[format] ( identifier[str] ( identifier[ticket_id] ), identifier[str] ( identifier[into_id] ))) identifier[state] = identifier[msg] . identifier[split] ( literal[string] )[ literal[int] ] keyword[return] identifier[self] . identifier[RE_PATTERNS] [ literal[string] ]. identifier[match] ( identifier[state] ) keyword[is] keyword[not] keyword[None]
def merge_ticket(self, ticket_id, into_id): """ Merge ticket into another (undocumented API feature). :param ticket_id: ID of ticket to be merged :param into: ID of destination ticket :returns: ``True`` Operation was successful ``False`` Either origin or destination ticket does not exist or user does not have ModifyTicket permission. """ msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id), str(into_id))) state = msg.split('\n')[2] return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None
def mark_job_as_failed(self, job_id, exception, traceback): """ Mark the job as failed, and record the traceback and exception. Args: job_id: The job_id of the job that failed. exception: The exception object thrown by the job. traceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way for the conncurrent.futures workers to throw back the error to us. Returns: None """ session = self.sessionmaker() job, orm_job = self._update_job_state( job_id, State.FAILED, session=session) # Note (aron): looks like SQLAlchemy doesn't automatically # save any pickletype fields even if we re-set (orm_job.obj = job) that # field. My hunch is that it's tracking the id of the object, # and if that doesn't change, then SQLAlchemy doesn't repickle the object # and save to the DB. # Our hack here is to just copy the job object, and then set thespecific # field we want to edit, in this case the job.state. That forces # SQLAlchemy to re-pickle the object, thus setting it to the correct state. job = copy(job) job.exception = exception job.traceback = traceback orm_job.obj = job session.add(orm_job) session.commit() session.close()
def function[mark_job_as_failed, parameter[self, job_id, exception, traceback]]: constant[ Mark the job as failed, and record the traceback and exception. Args: job_id: The job_id of the job that failed. exception: The exception object thrown by the job. traceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way for the conncurrent.futures workers to throw back the error to us. Returns: None ] variable[session] assign[=] call[name[self].sessionmaker, parameter[]] <ast.Tuple object at 0x7da1b0416020> assign[=] call[name[self]._update_job_state, parameter[name[job_id], name[State].FAILED]] variable[job] assign[=] call[name[copy], parameter[name[job]]] name[job].exception assign[=] name[exception] name[job].traceback assign[=] name[traceback] name[orm_job].obj assign[=] name[job] call[name[session].add, parameter[name[orm_job]]] call[name[session].commit, parameter[]] call[name[session].close, parameter[]]
keyword[def] identifier[mark_job_as_failed] ( identifier[self] , identifier[job_id] , identifier[exception] , identifier[traceback] ): literal[string] identifier[session] = identifier[self] . identifier[sessionmaker] () identifier[job] , identifier[orm_job] = identifier[self] . identifier[_update_job_state] ( identifier[job_id] , identifier[State] . identifier[FAILED] , identifier[session] = identifier[session] ) identifier[job] = identifier[copy] ( identifier[job] ) identifier[job] . identifier[exception] = identifier[exception] identifier[job] . identifier[traceback] = identifier[traceback] identifier[orm_job] . identifier[obj] = identifier[job] identifier[session] . identifier[add] ( identifier[orm_job] ) identifier[session] . identifier[commit] () identifier[session] . identifier[close] ()
def mark_job_as_failed(self, job_id, exception, traceback): """ Mark the job as failed, and record the traceback and exception. Args: job_id: The job_id of the job that failed. exception: The exception object thrown by the job. traceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way for the conncurrent.futures workers to throw back the error to us. Returns: None """ session = self.sessionmaker() (job, orm_job) = self._update_job_state(job_id, State.FAILED, session=session) # Note (aron): looks like SQLAlchemy doesn't automatically # save any pickletype fields even if we re-set (orm_job.obj = job) that # field. My hunch is that it's tracking the id of the object, # and if that doesn't change, then SQLAlchemy doesn't repickle the object # and save to the DB. # Our hack here is to just copy the job object, and then set thespecific # field we want to edit, in this case the job.state. That forces # SQLAlchemy to re-pickle the object, thus setting it to the correct state. job = copy(job) job.exception = exception job.traceback = traceback orm_job.obj = job session.add(orm_job) session.commit() session.close()
def zimbra_to_python(zimbra_dict, key_attribute="n", content_attribute="_content"): """ Converts single level Zimbra dicts to a standard python dict :param zimbra_dict: The dictionary in Zimbra-Format :return: A native python dict """ local_dict = {} for item in zimbra_dict: local_dict[item[key_attribute]] = item[content_attribute] return local_dict
def function[zimbra_to_python, parameter[zimbra_dict, key_attribute, content_attribute]]: constant[ Converts single level Zimbra dicts to a standard python dict :param zimbra_dict: The dictionary in Zimbra-Format :return: A native python dict ] variable[local_dict] assign[=] dictionary[[], []] for taget[name[item]] in starred[name[zimbra_dict]] begin[:] call[name[local_dict]][call[name[item]][name[key_attribute]]] assign[=] call[name[item]][name[content_attribute]] return[name[local_dict]]
keyword[def] identifier[zimbra_to_python] ( identifier[zimbra_dict] , identifier[key_attribute] = literal[string] , identifier[content_attribute] = literal[string] ): literal[string] identifier[local_dict] ={} keyword[for] identifier[item] keyword[in] identifier[zimbra_dict] : identifier[local_dict] [ identifier[item] [ identifier[key_attribute] ]]= identifier[item] [ identifier[content_attribute] ] keyword[return] identifier[local_dict]
def zimbra_to_python(zimbra_dict, key_attribute='n', content_attribute='_content'): """ Converts single level Zimbra dicts to a standard python dict :param zimbra_dict: The dictionary in Zimbra-Format :return: A native python dict """ local_dict = {} for item in zimbra_dict: local_dict[item[key_attribute]] = item[content_attribute] # depends on [control=['for'], data=['item']] return local_dict
def DrawIconAndLabel(self, dc, node, x, y, w, h, depth): ''' Draw the icon, if any, and the label, if any, of the node. ''' if w-2 < self._em_size_//2 or h-2 < self._em_size_ //2: return dc.SetClippingRegion(x+1, y+1, w-2, h-2) # Don't draw outside the box try: icon = self.adapter.icon(node, node==self.selectedNode) if icon and h >= icon.GetHeight() and w >= icon.GetWidth(): iconWidth = icon.GetWidth() + 2 dc.DrawIcon(icon, x+2, y+2) else: iconWidth = 0 if self.labels and h >= dc.GetTextExtent('ABC')[1]: dc.SetTextForeground(self.TextForegroundForNode(node, depth)) dc.DrawText(self.adapter.label(node), x + iconWidth + 2, y+2) finally: dc.DestroyClippingRegion()
def function[DrawIconAndLabel, parameter[self, dc, node, x, y, w, h, depth]]: constant[ Draw the icon, if any, and the label, if any, of the node. ] if <ast.BoolOp object at 0x7da20c6e58a0> begin[:] return[None] call[name[dc].SetClippingRegion, parameter[binary_operation[name[x] + constant[1]], binary_operation[name[y] + constant[1]], binary_operation[name[w] - constant[2]], binary_operation[name[h] - constant[2]]]] <ast.Try object at 0x7da20c6e41c0>
keyword[def] identifier[DrawIconAndLabel] ( identifier[self] , identifier[dc] , identifier[node] , identifier[x] , identifier[y] , identifier[w] , identifier[h] , identifier[depth] ): literal[string] keyword[if] identifier[w] - literal[int] < identifier[self] . identifier[_em_size_] // literal[int] keyword[or] identifier[h] - literal[int] < identifier[self] . identifier[_em_size_] // literal[int] : keyword[return] identifier[dc] . identifier[SetClippingRegion] ( identifier[x] + literal[int] , identifier[y] + literal[int] , identifier[w] - literal[int] , identifier[h] - literal[int] ) keyword[try] : identifier[icon] = identifier[self] . identifier[adapter] . identifier[icon] ( identifier[node] , identifier[node] == identifier[self] . identifier[selectedNode] ) keyword[if] identifier[icon] keyword[and] identifier[h] >= identifier[icon] . identifier[GetHeight] () keyword[and] identifier[w] >= identifier[icon] . identifier[GetWidth] (): identifier[iconWidth] = identifier[icon] . identifier[GetWidth] ()+ literal[int] identifier[dc] . identifier[DrawIcon] ( identifier[icon] , identifier[x] + literal[int] , identifier[y] + literal[int] ) keyword[else] : identifier[iconWidth] = literal[int] keyword[if] identifier[self] . identifier[labels] keyword[and] identifier[h] >= identifier[dc] . identifier[GetTextExtent] ( literal[string] )[ literal[int] ]: identifier[dc] . identifier[SetTextForeground] ( identifier[self] . identifier[TextForegroundForNode] ( identifier[node] , identifier[depth] )) identifier[dc] . identifier[DrawText] ( identifier[self] . identifier[adapter] . identifier[label] ( identifier[node] ), identifier[x] + identifier[iconWidth] + literal[int] , identifier[y] + literal[int] ) keyword[finally] : identifier[dc] . identifier[DestroyClippingRegion] ()
def DrawIconAndLabel(self, dc, node, x, y, w, h, depth): """ Draw the icon, if any, and the label, if any, of the node. """ if w - 2 < self._em_size_ // 2 or h - 2 < self._em_size_ // 2: return # depends on [control=['if'], data=[]] dc.SetClippingRegion(x + 1, y + 1, w - 2, h - 2) # Don't draw outside the box try: icon = self.adapter.icon(node, node == self.selectedNode) if icon and h >= icon.GetHeight() and (w >= icon.GetWidth()): iconWidth = icon.GetWidth() + 2 dc.DrawIcon(icon, x + 2, y + 2) # depends on [control=['if'], data=[]] else: iconWidth = 0 if self.labels and h >= dc.GetTextExtent('ABC')[1]: dc.SetTextForeground(self.TextForegroundForNode(node, depth)) dc.DrawText(self.adapter.label(node), x + iconWidth + 2, y + 2) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] finally: dc.DestroyClippingRegion()
def GenerateFileData(self): """Generates the file data for a chunk encoded file.""" # Handle chunked encoding: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1 while 1: line = self.rfile.readline() # We do not support chunked extensions, just ignore them. chunk_size = int(line.split(";")[0], 16) if chunk_size == 0: break for chunk in self._GenerateChunk(chunk_size): yield chunk # Chunk is followed by \r\n. lf = self.rfile.read(2) if lf != "\r\n": raise IOError("Unable to parse chunk.") # Skip entity headers. for header in self.rfile.readline(): if not header: break
def function[GenerateFileData, parameter[self]]: constant[Generates the file data for a chunk encoded file.] while constant[1] begin[:] variable[line] assign[=] call[name[self].rfile.readline, parameter[]] variable[chunk_size] assign[=] call[name[int], parameter[call[call[name[line].split, parameter[constant[;]]]][constant[0]], constant[16]]] if compare[name[chunk_size] equal[==] constant[0]] begin[:] break for taget[name[chunk]] in starred[call[name[self]._GenerateChunk, parameter[name[chunk_size]]]] begin[:] <ast.Yield object at 0x7da2054a7d00> variable[lf] assign[=] call[name[self].rfile.read, parameter[constant[2]]] if compare[name[lf] not_equal[!=] constant[ ]] begin[:] <ast.Raise object at 0x7da2054a7a30> for taget[name[header]] in starred[call[name[self].rfile.readline, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da2054a6800> begin[:] break
keyword[def] identifier[GenerateFileData] ( identifier[self] ): literal[string] keyword[while] literal[int] : identifier[line] = identifier[self] . identifier[rfile] . identifier[readline] () identifier[chunk_size] = identifier[int] ( identifier[line] . identifier[split] ( literal[string] )[ literal[int] ], literal[int] ) keyword[if] identifier[chunk_size] == literal[int] : keyword[break] keyword[for] identifier[chunk] keyword[in] identifier[self] . identifier[_GenerateChunk] ( identifier[chunk_size] ): keyword[yield] identifier[chunk] identifier[lf] = identifier[self] . identifier[rfile] . identifier[read] ( literal[int] ) keyword[if] identifier[lf] != literal[string] : keyword[raise] identifier[IOError] ( literal[string] ) keyword[for] identifier[header] keyword[in] identifier[self] . identifier[rfile] . identifier[readline] (): keyword[if] keyword[not] identifier[header] : keyword[break]
def GenerateFileData(self): """Generates the file data for a chunk encoded file.""" # Handle chunked encoding: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1 while 1: line = self.rfile.readline() # We do not support chunked extensions, just ignore them. chunk_size = int(line.split(';')[0], 16) if chunk_size == 0: break # depends on [control=['if'], data=[]] for chunk in self._GenerateChunk(chunk_size): yield chunk # depends on [control=['for'], data=['chunk']] # Chunk is followed by \r\n. lf = self.rfile.read(2) if lf != '\r\n': raise IOError('Unable to parse chunk.') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # Skip entity headers. for header in self.rfile.readline(): if not header: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['header']]
def list_domains(): ''' Return a list of virtual machine names on the minion CLI Example: .. code-block:: bash salt '*' virt.list_domains ''' data = __salt__['vmadm.list'](keyed=True) vms = ["UUID TYPE RAM STATE ALIAS"] for vm in data: vms.append("{vmuuid}{vmtype}{vmram}{vmstate}{vmalias}".format( vmuuid=vm.ljust(38), vmtype=data[vm]['type'].ljust(6), vmram=data[vm]['ram'].ljust(9), vmstate=data[vm]['state'].ljust(18), vmalias=data[vm]['alias'], )) return vms
def function[list_domains, parameter[]]: constant[ Return a list of virtual machine names on the minion CLI Example: .. code-block:: bash salt '*' virt.list_domains ] variable[data] assign[=] call[call[name[__salt__]][constant[vmadm.list]], parameter[]] variable[vms] assign[=] list[[<ast.Constant object at 0x7da1b2044a90>]] for taget[name[vm]] in starred[name[data]] begin[:] call[name[vms].append, parameter[call[constant[{vmuuid}{vmtype}{vmram}{vmstate}{vmalias}].format, parameter[]]]] return[name[vms]]
keyword[def] identifier[list_domains] (): literal[string] identifier[data] = identifier[__salt__] [ literal[string] ]( identifier[keyed] = keyword[True] ) identifier[vms] =[ literal[string] ] keyword[for] identifier[vm] keyword[in] identifier[data] : identifier[vms] . identifier[append] ( literal[string] . identifier[format] ( identifier[vmuuid] = identifier[vm] . identifier[ljust] ( literal[int] ), identifier[vmtype] = identifier[data] [ identifier[vm] ][ literal[string] ]. identifier[ljust] ( literal[int] ), identifier[vmram] = identifier[data] [ identifier[vm] ][ literal[string] ]. identifier[ljust] ( literal[int] ), identifier[vmstate] = identifier[data] [ identifier[vm] ][ literal[string] ]. identifier[ljust] ( literal[int] ), identifier[vmalias] = identifier[data] [ identifier[vm] ][ literal[string] ], )) keyword[return] identifier[vms]
def list_domains(): """ Return a list of virtual machine names on the minion CLI Example: .. code-block:: bash salt '*' virt.list_domains """ data = __salt__['vmadm.list'](keyed=True) vms = ['UUID TYPE RAM STATE ALIAS'] for vm in data: vms.append('{vmuuid}{vmtype}{vmram}{vmstate}{vmalias}'.format(vmuuid=vm.ljust(38), vmtype=data[vm]['type'].ljust(6), vmram=data[vm]['ram'].ljust(9), vmstate=data[vm]['state'].ljust(18), vmalias=data[vm]['alias'])) # depends on [control=['for'], data=['vm']] return vms
def create_subvariant (self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements): """Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties""" assert is_iterable_typed(root_targets, virtual_target.VirtualTarget) assert is_iterable_typed(all_targets, virtual_target.VirtualTarget) assert isinstance(build_request, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) assert isinstance(rproperties, property_set.PropertySet) assert isinstance(usage_requirements, property_set.PropertySet) for e in root_targets: e.root (True) s = Subvariant (self, build_request, sources, rproperties, usage_requirements, all_targets) for v in all_targets: if not v.creating_subvariant(): v.creating_subvariant(s) return s
def function[create_subvariant, parameter[self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements]]: constant[Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties] assert[call[name[is_iterable_typed], parameter[name[root_targets], name[virtual_target].VirtualTarget]]] assert[call[name[is_iterable_typed], parameter[name[all_targets], name[virtual_target].VirtualTarget]]] assert[call[name[isinstance], parameter[name[build_request], name[property_set].PropertySet]]] assert[call[name[is_iterable_typed], parameter[name[sources], name[virtual_target].VirtualTarget]]] assert[call[name[isinstance], parameter[name[rproperties], name[property_set].PropertySet]]] assert[call[name[isinstance], parameter[name[usage_requirements], name[property_set].PropertySet]]] for taget[name[e]] in starred[name[root_targets]] begin[:] call[name[e].root, parameter[constant[True]]] variable[s] assign[=] call[name[Subvariant], parameter[name[self], name[build_request], name[sources], name[rproperties], name[usage_requirements], name[all_targets]]] for taget[name[v]] in starred[name[all_targets]] begin[:] if <ast.UnaryOp object at 0x7da1b1f0b940> begin[:] call[name[v].creating_subvariant, parameter[name[s]]] return[name[s]]
keyword[def] identifier[create_subvariant] ( identifier[self] , identifier[root_targets] , identifier[all_targets] , identifier[build_request] , identifier[sources] , identifier[rproperties] , identifier[usage_requirements] ): literal[string] keyword[assert] identifier[is_iterable_typed] ( identifier[root_targets] , identifier[virtual_target] . identifier[VirtualTarget] ) keyword[assert] identifier[is_iterable_typed] ( identifier[all_targets] , identifier[virtual_target] . identifier[VirtualTarget] ) keyword[assert] identifier[isinstance] ( identifier[build_request] , identifier[property_set] . identifier[PropertySet] ) keyword[assert] identifier[is_iterable_typed] ( identifier[sources] , identifier[virtual_target] . identifier[VirtualTarget] ) keyword[assert] identifier[isinstance] ( identifier[rproperties] , identifier[property_set] . identifier[PropertySet] ) keyword[assert] identifier[isinstance] ( identifier[usage_requirements] , identifier[property_set] . identifier[PropertySet] ) keyword[for] identifier[e] keyword[in] identifier[root_targets] : identifier[e] . identifier[root] ( keyword[True] ) identifier[s] = identifier[Subvariant] ( identifier[self] , identifier[build_request] , identifier[sources] , identifier[rproperties] , identifier[usage_requirements] , identifier[all_targets] ) keyword[for] identifier[v] keyword[in] identifier[all_targets] : keyword[if] keyword[not] identifier[v] . identifier[creating_subvariant] (): identifier[v] . identifier[creating_subvariant] ( identifier[s] ) keyword[return] identifier[s]
def create_subvariant(self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements): """Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties""" assert is_iterable_typed(root_targets, virtual_target.VirtualTarget) assert is_iterable_typed(all_targets, virtual_target.VirtualTarget) assert isinstance(build_request, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) assert isinstance(rproperties, property_set.PropertySet) assert isinstance(usage_requirements, property_set.PropertySet) for e in root_targets: e.root(True) # depends on [control=['for'], data=['e']] s = Subvariant(self, build_request, sources, rproperties, usage_requirements, all_targets) for v in all_targets: if not v.creating_subvariant(): v.creating_subvariant(s) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] return s
def similar_datetime(anon, obj, field, val): """ Returns a datetime that is within plus/minus two years of the original datetime """ return anon.faker.datetime(field=field, val=val)
def function[similar_datetime, parameter[anon, obj, field, val]]: constant[ Returns a datetime that is within plus/minus two years of the original datetime ] return[call[name[anon].faker.datetime, parameter[]]]
keyword[def] identifier[similar_datetime] ( identifier[anon] , identifier[obj] , identifier[field] , identifier[val] ): literal[string] keyword[return] identifier[anon] . identifier[faker] . identifier[datetime] ( identifier[field] = identifier[field] , identifier[val] = identifier[val] )
def similar_datetime(anon, obj, field, val): """ Returns a datetime that is within plus/minus two years of the original datetime """ return anon.faker.datetime(field=field, val=val)
def _make_histogram(values, bins): """Converts values into a histogram proto using logic from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc""" values = values.reshape(-1) counts, limits = np.histogram(values, bins=bins) limits = limits[1:] sum_sq = values.dot(values) return HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits, bucket=counts)
def function[_make_histogram, parameter[values, bins]]: constant[Converts values into a histogram proto using logic from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc] variable[values] assign[=] call[name[values].reshape, parameter[<ast.UnaryOp object at 0x7da18dc052d0>]] <ast.Tuple object at 0x7da18dc05960> assign[=] call[name[np].histogram, parameter[name[values]]] variable[limits] assign[=] call[name[limits]][<ast.Slice object at 0x7da18dc07c10>] variable[sum_sq] assign[=] call[name[values].dot, parameter[name[values]]] return[call[name[HistogramProto], parameter[]]]
keyword[def] identifier[_make_histogram] ( identifier[values] , identifier[bins] ): literal[string] identifier[values] = identifier[values] . identifier[reshape] (- literal[int] ) identifier[counts] , identifier[limits] = identifier[np] . identifier[histogram] ( identifier[values] , identifier[bins] = identifier[bins] ) identifier[limits] = identifier[limits] [ literal[int] :] identifier[sum_sq] = identifier[values] . identifier[dot] ( identifier[values] ) keyword[return] identifier[HistogramProto] ( identifier[min] = identifier[values] . identifier[min] (), identifier[max] = identifier[values] . identifier[max] (), identifier[num] = identifier[len] ( identifier[values] ), identifier[sum] = identifier[values] . identifier[sum] (), identifier[sum_squares] = identifier[sum_sq] , identifier[bucket_limit] = identifier[limits] , identifier[bucket] = identifier[counts] )
def _make_histogram(values, bins): """Converts values into a histogram proto using logic from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc""" values = values.reshape(-1) (counts, limits) = np.histogram(values, bins=bins) limits = limits[1:] sum_sq = values.dot(values) return HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits, bucket=counts)
def do_create_tool_item(self):
    """This is called by the UIManager when it is time to instantiate
    the proxy"""
    tool_item = SpinToolItem(*self._args_for_toolitem)
    self.connect_proxy(tool_item)
    return tool_item
def function[do_create_tool_item, parameter[self]]: constant[This is called by the UIManager when it is time to instantiate the proxy] variable[proxy] assign[=] call[name[SpinToolItem], parameter[<ast.Starred object at 0x7da18c4cc730>]] call[name[self].connect_proxy, parameter[name[proxy]]] return[name[proxy]]
keyword[def] identifier[do_create_tool_item] ( identifier[self] ): literal[string] identifier[proxy] = identifier[SpinToolItem] (* identifier[self] . identifier[_args_for_toolitem] ) identifier[self] . identifier[connect_proxy] ( identifier[proxy] ) keyword[return] identifier[proxy]
def do_create_tool_item(self): """This is called by the UIManager when it is time to instantiate the proxy""" proxy = SpinToolItem(*self._args_for_toolitem) self.connect_proxy(proxy) return proxy
def copy_submission_locally(self, cloud_path):
    """Copies submission from Google Cloud Storage to local directory.

    Args:
      cloud_path: path of the submission in Google Cloud Storage

    Returns:
      name of the local file where submission is copied to, or None when
      the copy command fails
    """
    filename = os.path.basename(cloud_path)
    local_path = os.path.join(self.download_dir, filename)
    copy_cmd = ['gsutil', 'cp', cloud_path, local_path]
    if subprocess.call(copy_cmd) == 0:
        return local_path
    logging.error('Can\'t copy submission locally')
    return None
def function[copy_submission_locally, parameter[self, cloud_path]]: constant[Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to ] variable[local_path] assign[=] call[name[os].path.join, parameter[name[self].download_dir, call[name[os].path.basename, parameter[name[cloud_path]]]]] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da204344670>, <ast.Constant object at 0x7da204344a60>, <ast.Name object at 0x7da204347070>, <ast.Name object at 0x7da204346ec0>]] if compare[call[name[subprocess].call, parameter[name[cmd]]] not_equal[!=] constant[0]] begin[:] call[name[logging].error, parameter[constant[Can't copy submission locally]]] return[constant[None]] return[name[local_path]]
keyword[def] identifier[copy_submission_locally] ( identifier[self] , identifier[cloud_path] ): literal[string] identifier[local_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[download_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[cloud_path] )) identifier[cmd] =[ literal[string] , literal[string] , identifier[cloud_path] , identifier[local_path] ] keyword[if] identifier[subprocess] . identifier[call] ( identifier[cmd] )!= literal[int] : identifier[logging] . identifier[error] ( literal[string] ) keyword[return] keyword[None] keyword[return] identifier[local_path]
def copy_submission_locally(self, cloud_path): """Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to """ local_path = os.path.join(self.download_dir, os.path.basename(cloud_path)) cmd = ['gsutil', 'cp', cloud_path, local_path] if subprocess.call(cmd) != 0: logging.error("Can't copy submission locally") return None # depends on [control=['if'], data=[]] return local_path
def _drain_step(self, A, ids, area, done, edge_todo): """ Does a single step of the upstream contributing area calculation. Here the pixels in ids are drained downstream, the areas are updated and the next set of pixels to drain are determined for the next round. """ # Only drain to cells that have a contribution A_todo = A[:, ids.ravel()] colsum = np.array(A_todo.sum(1)).ravel() # Only touch cells that actually receive a contribution # during this stage ids_new = colsum != 0 # Is it possible that I may drain twice from my own cell? # -- No, I don't think so... # Is it possible that other cells may drain into me in # multiple iterations -- yes # Then say I check for when I'm done ensures that I don't drain until # everyone has drained into me area.ravel()[ids_new] += (A_todo[ids_new, :] * (area.ravel()[ids].ravel())) edge_todo.ravel()[ids_new] += (A_todo[ids_new, :] * (edge_todo.ravel()[ids].ravel())) # Figure out what's left to do. done.ravel()[ids] = True colsum = A * (~done.ravel()) ids = colsum == 0 # Figure out the new-undrained ids ids = ids & (~done.ravel()) return ids, area, done, edge_todo
def function[_drain_step, parameter[self, A, ids, area, done, edge_todo]]: constant[ Does a single step of the upstream contributing area calculation. Here the pixels in ids are drained downstream, the areas are updated and the next set of pixels to drain are determined for the next round. ] variable[A_todo] assign[=] call[name[A]][tuple[[<ast.Slice object at 0x7da2046233a0>, <ast.Call object at 0x7da2046202b0>]]] variable[colsum] assign[=] call[call[name[np].array, parameter[call[name[A_todo].sum, parameter[constant[1]]]]].ravel, parameter[]] variable[ids_new] assign[=] compare[name[colsum] not_equal[!=] constant[0]] <ast.AugAssign object at 0x7da204620f70> <ast.AugAssign object at 0x7da2046221d0> call[call[name[done].ravel, parameter[]]][name[ids]] assign[=] constant[True] variable[colsum] assign[=] binary_operation[name[A] * <ast.UnaryOp object at 0x7da204623e50>] variable[ids] assign[=] compare[name[colsum] equal[==] constant[0]] variable[ids] assign[=] binary_operation[name[ids] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da204622bc0>] return[tuple[[<ast.Name object at 0x7da204622a10>, <ast.Name object at 0x7da204620790>, <ast.Name object at 0x7da204623fa0>, <ast.Name object at 0x7da204620040>]]]
keyword[def] identifier[_drain_step] ( identifier[self] , identifier[A] , identifier[ids] , identifier[area] , identifier[done] , identifier[edge_todo] ): literal[string] identifier[A_todo] = identifier[A] [:, identifier[ids] . identifier[ravel] ()] identifier[colsum] = identifier[np] . identifier[array] ( identifier[A_todo] . identifier[sum] ( literal[int] )). identifier[ravel] () identifier[ids_new] = identifier[colsum] != literal[int] identifier[area] . identifier[ravel] ()[ identifier[ids_new] ]+=( identifier[A_todo] [ identifier[ids_new] ,:] *( identifier[area] . identifier[ravel] ()[ identifier[ids] ]. identifier[ravel] ())) identifier[edge_todo] . identifier[ravel] ()[ identifier[ids_new] ]+=( identifier[A_todo] [ identifier[ids_new] ,:] *( identifier[edge_todo] . identifier[ravel] ()[ identifier[ids] ]. identifier[ravel] ())) identifier[done] . identifier[ravel] ()[ identifier[ids] ]= keyword[True] identifier[colsum] = identifier[A] *(~ identifier[done] . identifier[ravel] ()) identifier[ids] = identifier[colsum] == literal[int] identifier[ids] = identifier[ids] &(~ identifier[done] . identifier[ravel] ()) keyword[return] identifier[ids] , identifier[area] , identifier[done] , identifier[edge_todo]
def _drain_step(self, A, ids, area, done, edge_todo): """ Does a single step of the upstream contributing area calculation. Here the pixels in ids are drained downstream, the areas are updated and the next set of pixels to drain are determined for the next round. """ # Only drain to cells that have a contribution A_todo = A[:, ids.ravel()] colsum = np.array(A_todo.sum(1)).ravel() # Only touch cells that actually receive a contribution # during this stage ids_new = colsum != 0 # Is it possible that I may drain twice from my own cell? # -- No, I don't think so... # Is it possible that other cells may drain into me in # multiple iterations -- yes # Then say I check for when I'm done ensures that I don't drain until # everyone has drained into me area.ravel()[ids_new] += A_todo[ids_new, :] * area.ravel()[ids].ravel() edge_todo.ravel()[ids_new] += A_todo[ids_new, :] * edge_todo.ravel()[ids].ravel() # Figure out what's left to do. done.ravel()[ids] = True colsum = A * ~done.ravel() ids = colsum == 0 # Figure out the new-undrained ids ids = ids & ~done.ravel() return (ids, area, done, edge_todo)
def fit(self, obs, pstates, unique_pstates=None):
    """
    Estimate model parameters.

    Runs Baum-Welch-style EM: repeated forward/backward passes over the
    observation sequences, accumulating sufficient statistics and
    re-estimating parameters until the log-likelihood improvement drops
    below ``self.thresh`` or ``self.max_iter`` iterations are reached.

    Parameters
    ----------
    obs : list of 2-D array-likes
        One observation sequence per entry (frames x features).
    pstates : list of 1-D array-likes
        Partial-state label sequence aligned with each entry of ``obs``.
    unique_pstates : iterable, optional
        Explicit universe of partial states; when None it is derived
        from ``pstates``.

    Returns
    -------
    self : the fitted estimator (sets ``logprob_``, ``n_iter_performed_``
        and, after the first iteration, ``logprob_delta``).
    """
    obs = [np.array(o) for o in obs]
    pstates = [np.array(p) for p in pstates]
    # List or array of observation sequences
    assert len(obs) == len(pstates)
    assert obs[0].ndim == 2
    assert pstates[0].ndim == 1
    if unique_pstates is not None:
        self._init_pstates(unique_pstates)
    else:
        self._init_pstates(list(set(np.concatenate(pstates))))
    # Map the partial states to a unique index
    pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
    if self.init_method == 'rand':
        self._init_random()
    elif self.init_method == 'obs':
        self._init_from_obs(obs, pstates_idx)
    self._init_pstate_freqs(pstates_idx)
    self._smooth()
    logprob = []
    for i in range(self.max_iter):
        # Expectation step
        stats = self._initialize_sufficient_statistics()
        curr_logprob = 0
        for obs_i, pstates_idx_i in zip(obs, pstates_idx):
            framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
            lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
            bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
            # Posterior state probabilities, normalized per frame in
            # log space for numerical stability.
            gamma = fwdlattice + bwdlattice
            posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
            curr_logprob += lpr
            self._accumulate_sufficient_statistics(stats, obs_i, pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
        logprob.append(curr_logprob)
        self.logprob_ = curr_logprob
        # Check for convergence.
        self.n_iter_performed_ = i
        if i > 0:
            self.logprob_delta = logprob[-1] - logprob[-2]
            if self.logprob_delta < self.thresh:
                break
        # Maximization step
        self._do_mstep(stats)
        # Mix the parameters
        self._smooth()
    return self
def function[fit, parameter[self, obs, pstates, unique_pstates]]: constant[ Estimate model parameters. ] variable[obs] assign[=] <ast.ListComp object at 0x7da18f00e620> variable[pstates] assign[=] <ast.ListComp object at 0x7da18f00dba0> assert[compare[call[name[len], parameter[name[obs]]] equal[==] call[name[len], parameter[name[pstates]]]]] assert[compare[call[name[obs]][constant[0]].ndim equal[==] constant[2]]] assert[compare[call[name[pstates]][constant[0]].ndim equal[==] constant[1]]] if compare[name[unique_pstates] is_not constant[None]] begin[:] call[name[self]._init_pstates, parameter[name[unique_pstates]]] variable[pstates_idx] assign[=] <ast.ListComp object at 0x7da18f00c940> if compare[name[self].init_method equal[==] constant[rand]] begin[:] call[name[self]._init_random, parameter[]] call[name[self]._init_pstate_freqs, parameter[name[pstates_idx]]] call[name[self]._smooth, parameter[]] variable[logprob] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[self].max_iter]]] begin[:] variable[stats] assign[=] call[name[self]._initialize_sufficient_statistics, parameter[]] variable[curr_logprob] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da18f00e4d0>, <ast.Name object at 0x7da18f00e200>]]] in starred[call[name[zip], parameter[name[obs], name[pstates_idx]]]] begin[:] variable[framelogprob] assign[=] call[name[self]._compute_log_likelihood, parameter[name[obs_i], name[pstates_idx_i]]] <ast.Tuple object at 0x7da18f00d660> assign[=] call[name[self]._do_forward_pass, parameter[name[framelogprob], name[pstates_idx_i]]] variable[bwdlattice] assign[=] call[name[self]._do_backward_pass, parameter[name[framelogprob], name[pstates_idx_i]]] variable[gamma] assign[=] binary_operation[name[fwdlattice] + name[bwdlattice]] variable[posteriors] assign[=] call[name[np].exp, parameter[binary_operation[name[gamma].T - call[name[logsumexp], parameter[name[gamma]]]]]].T <ast.AugAssign object at 0x7da18f00d330> 
call[name[self]._accumulate_sufficient_statistics, parameter[name[stats], name[obs_i], name[pstates_idx_i], name[framelogprob], name[posteriors], name[fwdlattice], name[bwdlattice]]] call[name[logprob].append, parameter[name[curr_logprob]]] name[self].logprob_ assign[=] name[curr_logprob] name[self].n_iter_performed_ assign[=] name[i] if compare[name[i] greater[>] constant[0]] begin[:] name[self].logprob_delta assign[=] binary_operation[call[name[logprob]][<ast.UnaryOp object at 0x7da1b235ef50>] - call[name[logprob]][<ast.UnaryOp object at 0x7da1b235d270>]] if compare[name[self].logprob_delta less[<] name[self].thresh] begin[:] break call[name[self]._do_mstep, parameter[name[stats]]] call[name[self]._smooth, parameter[]] return[name[self]]
keyword[def] identifier[fit] ( identifier[self] , identifier[obs] , identifier[pstates] , identifier[unique_pstates] = keyword[None] ): literal[string] identifier[obs] =[ identifier[np] . identifier[array] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[obs] ] identifier[pstates] =[ identifier[np] . identifier[array] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[pstates] ] keyword[assert] identifier[len] ( identifier[obs] )== identifier[len] ( identifier[pstates] ) keyword[assert] identifier[obs] [ literal[int] ]. identifier[ndim] == literal[int] keyword[assert] identifier[pstates] [ literal[int] ]. identifier[ndim] == literal[int] keyword[if] identifier[unique_pstates] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_init_pstates] ( identifier[unique_pstates] ) keyword[else] : identifier[self] . identifier[_init_pstates] ( identifier[list] ( identifier[set] ( identifier[np] . identifier[concatenate] ( identifier[pstates] )))) identifier[pstates_idx] =[ identifier[np] . identifier[array] ([ identifier[self] . identifier[e] [ identifier[p] ] keyword[for] identifier[p] keyword[in] identifier[seq] ]) keyword[for] identifier[seq] keyword[in] identifier[pstates] ] keyword[if] identifier[self] . identifier[init_method] == literal[string] : identifier[self] . identifier[_init_random] () keyword[elif] identifier[self] . identifier[init_method] == literal[string] : identifier[self] . identifier[_init_from_obs] ( identifier[obs] , identifier[pstates_idx] ) identifier[self] . identifier[_init_pstate_freqs] ( identifier[pstates_idx] ) identifier[self] . identifier[_smooth] () identifier[logprob] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[max_iter] ): identifier[stats] = identifier[self] . 
identifier[_initialize_sufficient_statistics] () identifier[curr_logprob] = literal[int] keyword[for] identifier[obs_i] , identifier[pstates_idx_i] keyword[in] identifier[zip] ( identifier[obs] , identifier[pstates_idx] ): identifier[framelogprob] = identifier[self] . identifier[_compute_log_likelihood] ( identifier[obs_i] , identifier[pstates_idx_i] ) identifier[lpr] , identifier[fwdlattice] = identifier[self] . identifier[_do_forward_pass] ( identifier[framelogprob] , identifier[pstates_idx_i] ) identifier[bwdlattice] = identifier[self] . identifier[_do_backward_pass] ( identifier[framelogprob] , identifier[pstates_idx_i] ) identifier[gamma] = identifier[fwdlattice] + identifier[bwdlattice] identifier[posteriors] = identifier[np] . identifier[exp] ( identifier[gamma] . identifier[T] - identifier[logsumexp] ( identifier[gamma] , identifier[axis] = literal[int] )). identifier[T] identifier[curr_logprob] += identifier[lpr] identifier[self] . identifier[_accumulate_sufficient_statistics] ( identifier[stats] , identifier[obs_i] , identifier[pstates_idx_i] , identifier[framelogprob] , identifier[posteriors] , identifier[fwdlattice] , identifier[bwdlattice] ) identifier[logprob] . identifier[append] ( identifier[curr_logprob] ) identifier[self] . identifier[logprob_] = identifier[curr_logprob] identifier[self] . identifier[n_iter_performed_] = identifier[i] keyword[if] identifier[i] > literal[int] : identifier[self] . identifier[logprob_delta] = identifier[logprob] [- literal[int] ]- identifier[logprob] [- literal[int] ] keyword[if] identifier[self] . identifier[logprob_delta] < identifier[self] . identifier[thresh] : keyword[break] identifier[self] . identifier[_do_mstep] ( identifier[stats] ) identifier[self] . identifier[_smooth] () keyword[return] identifier[self]
def fit(self, obs, pstates, unique_pstates=None): """ Estimate model parameters. """ obs = [np.array(o) for o in obs] pstates = [np.array(p) for p in pstates] # List or array of observation sequences assert len(obs) == len(pstates) assert obs[0].ndim == 2 assert pstates[0].ndim == 1 if unique_pstates is not None: self._init_pstates(unique_pstates) # depends on [control=['if'], data=['unique_pstates']] else: self._init_pstates(list(set(np.concatenate(pstates)))) # Map the partial states to a unique index pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates] if self.init_method == 'rand': self._init_random() # depends on [control=['if'], data=[]] elif self.init_method == 'obs': self._init_from_obs(obs, pstates_idx) # depends on [control=['if'], data=[]] self._init_pstate_freqs(pstates_idx) self._smooth() logprob = [] for i in range(self.max_iter): # Expectation step stats = self._initialize_sufficient_statistics() curr_logprob = 0 for (obs_i, pstates_idx_i) in zip(obs, pstates_idx): framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i) (lpr, fwdlattice) = self._do_forward_pass(framelogprob, pstates_idx_i) bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i) gamma = fwdlattice + bwdlattice posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T curr_logprob += lpr self._accumulate_sufficient_statistics(stats, obs_i, pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice) # depends on [control=['for'], data=[]] logprob.append(curr_logprob) self.logprob_ = curr_logprob # Check for convergence. self.n_iter_performed_ = i if i > 0: self.logprob_delta = logprob[-1] - logprob[-2] if self.logprob_delta < self.thresh: break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Maximization step self._do_mstep(stats) # Mix the parameters self._smooth() # depends on [control=['for'], data=['i']] return self
def dispatch_request(self):
    # type: () -> Response
    """Verify the incoming Alexa request and route it to the skill.

    Intended to be registered as the view function for the skill's URL
    rule. The request is checked by the configured verifiers and then
    dispatched through the registered handlers; the skill's response is
    returned as JSON for the Alexa service.

    :return: The skill response for the input request
    :rtype: flask.Response
    :raises: :py:class:`werkzeug.exceptions.MethodNotAllowed` if the
        method is invoked for other than HTTP POST request.
        :py:class:`werkzeug.exceptions.BadRequest` if the
        verification fails.
        :py:class:`werkzeug.exceptions.InternalServerError` for any
        internal exception.
    """
    if flask_request.method != "POST":
        raise exceptions.MethodNotAllowed()

    try:
        body_text = flask_request.data.decode(
            verifier_constants.CHARACTER_ENCODING)
        skill_response = self._webservice_handler.verify_request_and_dispatch(
            http_request_headers=flask_request.headers,
            http_request_body=body_text)
        return jsonify(skill_response)
    except VerificationException:
        current_app.logger.error(
            "Request verification failed", exc_info=True)
        raise exceptions.BadRequest(
            description="Incoming request failed verification")
    except AskSdkException:
        current_app.logger.error(
            "Skill dispatch exception", exc_info=True)
        raise exceptions.InternalServerError(
            description="Exception occurred during skill dispatch")
def function[dispatch_request, parameter[self]]: constant[Method that handles request verification and routing. This method can be used as a function to register on the URL rule. The request is verified through the registered list of verifiers, before invoking the request handlers. The method returns a JSON response for the Alexa service to respond to the request. :return: The skill response for the input request :rtype: flask.Response :raises: :py:class:`werkzeug.exceptions.MethodNotAllowed` if the method is invoked for other than HTTP POST request. :py:class:`werkzeug.exceptions.BadRequest` if the verification fails. :py:class:`werkzeug.exceptions.InternalServerError` for any internal exception. ] if compare[name[flask_request].method not_equal[!=] constant[POST]] begin[:] <ast.Raise object at 0x7da1b188d240> <ast.Try object at 0x7da1b188cf40>
keyword[def] identifier[dispatch_request] ( identifier[self] ): literal[string] keyword[if] identifier[flask_request] . identifier[method] != literal[string] : keyword[raise] identifier[exceptions] . identifier[MethodNotAllowed] () keyword[try] : identifier[content] = identifier[flask_request] . identifier[data] . identifier[decode] ( identifier[verifier_constants] . identifier[CHARACTER_ENCODING] ) identifier[response] = identifier[self] . identifier[_webservice_handler] . identifier[verify_request_and_dispatch] ( identifier[http_request_headers] = identifier[flask_request] . identifier[headers] , identifier[http_request_body] = identifier[content] ) keyword[return] identifier[jsonify] ( identifier[response] ) keyword[except] identifier[VerificationException] : identifier[current_app] . identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] ) keyword[raise] identifier[exceptions] . identifier[BadRequest] ( identifier[description] = literal[string] ) keyword[except] identifier[AskSdkException] : identifier[current_app] . identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] ) keyword[raise] identifier[exceptions] . identifier[InternalServerError] ( identifier[description] = literal[string] )
def dispatch_request(self): # type: () -> Response 'Method that handles request verification and routing.\n\n This method can be used as a function to register on the URL\n rule. The request is verified through the registered list of\n verifiers, before invoking the request handlers. The method\n returns a JSON response for the Alexa service to respond to the\n request.\n\n :return: The skill response for the input request\n :rtype: flask.Response\n :raises: :py:class:`werkzeug.exceptions.MethodNotAllowed` if the\n method is invoked for other than HTTP POST request.\n :py:class:`werkzeug.exceptions.BadRequest` if the\n verification fails.\n :py:class:`werkzeug.exceptions.InternalServerError` for any\n internal exception.\n ' if flask_request.method != 'POST': raise exceptions.MethodNotAllowed() # depends on [control=['if'], data=[]] try: content = flask_request.data.decode(verifier_constants.CHARACTER_ENCODING) response = self._webservice_handler.verify_request_and_dispatch(http_request_headers=flask_request.headers, http_request_body=content) return jsonify(response) # depends on [control=['try'], data=[]] except VerificationException: current_app.logger.error('Request verification failed', exc_info=True) raise exceptions.BadRequest(description='Incoming request failed verification') # depends on [control=['except'], data=[]] except AskSdkException: current_app.logger.error('Skill dispatch exception', exc_info=True) raise exceptions.InternalServerError(description='Exception occurred during skill dispatch') # depends on [control=['except'], data=[]]
def transfer_funds(self, to, amount, denom, msg):
    ''' Transfer SBD or STEEM to the given account
    '''
    try:
        commit = self.steem_instance().commit
        commit.transfer(to, float(amount), denom, msg, self.mainaccount)
    except Exception as e:
        # Best-effort: report the failure through the messenger and
        # signal it with the return value instead of raising.
        self.msg.error_message(e)
        return False
    else:
        return True
def function[transfer_funds, parameter[self, to, amount, denom, msg]]: constant[ Transfer SBD or STEEM to the given account ] <ast.Try object at 0x7da1b14611e0>
keyword[def] identifier[transfer_funds] ( identifier[self] , identifier[to] , identifier[amount] , identifier[denom] , identifier[msg] ): literal[string] keyword[try] : identifier[self] . identifier[steem_instance] (). identifier[commit] . identifier[transfer] ( identifier[to] , identifier[float] ( identifier[amount] ), identifier[denom] , identifier[msg] , identifier[self] . identifier[mainaccount] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[msg] . identifier[error_message] ( identifier[e] ) keyword[return] keyword[False] keyword[else] : keyword[return] keyword[True]
def transfer_funds(self, to, amount, denom, msg): """ Transfer SBD or STEEM to the given account """ try: self.steem_instance().commit.transfer(to, float(amount), denom, msg, self.mainaccount) # depends on [control=['try'], data=[]] except Exception as e: self.msg.error_message(e) return False # depends on [control=['except'], data=['e']] else: return True
def GET_name_info( self, path_info, name ):
    """
    Look up a name's zonefile, address, and last TXID
    Reply status, zonefile, zonefile hash, address, and last TXID.
    'status' can be 'available', 'registered', 'revoked', or 'pending'
    """
    # Reject anything that is neither a well-formed on-chain name nor a
    # subdomain before hitting the daemon.
    if not check_name(name) and not check_subdomain(name):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    blockstackd_url = get_blockstackd_url()

    name_rec = None
    try:
        name_rec = blockstackd_client.get_name_record(name, include_history=False, hostport=blockstackd_url)
    except ValueError:
        return self._reply_json({'error': 'Invalid argument: not a well-formed name or subdomain'}, status_code=400)

    if 'error' in name_rec:
        # The daemon signals distinct failure modes through the error
        # message text; dispatch on substrings of it.
        if 'not found' in name_rec['error'].lower():
            return self._reply_json({'status': 'available'}, status_code=404)

        elif 'failed to load subdomain' in name_rec['error'].lower():
            # try to redirect to resolver, if given
            _, _, domain_name = blockstackd_scripts.is_address_subdomain(name)
            domain_rec = blockstackd_client.get_name_record(domain_name, include_history=False, hostport=blockstackd_url)
            if 'error' in domain_rec:
                # no resolver known for on-chain name
                return self._reply_json({'status': 'available', 'more': 'failed to look up parent domain'}, status_code=404)

            resolver_target = domain_rec.get('resolver', None)
            if resolver_target is None:
                # no _resolver
                return self._reply_json({'status': 'available', 'more': 'failed to find parent domain\'s resolver'}, status_code=404)

            # Hand the lookup off to the parent domain's resolver with a
            # permanent redirect.
            redirect_location = resolver_target + '/v1/names/' + name
            log.debug("Redirect lookup on {} to {}".format(name, redirect_location))

            self._send_headers(status_code=301, more_headers={ 'Location': redirect_location })
            return self.wfile.write(json.dumps({'status': 'redirect'}))

        elif 'expired' in name_rec['error'].lower():
            return self._reply_json({'error': name_rec['error']}, status_code=404)

        else:
            return self._reply_json({'error': 'Blockstack daemon error: {}'.format(name_rec['error'])}, status_code=name_rec.get('http_status', 502))

    # Zonefile comes back base64-encoded from the daemon (when present).
    zonefile_txt = None
    if 'zonefile' in name_rec:
        zonefile_txt = base64.b64decode(name_rec['zonefile'])

    ret = {}
    if blockstackd_scripts.is_subdomain(name):
        # subdomain
        address = name_rec['address']
        if address:
            address = virtualchain.address_reencode(str(address))

        log.debug("{} is registered_subdomain".format(name))
        ret = {
            'status': 'registered_subdomain',
            'zonefile': zonefile_txt,
            'zonefile_hash': name_rec['value_hash'],
            'address': name_rec['address'],
            'blockchain': 'bitcoin',
            'last_txid': name_rec['txid'],
            'did': name_rec.get('did', {'error': 'Not supported for this name'})
        }
    else:
        # On-chain name: distinguish revoked from registered.
        status = 'revoked' if name_rec['revoked'] else 'registered'
        address = name_rec['address']
        if address:
            address = virtualchain.address_reencode(str(address))

        log.debug("{} is {}".format(name, status))
        ret = {
            'status': status,
            'zonefile': zonefile_txt,
            'zonefile_hash': name_rec['value_hash'],
            'address': address,
            'last_txid': name_rec['txid'],
            'blockchain': 'bitcoin',
            'expire_block': name_rec['expire_block'],  # expire_block is what blockstack.js expects
            'renewal_deadline': name_rec['renewal_deadline'],
            'grace_period': name_rec.get('grace_period', False),
            'resolver': name_rec.get('resolver', None),
            'did': name_rec.get('did', {'error': 'Not supported for this name'})
        }

    return self._reply_json(ret)
def function[GET_name_info, parameter[self, path_info, name]]: constant[ Look up a name's zonefile, address, and last TXID Reply status, zonefile, zonefile hash, address, and last TXID. 'status' can be 'available', 'registered', 'revoked', or 'pending' ] if <ast.BoolOp object at 0x7da18f09ca00> begin[:] return[call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da18f09fa00>], [<ast.Constant object at 0x7da18f09dc60>]]]]] variable[blockstackd_url] assign[=] call[name[get_blockstackd_url], parameter[]] variable[name_rec] assign[=] constant[None] <ast.Try object at 0x7da18f09ee90> if compare[constant[error] in name[name_rec]] begin[:] if compare[constant[not found] in call[call[name[name_rec]][constant[error]].lower, parameter[]]] begin[:] return[call[name[self]._reply_json, parameter[dictionary[[<ast.Constant object at 0x7da18f09d3c0>], [<ast.Constant object at 0x7da18f09f550>]]]]] variable[zonefile_txt] assign[=] constant[None] if compare[constant[zonefile] in name[name_rec]] begin[:] variable[zonefile_txt] assign[=] call[name[base64].b64decode, parameter[call[name[name_rec]][constant[zonefile]]]] variable[ret] assign[=] dictionary[[], []] if call[name[blockstackd_scripts].is_subdomain, parameter[name[name]]] begin[:] variable[address] assign[=] call[name[name_rec]][constant[address]] if name[address] begin[:] variable[address] assign[=] call[name[virtualchain].address_reencode, parameter[call[name[str], parameter[name[address]]]]] call[name[log].debug, parameter[call[constant[{} is registered_subdomain].format, parameter[name[name]]]]] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f09e9b0>, <ast.Constant object at 0x7da18f09c700>, <ast.Constant object at 0x7da18f09cbe0>, <ast.Constant object at 0x7da18f09d660>, <ast.Constant object at 0x7da18f09cf10>, <ast.Constant object at 0x7da18f09d510>, <ast.Constant object at 0x7da18f09d420>], [<ast.Constant object at 0x7da18f09cd60>, <ast.Name object at 0x7da18f09e950>, 
<ast.Subscript object at 0x7da18f09e0e0>, <ast.Subscript object at 0x7da18f09e8f0>, <ast.Constant object at 0x7da18f09e590>, <ast.Subscript object at 0x7da18f09ca60>, <ast.Call object at 0x7da18f09d5d0>]] return[call[name[self]._reply_json, parameter[name[ret]]]]
keyword[def] identifier[GET_name_info] ( identifier[self] , identifier[path_info] , identifier[name] ): literal[string] keyword[if] keyword[not] identifier[check_name] ( identifier[name] ) keyword[and] keyword[not] identifier[check_subdomain] ( identifier[name] ): keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] }, identifier[status_code] = literal[int] ) identifier[blockstackd_url] = identifier[get_blockstackd_url] () identifier[name_rec] = keyword[None] keyword[try] : identifier[name_rec] = identifier[blockstackd_client] . identifier[get_name_record] ( identifier[name] , identifier[include_history] = keyword[False] , identifier[hostport] = identifier[blockstackd_url] ) keyword[except] identifier[ValueError] : keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] }, identifier[status_code] = literal[int] ) keyword[if] literal[string] keyword[in] identifier[name_rec] : keyword[if] literal[string] keyword[in] identifier[name_rec] [ literal[string] ]. identifier[lower] (): keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] }, identifier[status_code] = literal[int] ) keyword[elif] literal[string] keyword[in] identifier[name_rec] [ literal[string] ]. identifier[lower] (): identifier[_] , identifier[_] , identifier[domain_name] = identifier[blockstackd_scripts] . identifier[is_address_subdomain] ( identifier[name] ) identifier[domain_rec] = identifier[blockstackd_client] . identifier[get_name_record] ( identifier[domain_name] , identifier[include_history] = keyword[False] , identifier[hostport] = identifier[blockstackd_url] ) keyword[if] literal[string] keyword[in] identifier[domain_rec] : keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] , literal[string] : literal[string] }, identifier[status_code] = literal[int] ) identifier[resolver_target] = identifier[domain_rec] . 
identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[resolver_target] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] , literal[string] : literal[string] }, identifier[status_code] = literal[int] ) identifier[redirect_location] = identifier[resolver_target] + literal[string] + identifier[name] identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] , identifier[redirect_location] )) identifier[self] . identifier[_send_headers] ( identifier[status_code] = literal[int] , identifier[more_headers] ={ literal[string] : identifier[redirect_location] }) keyword[return] identifier[self] . identifier[wfile] . identifier[write] ( identifier[json] . identifier[dumps] ({ literal[string] : literal[string] })) keyword[elif] literal[string] keyword[in] identifier[name_rec] [ literal[string] ]. identifier[lower] (): keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : identifier[name_rec] [ literal[string] ]}, identifier[status_code] = literal[int] ) keyword[else] : keyword[return] identifier[self] . identifier[_reply_json] ({ literal[string] : literal[string] . identifier[format] ( identifier[name_rec] [ literal[string] ])}, identifier[status_code] = identifier[name_rec] . identifier[get] ( literal[string] , literal[int] )) identifier[zonefile_txt] = keyword[None] keyword[if] literal[string] keyword[in] identifier[name_rec] : identifier[zonefile_txt] = identifier[base64] . identifier[b64decode] ( identifier[name_rec] [ literal[string] ]) identifier[ret] ={} keyword[if] identifier[blockstackd_scripts] . identifier[is_subdomain] ( identifier[name] ): identifier[address] = identifier[name_rec] [ literal[string] ] keyword[if] identifier[address] : identifier[address] = identifier[virtualchain] . identifier[address_reencode] ( identifier[str] ( identifier[address] )) identifier[log] . identifier[debug] ( literal[string] . 
identifier[format] ( identifier[name] )) identifier[ret] ={ literal[string] : literal[string] , literal[string] : identifier[zonefile_txt] , literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : literal[string] , literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : identifier[name_rec] . identifier[get] ( literal[string] ,{ literal[string] : literal[string] }) } keyword[else] : identifier[status] = literal[string] keyword[if] identifier[name_rec] [ literal[string] ] keyword[else] literal[string] identifier[address] = identifier[name_rec] [ literal[string] ] keyword[if] identifier[address] : identifier[address] = identifier[virtualchain] . identifier[address_reencode] ( identifier[str] ( identifier[address] )) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] , identifier[status] )) identifier[ret] ={ literal[string] : identifier[status] , literal[string] : identifier[zonefile_txt] , literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : identifier[address] , literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : literal[string] , literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : identifier[name_rec] [ literal[string] ], literal[string] : identifier[name_rec] . identifier[get] ( literal[string] , keyword[False] ), literal[string] : identifier[name_rec] . identifier[get] ( literal[string] , keyword[None] ), literal[string] : identifier[name_rec] . identifier[get] ( literal[string] ,{ literal[string] : literal[string] }) } keyword[return] identifier[self] . identifier[_reply_json] ( identifier[ret] )
def GET_name_info(self, path_info, name): """ Look up a name's zonefile, address, and last TXID Reply status, zonefile, zonefile hash, address, and last TXID. 'status' can be 'available', 'registered', 'revoked', or 'pending' """ if not check_name(name) and (not check_subdomain(name)): return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400) # depends on [control=['if'], data=[]] blockstackd_url = get_blockstackd_url() name_rec = None try: name_rec = blockstackd_client.get_name_record(name, include_history=False, hostport=blockstackd_url) # depends on [control=['try'], data=[]] except ValueError: return self._reply_json({'error': 'Invalid argument: not a well-formed name or subdomain'}, status_code=400) # depends on [control=['except'], data=[]] if 'error' in name_rec: if 'not found' in name_rec['error'].lower(): return self._reply_json({'status': 'available'}, status_code=404) # depends on [control=['if'], data=[]] elif 'failed to load subdomain' in name_rec['error'].lower(): # try to redirect to resolver, if given (_, _, domain_name) = blockstackd_scripts.is_address_subdomain(name) domain_rec = blockstackd_client.get_name_record(domain_name, include_history=False, hostport=blockstackd_url) if 'error' in domain_rec: # no resolver known for on-chain name return self._reply_json({'status': 'available', 'more': 'failed to look up parent domain'}, status_code=404) # depends on [control=['if'], data=[]] resolver_target = domain_rec.get('resolver', None) if resolver_target is None: # no _resolver return self._reply_json({'status': 'available', 'more': "failed to find parent domain's resolver"}, status_code=404) # depends on [control=['if'], data=[]] redirect_location = resolver_target + '/v1/names/' + name log.debug('Redirect lookup on {} to {}'.format(name, redirect_location)) self._send_headers(status_code=301, more_headers={'Location': redirect_location}) return self.wfile.write(json.dumps({'status': 'redirect'})) # depends on [control=['if'], 
data=[]] elif 'expired' in name_rec['error'].lower(): return self._reply_json({'error': name_rec['error']}, status_code=404) # depends on [control=['if'], data=[]] else: return self._reply_json({'error': 'Blockstack daemon error: {}'.format(name_rec['error'])}, status_code=name_rec.get('http_status', 502)) # depends on [control=['if'], data=['name_rec']] zonefile_txt = None if 'zonefile' in name_rec: zonefile_txt = base64.b64decode(name_rec['zonefile']) # depends on [control=['if'], data=['name_rec']] ret = {} if blockstackd_scripts.is_subdomain(name): # subdomain address = name_rec['address'] if address: address = virtualchain.address_reencode(str(address)) # depends on [control=['if'], data=[]] log.debug('{} is registered_subdomain'.format(name)) ret = {'status': 'registered_subdomain', 'zonefile': zonefile_txt, 'zonefile_hash': name_rec['value_hash'], 'address': name_rec['address'], 'blockchain': 'bitcoin', 'last_txid': name_rec['txid'], 'did': name_rec.get('did', {'error': 'Not supported for this name'})} # depends on [control=['if'], data=[]] else: status = 'revoked' if name_rec['revoked'] else 'registered' address = name_rec['address'] if address: address = virtualchain.address_reencode(str(address)) # depends on [control=['if'], data=[]] log.debug('{} is {}'.format(name, status)) # expire_block is what blockstack.js expects ret = {'status': status, 'zonefile': zonefile_txt, 'zonefile_hash': name_rec['value_hash'], 'address': address, 'last_txid': name_rec['txid'], 'blockchain': 'bitcoin', 'expire_block': name_rec['expire_block'], 'renewal_deadline': name_rec['renewal_deadline'], 'grace_period': name_rec.get('grace_period', False), 'resolver': name_rec.get('resolver', None), 'did': name_rec.get('did', {'error': 'Not supported for this name'})} return self._reply_json(ret)
def str2unixtime(ts, fmt='%Y-%m-%d %H:%M:%S'): """ 将固定格式的字符串转换成对应的时间戳到秒级别 - 使用: >>> str2unixtime('2016-01-01 01:01:01') 1451581261 :param ts: :type ts: :param fmt: :type fmt: :return: :rtype: """ t = time.strptime(ts, fmt) return int(time.mktime(t))
def function[str2unixtime, parameter[ts, fmt]]: constant[ 将固定格式的字符串转换成对应的时间戳到秒级别 - 使用: >>> str2unixtime('2016-01-01 01:01:01') 1451581261 :param ts: :type ts: :param fmt: :type fmt: :return: :rtype: ] variable[t] assign[=] call[name[time].strptime, parameter[name[ts], name[fmt]]] return[call[name[int], parameter[call[name[time].mktime, parameter[name[t]]]]]]
keyword[def] identifier[str2unixtime] ( identifier[ts] , identifier[fmt] = literal[string] ): literal[string] identifier[t] = identifier[time] . identifier[strptime] ( identifier[ts] , identifier[fmt] ) keyword[return] identifier[int] ( identifier[time] . identifier[mktime] ( identifier[t] ))
def str2unixtime(ts, fmt='%Y-%m-%d %H:%M:%S'): """ 将固定格式的字符串转换成对应的时间戳到秒级别 - 使用: >>> str2unixtime('2016-01-01 01:01:01') 1451581261 :param ts: :type ts: :param fmt: :type fmt: :return: :rtype: """ t = time.strptime(ts, fmt) return int(time.mktime(t))
def main(): """Create and use a logger.""" formatter = ColoredFormatter(log_colors={'TRACE': 'yellow'}) handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger('example') logger.addHandler(handler) logger.setLevel('TRACE') logger.log(5, 'a message using a custom level')
def function[main, parameter[]]: constant[Create and use a logger.] variable[formatter] assign[=] call[name[ColoredFormatter], parameter[]] variable[handler] assign[=] call[name[logging].StreamHandler, parameter[]] call[name[handler].setFormatter, parameter[name[formatter]]] variable[logger] assign[=] call[name[logging].getLogger, parameter[constant[example]]] call[name[logger].addHandler, parameter[name[handler]]] call[name[logger].setLevel, parameter[constant[TRACE]]] call[name[logger].log, parameter[constant[5], constant[a message using a custom level]]]
keyword[def] identifier[main] (): literal[string] identifier[formatter] = identifier[ColoredFormatter] ( identifier[log_colors] ={ literal[string] : literal[string] }) identifier[handler] = identifier[logging] . identifier[StreamHandler] () identifier[handler] . identifier[setFormatter] ( identifier[formatter] ) identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] ) identifier[logger] . identifier[addHandler] ( identifier[handler] ) identifier[logger] . identifier[setLevel] ( literal[string] ) identifier[logger] . identifier[log] ( literal[int] , literal[string] )
def main(): """Create and use a logger.""" formatter = ColoredFormatter(log_colors={'TRACE': 'yellow'}) handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger('example') logger.addHandler(handler) logger.setLevel('TRACE') logger.log(5, 'a message using a custom level')
def get_network_adapter_type(adapter_type): ''' Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ''' if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() raise ValueError('An unknown network adapter object type name.')
def function[get_network_adapter_type, parameter[adapter_type]]: constant[ Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. ] if compare[name[adapter_type] equal[==] constant[vmxnet]] begin[:] return[call[name[vim].vm.device.VirtualVmxnet, parameter[]]] <ast.Raise object at 0x7da1b1f665f0>
keyword[def] identifier[get_network_adapter_type] ( identifier[adapter_type] ): literal[string] keyword[if] identifier[adapter_type] == literal[string] : keyword[return] identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualVmxnet] () keyword[elif] identifier[adapter_type] == literal[string] : keyword[return] identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualVmxnet2] () keyword[elif] identifier[adapter_type] == literal[string] : keyword[return] identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualVmxnet3] () keyword[elif] identifier[adapter_type] == literal[string] : keyword[return] identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualE1000] () keyword[elif] identifier[adapter_type] == literal[string] : keyword[return] identifier[vim] . identifier[vm] . identifier[device] . identifier[VirtualE1000e] () keyword[raise] identifier[ValueError] ( literal[string] )
def get_network_adapter_type(adapter_type): """ Return the network adapter type. adpater_type The adapter type from which to obtain the network adapter type. """ if adapter_type == 'vmxnet': return vim.vm.device.VirtualVmxnet() # depends on [control=['if'], data=[]] elif adapter_type == 'vmxnet2': return vim.vm.device.VirtualVmxnet2() # depends on [control=['if'], data=[]] elif adapter_type == 'vmxnet3': return vim.vm.device.VirtualVmxnet3() # depends on [control=['if'], data=[]] elif adapter_type == 'e1000': return vim.vm.device.VirtualE1000() # depends on [control=['if'], data=[]] elif adapter_type == 'e1000e': return vim.vm.device.VirtualE1000e() # depends on [control=['if'], data=[]] raise ValueError('An unknown network adapter object type name.')
def create_vocab(sentences, word_counts_output_file, min_word_count=1): """Creates the vocabulary of word to word_id. See ``tutorial_tfrecord3.py``. The vocabulary is saved to disk in a text file of word counts. The id of each word in the file is its corresponding 0-based line number. Parameters ------------ sentences : list of list of str All sentences for creating the vocabulary. word_counts_output_file : str The file name. min_word_count : int Minimum number of occurrences for a word. Returns -------- :class:`SimpleVocabulary` The simple vocabulary object, see :class:`Vocabulary` for more. Examples -------- Pre-process sentences >>> captions = ["one two , three", "four five five"] >>> processed_capts = [] >>> for c in captions: >>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>") >>> processed_capts.append(c) >>> print(processed_capts) ...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']] Create vocabulary >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1) Creating vocabulary. Total words: 8 Words in vocabulary: 8 Wrote vocabulary file: vocab.txt Get vocabulary object >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>") INFO:tensorflow:Initializing vocabulary from file: vocab.txt [TL] Vocabulary from vocab.txt : <S> </S> <UNK> vocabulary with 10 words (includes start_word, end_word, unk_word) start_id: 2 end_id: 3 unk_id: 9 pad_id: 0 """ tl.logging.info("Creating vocabulary.") counter = Counter() for c in sentences: counter.update(c) # tl.logging.info('c',c) tl.logging.info(" Total words: %d" % len(counter)) # Filter uncommon words and sort by descending count. 
word_counts = [x for x in counter.items() if x[1] >= min_word_count] word_counts.sort(key=lambda x: x[1], reverse=True) word_counts = [("<PAD>", 0)] + word_counts # 1st id should be reserved for padding # tl.logging.info(word_counts) tl.logging.info(" Words in vocabulary: %d" % len(word_counts)) # Write out the word counts file. with tf.gfile.FastGFile(word_counts_output_file, "w") as f: f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts])) tl.logging.info(" Wrote vocabulary file: %s" % word_counts_output_file) # Create the vocabulary dictionary. reverse_vocab = [x[0] for x in word_counts] unk_id = len(reverse_vocab) vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)]) vocab = SimpleVocabulary(vocab_dict, unk_id) return vocab
def function[create_vocab, parameter[sentences, word_counts_output_file, min_word_count]]: constant[Creates the vocabulary of word to word_id. See ``tutorial_tfrecord3.py``. The vocabulary is saved to disk in a text file of word counts. The id of each word in the file is its corresponding 0-based line number. Parameters ------------ sentences : list of list of str All sentences for creating the vocabulary. word_counts_output_file : str The file name. min_word_count : int Minimum number of occurrences for a word. Returns -------- :class:`SimpleVocabulary` The simple vocabulary object, see :class:`Vocabulary` for more. Examples -------- Pre-process sentences >>> captions = ["one two , three", "four five five"] >>> processed_capts = [] >>> for c in captions: >>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>") >>> processed_capts.append(c) >>> print(processed_capts) ...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']] Create vocabulary >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1) Creating vocabulary. 
Total words: 8 Words in vocabulary: 8 Wrote vocabulary file: vocab.txt Get vocabulary object >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>") INFO:tensorflow:Initializing vocabulary from file: vocab.txt [TL] Vocabulary from vocab.txt : <S> </S> <UNK> vocabulary with 10 words (includes start_word, end_word, unk_word) start_id: 2 end_id: 3 unk_id: 9 pad_id: 0 ] call[name[tl].logging.info, parameter[constant[Creating vocabulary.]]] variable[counter] assign[=] call[name[Counter], parameter[]] for taget[name[c]] in starred[name[sentences]] begin[:] call[name[counter].update, parameter[name[c]]] call[name[tl].logging.info, parameter[binary_operation[constant[ Total words: %d] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[counter]]]]]] variable[word_counts] assign[=] <ast.ListComp object at 0x7da20c992920> call[name[word_counts].sort, parameter[]] variable[word_counts] assign[=] binary_operation[list[[<ast.Tuple object at 0x7da20c9905b0>]] + name[word_counts]] call[name[tl].logging.info, parameter[binary_operation[constant[ Words in vocabulary: %d] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[word_counts]]]]]] with call[name[tf].gfile.FastGFile, parameter[name[word_counts_output_file], constant[w]]] begin[:] call[name[f].write, parameter[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20c991720>]]]] call[name[tl].logging.info, parameter[binary_operation[constant[ Wrote vocabulary file: %s] <ast.Mod object at 0x7da2590d6920> name[word_counts_output_file]]]] variable[reverse_vocab] assign[=] <ast.ListComp object at 0x7da20c991bd0> variable[unk_id] assign[=] call[name[len], parameter[name[reverse_vocab]]] variable[vocab_dict] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da20c9912a0>]] variable[vocab] assign[=] call[name[SimpleVocabulary], parameter[name[vocab_dict], name[unk_id]]] return[name[vocab]]
keyword[def] identifier[create_vocab] ( identifier[sentences] , identifier[word_counts_output_file] , identifier[min_word_count] = literal[int] ): literal[string] identifier[tl] . identifier[logging] . identifier[info] ( literal[string] ) identifier[counter] = identifier[Counter] () keyword[for] identifier[c] keyword[in] identifier[sentences] : identifier[counter] . identifier[update] ( identifier[c] ) identifier[tl] . identifier[logging] . identifier[info] ( literal[string] % identifier[len] ( identifier[counter] )) identifier[word_counts] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[counter] . identifier[items] () keyword[if] identifier[x] [ literal[int] ]>= identifier[min_word_count] ] identifier[word_counts] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] ) identifier[word_counts] =[( literal[string] , literal[int] )]+ identifier[word_counts] identifier[tl] . identifier[logging] . identifier[info] ( literal[string] % identifier[len] ( identifier[word_counts] )) keyword[with] identifier[tf] . identifier[gfile] . identifier[FastGFile] ( identifier[word_counts_output_file] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] . identifier[join] ([ literal[string] %( identifier[w] , identifier[c] ) keyword[for] identifier[w] , identifier[c] keyword[in] identifier[word_counts] ])) identifier[tl] . identifier[logging] . 
identifier[info] ( literal[string] % identifier[word_counts_output_file] ) identifier[reverse_vocab] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[word_counts] ] identifier[unk_id] = identifier[len] ( identifier[reverse_vocab] ) identifier[vocab_dict] = identifier[dict] ([( identifier[x] , identifier[y] ) keyword[for] ( identifier[y] , identifier[x] ) keyword[in] identifier[enumerate] ( identifier[reverse_vocab] )]) identifier[vocab] = identifier[SimpleVocabulary] ( identifier[vocab_dict] , identifier[unk_id] ) keyword[return] identifier[vocab]
def create_vocab(sentences, word_counts_output_file, min_word_count=1): """Creates the vocabulary of word to word_id. See ``tutorial_tfrecord3.py``. The vocabulary is saved to disk in a text file of word counts. The id of each word in the file is its corresponding 0-based line number. Parameters ------------ sentences : list of list of str All sentences for creating the vocabulary. word_counts_output_file : str The file name. min_word_count : int Minimum number of occurrences for a word. Returns -------- :class:`SimpleVocabulary` The simple vocabulary object, see :class:`Vocabulary` for more. Examples -------- Pre-process sentences >>> captions = ["one two , three", "four five five"] >>> processed_capts = [] >>> for c in captions: >>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>") >>> processed_capts.append(c) >>> print(processed_capts) ...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']] Create vocabulary >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1) Creating vocabulary. Total words: 8 Words in vocabulary: 8 Wrote vocabulary file: vocab.txt Get vocabulary object >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>") INFO:tensorflow:Initializing vocabulary from file: vocab.txt [TL] Vocabulary from vocab.txt : <S> </S> <UNK> vocabulary with 10 words (includes start_word, end_word, unk_word) start_id: 2 end_id: 3 unk_id: 9 pad_id: 0 """ tl.logging.info('Creating vocabulary.') counter = Counter() for c in sentences: counter.update(c) # depends on [control=['for'], data=['c']] # tl.logging.info('c',c) tl.logging.info(' Total words: %d' % len(counter)) # Filter uncommon words and sort by descending count. 
word_counts = [x for x in counter.items() if x[1] >= min_word_count] word_counts.sort(key=lambda x: x[1], reverse=True) word_counts = [('<PAD>', 0)] + word_counts # 1st id should be reserved for padding # tl.logging.info(word_counts) tl.logging.info(' Words in vocabulary: %d' % len(word_counts)) # Write out the word counts file. with tf.gfile.FastGFile(word_counts_output_file, 'w') as f: f.write('\n'.join(['%s %d' % (w, c) for (w, c) in word_counts])) # depends on [control=['with'], data=['f']] tl.logging.info(' Wrote vocabulary file: %s' % word_counts_output_file) # Create the vocabulary dictionary. reverse_vocab = [x[0] for x in word_counts] unk_id = len(reverse_vocab) vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)]) vocab = SimpleVocabulary(vocab_dict, unk_id) return vocab
def create_reference_server_flask_app(cfg): """Create referece server Flask application with one or more IIIF handlers.""" # Create Flask app app = Flask(__name__) Flask.secret_key = "SECRET_HERE" app.debug = cfg.debug # Install request handlers client_prefixes = dict() for api_version in cfg.api_versions: handler_config = Config(cfg) handler_config.api_version = api_version handler_config.klass_name = 'pil' handler_config.auth_type = 'none' # Set same prefix on local server as expected on iiif.io handler_config.prefix = "api/image/%s/example/reference" % (api_version) handler_config.client_prefix = handler_config.prefix add_handler(app, handler_config) return app
def function[create_reference_server_flask_app, parameter[cfg]]: constant[Create referece server Flask application with one or more IIIF handlers.] variable[app] assign[=] call[name[Flask], parameter[name[__name__]]] name[Flask].secret_key assign[=] constant[SECRET_HERE] name[app].debug assign[=] name[cfg].debug variable[client_prefixes] assign[=] call[name[dict], parameter[]] for taget[name[api_version]] in starred[name[cfg].api_versions] begin[:] variable[handler_config] assign[=] call[name[Config], parameter[name[cfg]]] name[handler_config].api_version assign[=] name[api_version] name[handler_config].klass_name assign[=] constant[pil] name[handler_config].auth_type assign[=] constant[none] name[handler_config].prefix assign[=] binary_operation[constant[api/image/%s/example/reference] <ast.Mod object at 0x7da2590d6920> name[api_version]] name[handler_config].client_prefix assign[=] name[handler_config].prefix call[name[add_handler], parameter[name[app], name[handler_config]]] return[name[app]]
keyword[def] identifier[create_reference_server_flask_app] ( identifier[cfg] ): literal[string] identifier[app] = identifier[Flask] ( identifier[__name__] ) identifier[Flask] . identifier[secret_key] = literal[string] identifier[app] . identifier[debug] = identifier[cfg] . identifier[debug] identifier[client_prefixes] = identifier[dict] () keyword[for] identifier[api_version] keyword[in] identifier[cfg] . identifier[api_versions] : identifier[handler_config] = identifier[Config] ( identifier[cfg] ) identifier[handler_config] . identifier[api_version] = identifier[api_version] identifier[handler_config] . identifier[klass_name] = literal[string] identifier[handler_config] . identifier[auth_type] = literal[string] identifier[handler_config] . identifier[prefix] = literal[string] %( identifier[api_version] ) identifier[handler_config] . identifier[client_prefix] = identifier[handler_config] . identifier[prefix] identifier[add_handler] ( identifier[app] , identifier[handler_config] ) keyword[return] identifier[app]
def create_reference_server_flask_app(cfg): """Create referece server Flask application with one or more IIIF handlers.""" # Create Flask app app = Flask(__name__) Flask.secret_key = 'SECRET_HERE' app.debug = cfg.debug # Install request handlers client_prefixes = dict() for api_version in cfg.api_versions: handler_config = Config(cfg) handler_config.api_version = api_version handler_config.klass_name = 'pil' handler_config.auth_type = 'none' # Set same prefix on local server as expected on iiif.io handler_config.prefix = 'api/image/%s/example/reference' % api_version handler_config.client_prefix = handler_config.prefix add_handler(app, handler_config) # depends on [control=['for'], data=['api_version']] return app
def class_params(cls, hidden=True): """ Gets all class parameters, and their :class:`Parameter` instances. :return: dict of the form: ``{<name>: <Parameter instance>, ... }`` :rtype: :class:`dict` .. note:: The :class:`Parameter` instances returned do not have a value, only a default value. To get a list of an **instance's** parameters and values, use :meth:`params` instead. """ param_names = cls.class_param_names(hidden=hidden) return dict( (name, getattr(cls, name)) for name in param_names )
def function[class_params, parameter[cls, hidden]]: constant[ Gets all class parameters, and their :class:`Parameter` instances. :return: dict of the form: ``{<name>: <Parameter instance>, ... }`` :rtype: :class:`dict` .. note:: The :class:`Parameter` instances returned do not have a value, only a default value. To get a list of an **instance's** parameters and values, use :meth:`params` instead. ] variable[param_names] assign[=] call[name[cls].class_param_names, parameter[]] return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18f58c0d0>]]]
keyword[def] identifier[class_params] ( identifier[cls] , identifier[hidden] = keyword[True] ): literal[string] identifier[param_names] = identifier[cls] . identifier[class_param_names] ( identifier[hidden] = identifier[hidden] ) keyword[return] identifier[dict] ( ( identifier[name] , identifier[getattr] ( identifier[cls] , identifier[name] )) keyword[for] identifier[name] keyword[in] identifier[param_names] )
def class_params(cls, hidden=True): """ Gets all class parameters, and their :class:`Parameter` instances. :return: dict of the form: ``{<name>: <Parameter instance>, ... }`` :rtype: :class:`dict` .. note:: The :class:`Parameter` instances returned do not have a value, only a default value. To get a list of an **instance's** parameters and values, use :meth:`params` instead. """ param_names = cls.class_param_names(hidden=hidden) return dict(((name, getattr(cls, name)) for name in param_names))
def sample_stats_prior_to_xarray(self): """Convert sample_stats_prior samples to xarray.""" data = self.sample_stats_prior if not isinstance(data, dict): raise TypeError("DictConverter.sample_stats_prior is not a dictionary") return dict_to_dataset(data, library=None, coords=self.coords, dims=self.dims)
def function[sample_stats_prior_to_xarray, parameter[self]]: constant[Convert sample_stats_prior samples to xarray.] variable[data] assign[=] name[self].sample_stats_prior if <ast.UnaryOp object at 0x7da1b1bbd1e0> begin[:] <ast.Raise object at 0x7da1b1bbc430> return[call[name[dict_to_dataset], parameter[name[data]]]]
keyword[def] identifier[sample_stats_prior_to_xarray] ( identifier[self] ): literal[string] identifier[data] = identifier[self] . identifier[sample_stats_prior] keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[dict_to_dataset] ( identifier[data] , identifier[library] = keyword[None] , identifier[coords] = identifier[self] . identifier[coords] , identifier[dims] = identifier[self] . identifier[dims] )
def sample_stats_prior_to_xarray(self): """Convert sample_stats_prior samples to xarray.""" data = self.sample_stats_prior if not isinstance(data, dict): raise TypeError('DictConverter.sample_stats_prior is not a dictionary') # depends on [control=['if'], data=[]] return dict_to_dataset(data, library=None, coords=self.coords, dims=self.dims)
def token(self, id, **kwargs): """ Retrieve a service request ID from a token. >>> Three('api.city.gov').token('12345') {'service_request_id': {'for': {'token': '12345'}}} """ data = self.get('tokens', id, **kwargs) return data
def function[token, parameter[self, id]]: constant[ Retrieve a service request ID from a token. >>> Three('api.city.gov').token('12345') {'service_request_id': {'for': {'token': '12345'}}} ] variable[data] assign[=] call[name[self].get, parameter[constant[tokens], name[id]]] return[name[data]]
keyword[def] identifier[token] ( identifier[self] , identifier[id] ,** identifier[kwargs] ): literal[string] identifier[data] = identifier[self] . identifier[get] ( literal[string] , identifier[id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def token(self, id, **kwargs): """ Retrieve a service request ID from a token. >>> Three('api.city.gov').token('12345') {'service_request_id': {'for': {'token': '12345'}}} """ data = self.get('tokens', id, **kwargs) return data
def numpy2gdalint(self): """ create a dictionary for mapping numpy data types to GDAL data type codes Returns ------- dict the type map """ if not hasattr(self, '__numpy2gdalint'): tmap = {} for group in ['int', 'uint', 'float', 'complex']: for dtype in np.sctypes[group]: code = gdal_array.NumericTypeCodeToGDALTypeCode(dtype) if code is not None: tmap[dtype().dtype.name] = code self.__numpy2gdalint = tmap return self.__numpy2gdalint
def function[numpy2gdalint, parameter[self]]: constant[ create a dictionary for mapping numpy data types to GDAL data type codes Returns ------- dict the type map ] if <ast.UnaryOp object at 0x7da18c4ce9e0> begin[:] variable[tmap] assign[=] dictionary[[], []] for taget[name[group]] in starred[list[[<ast.Constant object at 0x7da18c4cd5d0>, <ast.Constant object at 0x7da18c4ccd60>, <ast.Constant object at 0x7da18c4cc580>, <ast.Constant object at 0x7da18c4cffa0>]]] begin[:] for taget[name[dtype]] in starred[call[name[np].sctypes][name[group]]] begin[:] variable[code] assign[=] call[name[gdal_array].NumericTypeCodeToGDALTypeCode, parameter[name[dtype]]] if compare[name[code] is_not constant[None]] begin[:] call[name[tmap]][call[name[dtype], parameter[]].dtype.name] assign[=] name[code] name[self].__numpy2gdalint assign[=] name[tmap] return[name[self].__numpy2gdalint]
keyword[def] identifier[numpy2gdalint] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[tmap] ={} keyword[for] identifier[group] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[for] identifier[dtype] keyword[in] identifier[np] . identifier[sctypes] [ identifier[group] ]: identifier[code] = identifier[gdal_array] . identifier[NumericTypeCodeToGDALTypeCode] ( identifier[dtype] ) keyword[if] identifier[code] keyword[is] keyword[not] keyword[None] : identifier[tmap] [ identifier[dtype] (). identifier[dtype] . identifier[name] ]= identifier[code] identifier[self] . identifier[__numpy2gdalint] = identifier[tmap] keyword[return] identifier[self] . identifier[__numpy2gdalint]
def numpy2gdalint(self): """ create a dictionary for mapping numpy data types to GDAL data type codes Returns ------- dict the type map """ if not hasattr(self, '__numpy2gdalint'): tmap = {} for group in ['int', 'uint', 'float', 'complex']: for dtype in np.sctypes[group]: code = gdal_array.NumericTypeCodeToGDALTypeCode(dtype) if code is not None: tmap[dtype().dtype.name] = code # depends on [control=['if'], data=['code']] # depends on [control=['for'], data=['dtype']] # depends on [control=['for'], data=['group']] self.__numpy2gdalint = tmap # depends on [control=['if'], data=[]] return self.__numpy2gdalint
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor ) -> Generator[Tuple[str, TimeAndRecord], None, None]: """ Uses the given iteratable events and the data processor convert the event into a list of Records along with its identity and time. :param events: iteratable events. :param data_processor: DataProcessor to process each event in events. :return: yields Tuple[Identity, TimeAndRecord] for all Records in events, """ schema_loader = SchemaLoader() stream_bts_name = schema_loader.add_schema_spec(self._stream_bts) stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object( stream_bts_name) for event in events: try: for record in data_processor.process_data(event): try: id = stream_transformer_schema.get_identity(record) time = stream_transformer_schema.get_time(record) yield (id, (time, record)) except Exception as err: logging.error('{} in parsing Record {}.'.format(err, record)) except Exception as err: logging.error('{} in parsing Event {}.'.format(err, event))
def function[get_per_identity_records, parameter[self, events, data_processor]]: constant[ Uses the given iteratable events and the data processor convert the event into a list of Records along with its identity and time. :param events: iteratable events. :param data_processor: DataProcessor to process each event in events. :return: yields Tuple[Identity, TimeAndRecord] for all Records in events, ] variable[schema_loader] assign[=] call[name[SchemaLoader], parameter[]] variable[stream_bts_name] assign[=] call[name[schema_loader].add_schema_spec, parameter[name[self]._stream_bts]] <ast.AnnAssign object at 0x7da20c7c8820> for taget[name[event]] in starred[name[events]] begin[:] <ast.Try object at 0x7da20c7cb2b0>
keyword[def] identifier[get_per_identity_records] ( identifier[self] , identifier[events] : identifier[Iterable] , identifier[data_processor] : identifier[DataProcessor] )-> identifier[Generator] [ identifier[Tuple] [ identifier[str] , identifier[TimeAndRecord] ], keyword[None] , keyword[None] ]: literal[string] identifier[schema_loader] = identifier[SchemaLoader] () identifier[stream_bts_name] = identifier[schema_loader] . identifier[add_schema_spec] ( identifier[self] . identifier[_stream_bts] ) identifier[stream_transformer_schema] : identifier[StreamingTransformerSchema] = identifier[schema_loader] . identifier[get_schema_object] ( identifier[stream_bts_name] ) keyword[for] identifier[event] keyword[in] identifier[events] : keyword[try] : keyword[for] identifier[record] keyword[in] identifier[data_processor] . identifier[process_data] ( identifier[event] ): keyword[try] : identifier[id] = identifier[stream_transformer_schema] . identifier[get_identity] ( identifier[record] ) identifier[time] = identifier[stream_transformer_schema] . identifier[get_time] ( identifier[record] ) keyword[yield] ( identifier[id] ,( identifier[time] , identifier[record] )) keyword[except] identifier[Exception] keyword[as] identifier[err] : identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[err] , identifier[record] )) keyword[except] identifier[Exception] keyword[as] identifier[err] : identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[err] , identifier[event] ))
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor) -> Generator[Tuple[str, TimeAndRecord], None, None]: """ Uses the given iteratable events and the data processor convert the event into a list of Records along with its identity and time. :param events: iteratable events. :param data_processor: DataProcessor to process each event in events. :return: yields Tuple[Identity, TimeAndRecord] for all Records in events, """ schema_loader = SchemaLoader() stream_bts_name = schema_loader.add_schema_spec(self._stream_bts) stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object(stream_bts_name) for event in events: try: for record in data_processor.process_data(event): try: id = stream_transformer_schema.get_identity(record) time = stream_transformer_schema.get_time(record) yield (id, (time, record)) # depends on [control=['try'], data=[]] except Exception as err: logging.error('{} in parsing Record {}.'.format(err, record)) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['record']] # depends on [control=['try'], data=[]] except Exception as err: logging.error('{} in parsing Event {}.'.format(err, event)) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['event']]