code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate a boolean raster mask for the specified NLCD LULC filter.

    Parameters
    ----------
    nlcd_ds : gdal.Dataset
        Dataset whose band 1 holds NLCD land-use/land-cover class codes.
    filter : str
        One of 'rock', 'rock+ice', 'rock+ice+water', 'not_forest',
        'not_forest+not_water'.  Any other value prints a warning and
        yields a None mask (same as the original behavior).
    out_fn : str, optional
        If given, the raw LULC array is written out as a GeoTiff.

    Returns
    -------
    numpy.ndarray or None
        Boolean mask, True where the filter matches, or None for an
        invalid filter name.
    """
    print("Loading NLCD LULC")
    b = nlcd_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    #NLCD class codes used below:
    #11 - open water, includes rivers
    #12 - ice
    #31 - rock
    #41/42/43 - deciduous/evergreen/mixed forest
    #Data dictionary: filter name -> (class codes, invert flag).
    #'not_*' entries mask everything EXCEPT the listed classes.
    #Using 'rock+ice+water' preserves the most pixels, although could be
    #problematic over areas with lakes
    filter_classes = {
        'rock': ([31], False),
        'rock+ice': ([31, 12], False),
        'rock+ice+water': ([31, 12, 11], False),
        'not_forest': ([41, 42, 43], True),
        'not_forest+not_water': ([41, 42, 43, 11], True),
    }
    if filter in filter_classes:
        classes, invert = filter_classes[filter]
        #np.isin replaces the chained np.logical_or calls in one pass
        mask = np.isin(l, classes)
        if invert:
            mask = ~mask
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
def function[get_nlcd_mask, parameter[nlcd_ds, filter, out_fn]]: constant[Generate raster mask for specified NLCD LULC filter ] call[name[print], parameter[constant[Loading NLCD LULC]]] variable[b] assign[=] call[name[nlcd_ds].GetRasterBand, parameter[constant[1]]] variable[l] assign[=] call[name[b].ReadAsArray, parameter[]] call[name[print], parameter[binary_operation[constant[Filtering NLCD LULC with: %s] <ast.Mod object at 0x7da2590d6920> name[filter]]]] if compare[name[filter] equal[==] constant[rock]] begin[:] variable[mask] assign[=] compare[name[l] equal[==] constant[31]] if compare[name[out_fn] is_not constant[None]] begin[:] call[name[print], parameter[binary_operation[constant[Writing out %s] <ast.Mod object at 0x7da2590d6920> name[out_fn]]]] call[name[iolib].writeGTiff, parameter[name[l], name[out_fn], name[nlcd_ds]]] variable[l] assign[=] constant[None] return[name[mask]]
keyword[def] identifier[get_nlcd_mask] ( identifier[nlcd_ds] , identifier[filter] = literal[string] , identifier[out_fn] = keyword[None] ): literal[string] identifier[print] ( literal[string] ) identifier[b] = identifier[nlcd_ds] . identifier[GetRasterBand] ( literal[int] ) identifier[l] = identifier[b] . identifier[ReadAsArray] () identifier[print] ( literal[string] % identifier[filter] ) keyword[if] identifier[filter] == literal[string] : identifier[mask] =( identifier[l] == literal[int] ) keyword[elif] identifier[filter] == literal[string] : identifier[mask] = identifier[np] . identifier[logical_or] (( identifier[l] == literal[int] ),( identifier[l] == literal[int] )) keyword[elif] identifier[filter] == literal[string] : identifier[mask] = identifier[np] . identifier[logical_or] ( identifier[np] . identifier[logical_or] (( identifier[l] == literal[int] ),( identifier[l] == literal[int] )),( identifier[l] == literal[int] )) keyword[elif] identifier[filter] == literal[string] : identifier[mask] =~( identifier[np] . identifier[logical_or] ( identifier[np] . identifier[logical_or] (( identifier[l] == literal[int] ),( identifier[l] == literal[int] )),( identifier[l] == literal[int] ))) keyword[elif] identifier[filter] == literal[string] : identifier[mask] =~( identifier[np] . identifier[logical_or] ( identifier[np] . identifier[logical_or] ( identifier[np] . identifier[logical_or] (( identifier[l] == literal[int] ),( identifier[l] == literal[int] )),( identifier[l] == literal[int] )),( identifier[l] == literal[int] ))) keyword[else] : identifier[print] ( literal[string] ) identifier[mask] = keyword[None] keyword[if] identifier[out_fn] keyword[is] keyword[not] keyword[None] : identifier[print] ( literal[string] % identifier[out_fn] ) identifier[iolib] . identifier[writeGTiff] ( identifier[l] , identifier[out_fn] , identifier[nlcd_ds] ) identifier[l] = keyword[None] keyword[return] identifier[mask]
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None): """Generate raster mask for specified NLCD LULC filter """ print('Loading NLCD LULC') b = nlcd_ds.GetRasterBand(1) l = b.ReadAsArray() print('Filtering NLCD LULC with: %s' % filter) #Original nlcd products have nan as ndv #12 - ice #31 - rock #11 - open water, includes rivers #52 - shrub, <5 m tall, >20% #42 - evergreeen forest #Should use data dictionary here for general masking #Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes if filter == 'rock': mask = l == 31 # depends on [control=['if'], data=[]] elif filter == 'rock+ice': mask = np.logical_or(l == 31, l == 12) # depends on [control=['if'], data=[]] elif filter == 'rock+ice+water': mask = np.logical_or(np.logical_or(l == 31, l == 12), l == 11) # depends on [control=['if'], data=[]] elif filter == 'not_forest': mask = ~np.logical_or(np.logical_or(l == 41, l == 42), l == 43) # depends on [control=['if'], data=[]] elif filter == 'not_forest+not_water': mask = ~np.logical_or(np.logical_or(np.logical_or(l == 41, l == 42), l == 43), l == 11) # depends on [control=['if'], data=[]] else: print('Invalid mask type') mask = None #Write out original data if out_fn is not None: print('Writing out %s' % out_fn) iolib.writeGTiff(l, out_fn, nlcd_ds) # depends on [control=['if'], data=['out_fn']] l = None return mask
def solve_select(expr, vars):
    """Use IAssociative.select to get key (rhs) from the data (lhs).

    This operation supports both scalars and repeated values on the LHS -
    selecting from a repeated value implies a map-like operation and returns
    a new repeated value.

    Raises:
        errors.EfilterKeyError: the key doesn't exist on the data.
        errors.EfilterNoneError: a key was selected from a null value.
        errors.EfilterError: the data is not associative at all.
    """
    # The LHS may be scalar or repeated; __solve_for_repeated normalizes
    # both cases so the comprehension below can always iterate.
    data, _ = __solve_for_repeated(expr.lhs, vars)
    # The RHS is solved eagerly down to a concrete key value.
    key = solve(expr.rhs, vars).value
    try:
        results = [associative.select(d, key)
                   for d in repeated.getvalues(data)]
    except (KeyError, AttributeError):
        # Raise a better exception for accessing a non-existent key.
        raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
    except (TypeError, ValueError):
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot select key %r from a null." % key)
        else:
            # Not the null-pointer case; let the original error propagate.
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot select keys from a non-associative value.")

    # meld flattens the per-element results back into one repeated value.
    return Result(repeated.meld(*results), ())
def function[solve_select, parameter[expr, vars]]: constant[Use IAssociative.select to get key (rhs) from the data (lhs). This operation supports both scalars and repeated values on the LHS - selecting from a repeated value implies a map-like operation and returns a new repeated value. ] <ast.Tuple object at 0x7da1b0f9d420> assign[=] call[name[__solve_for_repeated], parameter[name[expr].lhs, name[vars]]] variable[key] assign[=] call[name[solve], parameter[name[expr].rhs, name[vars]]].value <ast.Try object at 0x7da1b0f9de40> return[call[name[Result], parameter[call[name[repeated].meld, parameter[<ast.Starred object at 0x7da18c4cf9d0>]], tuple[[]]]]]
keyword[def] identifier[solve_select] ( identifier[expr] , identifier[vars] ): literal[string] identifier[data] , identifier[_] = identifier[__solve_for_repeated] ( identifier[expr] . identifier[lhs] , identifier[vars] ) identifier[key] = identifier[solve] ( identifier[expr] . identifier[rhs] , identifier[vars] ). identifier[value] keyword[try] : identifier[results] =[ identifier[associative] . identifier[select] ( identifier[d] , identifier[key] ) keyword[for] identifier[d] keyword[in] identifier[repeated] . identifier[getvalues] ( identifier[data] )] keyword[except] ( identifier[KeyError] , identifier[AttributeError] ): keyword[raise] identifier[errors] . identifier[EfilterKeyError] ( identifier[root] = identifier[expr] , identifier[key] = identifier[key] , identifier[query] = identifier[expr] . identifier[source] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[if] identifier[vars] . identifier[locals] keyword[is] keyword[None] : keyword[raise] identifier[errors] . identifier[EfilterNoneError] ( identifier[root] = identifier[expr] , identifier[query] = identifier[expr] . identifier[source] , identifier[message] = literal[string] % identifier[key] ) keyword[else] : keyword[raise] keyword[except] identifier[NotImplementedError] : keyword[raise] identifier[errors] . identifier[EfilterError] ( identifier[root] = identifier[expr] , identifier[query] = identifier[expr] . identifier[source] , identifier[message] = literal[string] ) keyword[return] identifier[Result] ( identifier[repeated] . identifier[meld] (* identifier[results] ),())
def solve_select(expr, vars): """Use IAssociative.select to get key (rhs) from the data (lhs). This operation supports both scalars and repeated values on the LHS - selecting from a repeated value implies a map-like operation and returns a new repeated value. """ (data, _) = __solve_for_repeated(expr.lhs, vars) key = solve(expr.rhs, vars).value try: results = [associative.select(d, key) for d in repeated.getvalues(data)] # depends on [control=['try'], data=[]] except (KeyError, AttributeError): # Raise a better exception for accessing a non-existent key. raise errors.EfilterKeyError(root=expr, key=key, query=expr.source) # depends on [control=['except'], data=[]] except (TypeError, ValueError): # Raise a better exception for what is probably a null pointer error. if vars.locals is None: raise errors.EfilterNoneError(root=expr, query=expr.source, message='Cannot select key %r from a null.' % key) # depends on [control=['if'], data=[]] else: raise # depends on [control=['except'], data=[]] except NotImplementedError: raise errors.EfilterError(root=expr, query=expr.source, message='Cannot select keys from a non-associative value.') # depends on [control=['except'], data=[]] return Result(repeated.meld(*results), ())
def _get_ipmitool_path(self, cmd='ipmitool'): """Get full path to the ipmitool command using the unix `which` command """ p = subprocess.Popen(["which", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return out.strip()
def function[_get_ipmitool_path, parameter[self, cmd]]: constant[Get full path to the ipmitool command using the unix `which` command ] variable[p] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da1b0a844f0>, <ast.Name object at 0x7da1b0a84580>]]]] <ast.Tuple object at 0x7da1b0a84730> assign[=] call[name[p].communicate, parameter[]] return[call[name[out].strip, parameter[]]]
keyword[def] identifier[_get_ipmitool_path] ( identifier[self] , identifier[cmd] = literal[string] ): literal[string] identifier[p] = identifier[subprocess] . identifier[Popen] ([ literal[string] , identifier[cmd] ], identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ) identifier[out] , identifier[err] = identifier[p] . identifier[communicate] () keyword[return] identifier[out] . identifier[strip] ()
def _get_ipmitool_path(self, cmd='ipmitool'): """Get full path to the ipmitool command using the unix `which` command """ p = subprocess.Popen(['which', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() return out.strip()
def _is_converted_class(java_class): """ Checks if the given Java class is one we *might* have set up """ if not java_class: return False return ( JAVA_MAPS_PATTERN.match(java_class) is not None or JAVA_LISTS_PATTERN.match(java_class) is not None or JAVA_SETS_PATTERN.match(java_class) is not None )
def function[_is_converted_class, parameter[java_class]]: constant[ Checks if the given Java class is one we *might* have set up ] if <ast.UnaryOp object at 0x7da1b03491e0> begin[:] return[constant[False]] return[<ast.BoolOp object at 0x7da1b0349e40>]
keyword[def] identifier[_is_converted_class] ( identifier[java_class] ): literal[string] keyword[if] keyword[not] identifier[java_class] : keyword[return] keyword[False] keyword[return] ( identifier[JAVA_MAPS_PATTERN] . identifier[match] ( identifier[java_class] ) keyword[is] keyword[not] keyword[None] keyword[or] identifier[JAVA_LISTS_PATTERN] . identifier[match] ( identifier[java_class] ) keyword[is] keyword[not] keyword[None] keyword[or] identifier[JAVA_SETS_PATTERN] . identifier[match] ( identifier[java_class] ) keyword[is] keyword[not] keyword[None] )
def _is_converted_class(java_class): """ Checks if the given Java class is one we *might* have set up """ if not java_class: return False # depends on [control=['if'], data=[]] return JAVA_MAPS_PATTERN.match(java_class) is not None or JAVA_LISTS_PATTERN.match(java_class) is not None or JAVA_SETS_PATTERN.match(java_class) is not None
async def forward(self, n, *, timeout=None) -> int:
    r"""Skip over the next *n* rows.

    :param float timeout: Optional timeout value in seconds.

    :return: A number of rows actually skipped over (<= *n*).
    """
    self._check_ready()

    # Guard clause: a non-positive skip count is a caller error.
    if n <= 0:
        raise exceptions.InterfaceError('n must be greater than zero')

    # Ask the backend to move this cursor's portal forward by n rows.
    command = 'MOVE FORWARD {:d} {}'.format(n, self._portal_name)
    status = await self._connection._protocol.query(command, timeout)

    # The status string looks like "MOVE <count>"; the count reports how
    # far the cursor actually advanced.
    skipped = int(status.split()[1])
    if skipped < n:
        # Fewer rows than requested means we hit the end of the result set.
        self._exhausted = True
    return skipped
<ast.AsyncFunctionDef object at 0x7da1b1957fd0>
keyword[async] keyword[def] identifier[forward] ( identifier[self] , identifier[n] ,*, identifier[timeout] = keyword[None] )-> identifier[int] : literal[string] identifier[self] . identifier[_check_ready] () keyword[if] identifier[n] <= literal[int] : keyword[raise] identifier[exceptions] . identifier[InterfaceError] ( literal[string] ) identifier[protocol] = identifier[self] . identifier[_connection] . identifier[_protocol] identifier[status] = keyword[await] identifier[protocol] . identifier[query] ( literal[string] . identifier[format] ( identifier[n] , identifier[self] . identifier[_portal_name] ), identifier[timeout] ) identifier[advanced] = identifier[int] ( identifier[status] . identifier[split] ()[ literal[int] ]) keyword[if] identifier[advanced] < identifier[n] : identifier[self] . identifier[_exhausted] = keyword[True] keyword[return] identifier[advanced]
async def forward(self, n, *, timeout=None) -> int: """Skip over the next *n* rows. :param float timeout: Optional timeout value in seconds. :return: A number of rows actually skipped over (<= *n*). """ self._check_ready() if n <= 0: raise exceptions.InterfaceError('n must be greater than zero') # depends on [control=['if'], data=[]] protocol = self._connection._protocol status = await protocol.query('MOVE FORWARD {:d} {}'.format(n, self._portal_name), timeout) advanced = int(status.split()[1]) if advanced < n: self._exhausted = True # depends on [control=['if'], data=[]] return advanced
def _get_policy_dict(policy):
    '''Returns a dictionary representation of a policy'''
    profile_dict = {'name': policy.name,
                    'description': policy.description,
                    'resource_type': policy.resourceType.resourceType}
    subprofile_dicts = []
    # Only capability-based profiles with sub-profile constraints carry
    # capability details worth expanding.
    is_capability_profile = (
        isinstance(policy, pbm.profile.CapabilityBasedProfile) and
        isinstance(policy.constraints,
                   pbm.profile.SubProfileCapabilityConstraints))
    if is_capability_profile:
        for sub in policy.constraints.subProfiles:
            caps = []
            for capability in sub.capability:
                # We assume there is one constraint with one value set.
                raw_val = capability.constraint[0].propertyInstance[0].value
                if isinstance(raw_val, pbm.capability.types.Range):
                    setting = {'type': 'range',
                               'min': raw_val.min,
                               'max': raw_val.max}
                elif isinstance(raw_val, pbm.capability.types.DiscreteSet):
                    setting = {'type': 'set', 'values': raw_val.values}
                else:
                    setting = {'type': 'scalar', 'value': raw_val}
                caps.append({'namespace': capability.id.namespace,
                             'id': capability.id.id,
                             'setting': setting})
            subprofile_dicts.append({'name': sub.name,
                                     'force_provision': sub.forceProvision,
                                     'capabilities': caps})
    profile_dict['subprofiles'] = subprofile_dicts
    return profile_dict
def function[_get_policy_dict, parameter[policy]]: constant[Returns a dictionary representation of a policy] variable[profile_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f00070>, <ast.Constant object at 0x7da207f023e0>, <ast.Constant object at 0x7da207f002e0>], [<ast.Attribute object at 0x7da207f00490>, <ast.Attribute object at 0x7da207f03970>, <ast.Attribute object at 0x7da207f00040>]] variable[subprofile_dicts] assign[=] list[[]] if <ast.BoolOp object at 0x7da207f004f0> begin[:] for taget[name[subprofile]] in starred[name[policy].constraints.subProfiles] begin[:] variable[subprofile_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f00790>, <ast.Constant object at 0x7da207f01120>], [<ast.Attribute object at 0x7da207f038e0>, <ast.Attribute object at 0x7da207f02f50>]] variable[cap_dicts] assign[=] list[[]] for taget[name[cap]] in starred[name[subprofile].capability] begin[:] variable[cap_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f03190>, <ast.Constant object at 0x7da207f03400>], [<ast.Attribute object at 0x7da207f01510>, <ast.Attribute object at 0x7da207f03460>]] variable[val] assign[=] call[call[name[cap].constraint][constant[0]].propertyInstance][constant[0]].value if call[name[isinstance], parameter[name[val], name[pbm].capability.types.Range]] begin[:] variable[val_dict] assign[=] dictionary[[<ast.Constant object at 0x7da207f01ba0>, <ast.Constant object at 0x7da207f02620>, <ast.Constant object at 0x7da207f02260>], [<ast.Constant object at 0x7da207f02a10>, <ast.Attribute object at 0x7da207f02050>, <ast.Attribute object at 0x7da207f03a00>]] call[name[cap_dict]][constant[setting]] assign[=] name[val_dict] call[name[cap_dicts].append, parameter[name[cap_dict]]] call[name[subprofile_dict]][constant[capabilities]] assign[=] name[cap_dicts] call[name[subprofile_dicts].append, parameter[name[subprofile_dict]]] call[name[profile_dict]][constant[subprofiles]] assign[=] name[subprofile_dicts] return[name[profile_dict]]
keyword[def] identifier[_get_policy_dict] ( identifier[policy] ): literal[string] identifier[profile_dict] ={ literal[string] : identifier[policy] . identifier[name] , literal[string] : identifier[policy] . identifier[description] , literal[string] : identifier[policy] . identifier[resourceType] . identifier[resourceType] } identifier[subprofile_dicts] =[] keyword[if] identifier[isinstance] ( identifier[policy] , identifier[pbm] . identifier[profile] . identifier[CapabilityBasedProfile] ) keyword[and] identifier[isinstance] ( identifier[policy] . identifier[constraints] , identifier[pbm] . identifier[profile] . identifier[SubProfileCapabilityConstraints] ): keyword[for] identifier[subprofile] keyword[in] identifier[policy] . identifier[constraints] . identifier[subProfiles] : identifier[subprofile_dict] ={ literal[string] : identifier[subprofile] . identifier[name] , literal[string] : identifier[subprofile] . identifier[forceProvision] } identifier[cap_dicts] =[] keyword[for] identifier[cap] keyword[in] identifier[subprofile] . identifier[capability] : identifier[cap_dict] ={ literal[string] : identifier[cap] . identifier[id] . identifier[namespace] , literal[string] : identifier[cap] . identifier[id] . identifier[id] } identifier[val] = identifier[cap] . identifier[constraint] [ literal[int] ]. identifier[propertyInstance] [ literal[int] ]. identifier[value] keyword[if] identifier[isinstance] ( identifier[val] , identifier[pbm] . identifier[capability] . identifier[types] . identifier[Range] ): identifier[val_dict] ={ literal[string] : literal[string] , literal[string] : identifier[val] . identifier[min] , literal[string] : identifier[val] . identifier[max] } keyword[elif] identifier[isinstance] ( identifier[val] , identifier[pbm] . identifier[capability] . identifier[types] . identifier[DiscreteSet] ): identifier[val_dict] ={ literal[string] : literal[string] , literal[string] : identifier[val] . 
identifier[values] } keyword[else] : identifier[val_dict] ={ literal[string] : literal[string] , literal[string] : identifier[val] } identifier[cap_dict] [ literal[string] ]= identifier[val_dict] identifier[cap_dicts] . identifier[append] ( identifier[cap_dict] ) identifier[subprofile_dict] [ literal[string] ]= identifier[cap_dicts] identifier[subprofile_dicts] . identifier[append] ( identifier[subprofile_dict] ) identifier[profile_dict] [ literal[string] ]= identifier[subprofile_dicts] keyword[return] identifier[profile_dict]
def _get_policy_dict(policy): """Returns a dictionary representation of a policy""" profile_dict = {'name': policy.name, 'description': policy.description, 'resource_type': policy.resourceType.resourceType} subprofile_dicts = [] if isinstance(policy, pbm.profile.CapabilityBasedProfile) and isinstance(policy.constraints, pbm.profile.SubProfileCapabilityConstraints): for subprofile in policy.constraints.subProfiles: subprofile_dict = {'name': subprofile.name, 'force_provision': subprofile.forceProvision} cap_dicts = [] for cap in subprofile.capability: cap_dict = {'namespace': cap.id.namespace, 'id': cap.id.id} # We assume there is one constraint with one value set val = cap.constraint[0].propertyInstance[0].value if isinstance(val, pbm.capability.types.Range): val_dict = {'type': 'range', 'min': val.min, 'max': val.max} # depends on [control=['if'], data=[]] elif isinstance(val, pbm.capability.types.DiscreteSet): val_dict = {'type': 'set', 'values': val.values} # depends on [control=['if'], data=[]] else: val_dict = {'type': 'scalar', 'value': val} cap_dict['setting'] = val_dict cap_dicts.append(cap_dict) # depends on [control=['for'], data=['cap']] subprofile_dict['capabilities'] = cap_dicts subprofile_dicts.append(subprofile_dict) # depends on [control=['for'], data=['subprofile']] # depends on [control=['if'], data=[]] profile_dict['subprofiles'] = subprofile_dicts return profile_dict
def make_image_converters(self, image, configuration, harpoon_spec):
    """Make converters for this image and add them to the configuration

    Registers two lazy converters on ``configuration``:

    * one at ``["images", <image>]`` that normalises this image's options
      into an image spec, and
    * one at ``["images", <image>, "tasks"]`` that normalises this image's
      task definitions and tags each task with the image name.
    """
    def convert_image(path, val):
        # Normalise one image's options against the whole configuration.
        log.info("Converting %s", path)
        everything = path.configuration.root().wrapped()
        meta = Meta(everything, [])
        # Mark this path as started on the converter registry.
        # NOTE(review): presumably guards against recursive conversion of
        # the same path -- confirm against the converters API.
        configuration.converters.started(path)

        # Layer global configuration values under this image's own values
        # (images themselves are excluded to avoid self-nesting).
        base = path.configuration.root().wrapped()
        base.update(configuration.as_dict(ignore=["images"]))
        base.update(val.as_dict(ignore=["images"]))

        # Self-reference so options can refer back to the current image.
        base["__image__"] = base
        everything["__image__"] = base

        base["harpoon"] = configuration["harpoon"]
        base["configuration"] = configuration
        return harpoon_spec.image_spec.normalise(meta.at("images").at(image), base)

    converter = Converter(convert=convert_image, convert_path=["images", image])
    configuration.add_converter(converter)

    def convert_tasks(path, val):
        # Normalise this image's task definitions; `available_actions` is a
        # module-level registry of runnable actions.
        spec = harpoon_spec.tasks_spec(available_actions)
        meta = Meta(path.configuration.root(), []).at("images").at(image).at("tasks")
        configuration.converters.started(path)
        tasks = spec.normalise(meta, val)
        # Each task remembers which image it belongs to.
        for task in tasks.values():
            task.image = image
        return tasks

    converter = Converter(convert=convert_tasks, convert_path=["images", image, "tasks"])
    configuration.add_converter(converter)
def function[make_image_converters, parameter[self, image, configuration, harpoon_spec]]: constant[Make converters for this image and add them to the configuration] def function[convert_image, parameter[path, val]]: call[name[log].info, parameter[constant[Converting %s], name[path]]] variable[everything] assign[=] call[call[name[path].configuration.root, parameter[]].wrapped, parameter[]] variable[meta] assign[=] call[name[Meta], parameter[name[everything], list[[]]]] call[name[configuration].converters.started, parameter[name[path]]] variable[base] assign[=] call[call[name[path].configuration.root, parameter[]].wrapped, parameter[]] call[name[base].update, parameter[call[name[configuration].as_dict, parameter[]]]] call[name[base].update, parameter[call[name[val].as_dict, parameter[]]]] call[name[base]][constant[__image__]] assign[=] name[base] call[name[everything]][constant[__image__]] assign[=] name[base] call[name[base]][constant[harpoon]] assign[=] call[name[configuration]][constant[harpoon]] call[name[base]][constant[configuration]] assign[=] name[configuration] return[call[name[harpoon_spec].image_spec.normalise, parameter[call[call[name[meta].at, parameter[constant[images]]].at, parameter[name[image]]], name[base]]]] variable[converter] assign[=] call[name[Converter], parameter[]] call[name[configuration].add_converter, parameter[name[converter]]] def function[convert_tasks, parameter[path, val]]: variable[spec] assign[=] call[name[harpoon_spec].tasks_spec, parameter[name[available_actions]]] variable[meta] assign[=] call[call[call[call[name[Meta], parameter[call[name[path].configuration.root, parameter[]], list[[]]]].at, parameter[constant[images]]].at, parameter[name[image]]].at, parameter[constant[tasks]]] call[name[configuration].converters.started, parameter[name[path]]] variable[tasks] assign[=] call[name[spec].normalise, parameter[name[meta], name[val]]] for taget[name[task]] in starred[call[name[tasks].values, parameter[]]] begin[:] name[task].image 
assign[=] name[image] return[name[tasks]] variable[converter] assign[=] call[name[Converter], parameter[]] call[name[configuration].add_converter, parameter[name[converter]]]
keyword[def] identifier[make_image_converters] ( identifier[self] , identifier[image] , identifier[configuration] , identifier[harpoon_spec] ): literal[string] keyword[def] identifier[convert_image] ( identifier[path] , identifier[val] ): identifier[log] . identifier[info] ( literal[string] , identifier[path] ) identifier[everything] = identifier[path] . identifier[configuration] . identifier[root] (). identifier[wrapped] () identifier[meta] = identifier[Meta] ( identifier[everything] ,[]) identifier[configuration] . identifier[converters] . identifier[started] ( identifier[path] ) identifier[base] = identifier[path] . identifier[configuration] . identifier[root] (). identifier[wrapped] () identifier[base] . identifier[update] ( identifier[configuration] . identifier[as_dict] ( identifier[ignore] =[ literal[string] ])) identifier[base] . identifier[update] ( identifier[val] . identifier[as_dict] ( identifier[ignore] =[ literal[string] ])) identifier[base] [ literal[string] ]= identifier[base] identifier[everything] [ literal[string] ]= identifier[base] identifier[base] [ literal[string] ]= identifier[configuration] [ literal[string] ] identifier[base] [ literal[string] ]= identifier[configuration] keyword[return] identifier[harpoon_spec] . identifier[image_spec] . identifier[normalise] ( identifier[meta] . identifier[at] ( literal[string] ). identifier[at] ( identifier[image] ), identifier[base] ) identifier[converter] = identifier[Converter] ( identifier[convert] = identifier[convert_image] , identifier[convert_path] =[ literal[string] , identifier[image] ]) identifier[configuration] . identifier[add_converter] ( identifier[converter] ) keyword[def] identifier[convert_tasks] ( identifier[path] , identifier[val] ): identifier[spec] = identifier[harpoon_spec] . identifier[tasks_spec] ( identifier[available_actions] ) identifier[meta] = identifier[Meta] ( identifier[path] . identifier[configuration] . identifier[root] (),[]). identifier[at] ( literal[string] ). 
identifier[at] ( identifier[image] ). identifier[at] ( literal[string] ) identifier[configuration] . identifier[converters] . identifier[started] ( identifier[path] ) identifier[tasks] = identifier[spec] . identifier[normalise] ( identifier[meta] , identifier[val] ) keyword[for] identifier[task] keyword[in] identifier[tasks] . identifier[values] (): identifier[task] . identifier[image] = identifier[image] keyword[return] identifier[tasks] identifier[converter] = identifier[Converter] ( identifier[convert] = identifier[convert_tasks] , identifier[convert_path] =[ literal[string] , identifier[image] , literal[string] ]) identifier[configuration] . identifier[add_converter] ( identifier[converter] )
def make_image_converters(self, image, configuration, harpoon_spec): """Make converters for this image and add them to the configuration""" def convert_image(path, val): log.info('Converting %s', path) everything = path.configuration.root().wrapped() meta = Meta(everything, []) configuration.converters.started(path) base = path.configuration.root().wrapped() base.update(configuration.as_dict(ignore=['images'])) base.update(val.as_dict(ignore=['images'])) base['__image__'] = base everything['__image__'] = base base['harpoon'] = configuration['harpoon'] base['configuration'] = configuration return harpoon_spec.image_spec.normalise(meta.at('images').at(image), base) converter = Converter(convert=convert_image, convert_path=['images', image]) configuration.add_converter(converter) def convert_tasks(path, val): spec = harpoon_spec.tasks_spec(available_actions) meta = Meta(path.configuration.root(), []).at('images').at(image).at('tasks') configuration.converters.started(path) tasks = spec.normalise(meta, val) for task in tasks.values(): task.image = image # depends on [control=['for'], data=['task']] return tasks converter = Converter(convert=convert_tasks, convert_path=['images', image, 'tasks']) configuration.add_converter(converter)
def sign_json(self, json):
    """Signs a JSON object.

    NOTE: The object is modified in-place and the return value can be
    ignored.

    The signature is computed over the canonical JSON encoding of the
    object with its ``signatures`` and ``unsigned`` keys removed; both are
    restored afterwards, with the new signature added under
    ``signatures.<user_id>.ed25519:<device_id>``.

    Args:
        json (dict): The JSON object to sign.

    Returns:
        The same JSON object, with a ``signatures`` key added.
    """
    # Pull out the keys that must not be part of the signed payload.
    existing_signatures = json.pop('signatures', {})
    unsigned = json.pop('unsigned', None)

    # Sign the canonical encoding of what remains.
    signature_base64 = self.olm_account.sign(encode_canonical_json(json))

    # Record the signature under this user's device key.
    key_id = 'ed25519:{}'.format(self.device_id)
    user_signatures = existing_signatures.setdefault(self.user_id, {})
    user_signatures[key_id] = signature_base64

    # Restore the excluded keys alongside the new signature.
    json['signatures'] = existing_signatures
    if unsigned:
        json['unsigned'] = unsigned
    return json
def function[sign_json, parameter[self, json]]: constant[Signs a JSON object. NOTE: The object is modified in-place and the return value can be ignored. As specified, this is done by encoding the JSON object without ``signatures`` or keys grouped as ``unsigned``, using canonical encoding. Args: json (dict): The JSON object to sign. Returns: The same JSON object, with a ``signatures`` key added. It is formatted as ``"signatures": ed25519:<device_id>: <base64_signature>``. ] variable[signatures] assign[=] call[name[json].pop, parameter[constant[signatures], dictionary[[], []]]] variable[unsigned] assign[=] call[name[json].pop, parameter[constant[unsigned], constant[None]]] variable[signature_base64] assign[=] call[name[self].olm_account.sign, parameter[call[name[encode_canonical_json], parameter[name[json]]]]] variable[key_id] assign[=] call[constant[ed25519:{}].format, parameter[name[self].device_id]] call[call[name[signatures].setdefault, parameter[name[self].user_id, dictionary[[], []]]]][name[key_id]] assign[=] name[signature_base64] call[name[json]][constant[signatures]] assign[=] name[signatures] if name[unsigned] begin[:] call[name[json]][constant[unsigned]] assign[=] name[unsigned] return[name[json]]
keyword[def] identifier[sign_json] ( identifier[self] , identifier[json] ): literal[string] identifier[signatures] = identifier[json] . identifier[pop] ( literal[string] ,{}) identifier[unsigned] = identifier[json] . identifier[pop] ( literal[string] , keyword[None] ) identifier[signature_base64] = identifier[self] . identifier[olm_account] . identifier[sign] ( identifier[encode_canonical_json] ( identifier[json] )) identifier[key_id] = literal[string] . identifier[format] ( identifier[self] . identifier[device_id] ) identifier[signatures] . identifier[setdefault] ( identifier[self] . identifier[user_id] ,{})[ identifier[key_id] ]= identifier[signature_base64] identifier[json] [ literal[string] ]= identifier[signatures] keyword[if] identifier[unsigned] : identifier[json] [ literal[string] ]= identifier[unsigned] keyword[return] identifier[json]
def sign_json(self, json): """Signs a JSON object. NOTE: The object is modified in-place and the return value can be ignored. As specified, this is done by encoding the JSON object without ``signatures`` or keys grouped as ``unsigned``, using canonical encoding. Args: json (dict): The JSON object to sign. Returns: The same JSON object, with a ``signatures`` key added. It is formatted as ``"signatures": ed25519:<device_id>: <base64_signature>``. """ signatures = json.pop('signatures', {}) unsigned = json.pop('unsigned', None) signature_base64 = self.olm_account.sign(encode_canonical_json(json)) key_id = 'ed25519:{}'.format(self.device_id) signatures.setdefault(self.user_id, {})[key_id] = signature_base64 json['signatures'] = signatures if unsigned: json['unsigned'] = unsigned # depends on [control=['if'], data=[]] return json
def frmnam(frcode, lenout=_default_len_out): """ Retrieve the name of a reference frame associated with a SPICE ID code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/frmnam_c.html :param frcode: an integer code for a reference frame :type frcode: int :param lenout: Maximum length of output string. :type lenout: int :return: the name associated with the reference frame. :rtype: str """ frcode = ctypes.c_int(frcode) lenout = ctypes.c_int(lenout) frname = stypes.stringToCharP(lenout) libspice.frmnam_c(frcode, lenout, frname) return stypes.toPythonString(frname)
def function[frmnam, parameter[frcode, lenout]]: constant[ Retrieve the name of a reference frame associated with a SPICE ID code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/frmnam_c.html :param frcode: an integer code for a reference frame :type frcode: int :param lenout: Maximum length of output string. :type lenout: int :return: the name associated with the reference frame. :rtype: str ] variable[frcode] assign[=] call[name[ctypes].c_int, parameter[name[frcode]]] variable[lenout] assign[=] call[name[ctypes].c_int, parameter[name[lenout]]] variable[frname] assign[=] call[name[stypes].stringToCharP, parameter[name[lenout]]] call[name[libspice].frmnam_c, parameter[name[frcode], name[lenout], name[frname]]] return[call[name[stypes].toPythonString, parameter[name[frname]]]]
keyword[def] identifier[frmnam] ( identifier[frcode] , identifier[lenout] = identifier[_default_len_out] ): literal[string] identifier[frcode] = identifier[ctypes] . identifier[c_int] ( identifier[frcode] ) identifier[lenout] = identifier[ctypes] . identifier[c_int] ( identifier[lenout] ) identifier[frname] = identifier[stypes] . identifier[stringToCharP] ( identifier[lenout] ) identifier[libspice] . identifier[frmnam_c] ( identifier[frcode] , identifier[lenout] , identifier[frname] ) keyword[return] identifier[stypes] . identifier[toPythonString] ( identifier[frname] )
def frmnam(frcode, lenout=_default_len_out): """ Retrieve the name of a reference frame associated with a SPICE ID code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/frmnam_c.html :param frcode: an integer code for a reference frame :type frcode: int :param lenout: Maximum length of output string. :type lenout: int :return: the name associated with the reference frame. :rtype: str """ frcode = ctypes.c_int(frcode) lenout = ctypes.c_int(lenout) frname = stypes.stringToCharP(lenout) libspice.frmnam_c(frcode, lenout, frname) return stypes.toPythonString(frname)
def thesaurus( self, token: dict = None, thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8", prot: str = "https", ) -> dict: """Get a thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ # handling request parameters payload = {"tid": thez_id} # passing auth parameter thez_url = "{}://v1.{}.isogeo.com/thesauri/{}".format( prot, self.api_url, thez_id ) thez_req = self.get( thez_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) # checking response checker.check_api_response(thez_req) # end of method return thez_req.json()
def function[thesaurus, parameter[self, token, thez_id, prot]]: constant[Get a thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). ] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cb910>], [<ast.Name object at 0x7da20c7cb7c0>]] variable[thez_url] assign[=] call[constant[{}://v1.{}.isogeo.com/thesauri/{}].format, parameter[name[prot], name[self].api_url, name[thez_id]]] variable[thez_req] assign[=] call[name[self].get, parameter[name[thez_url]]] call[name[checker].check_api_response, parameter[name[thez_req]]] return[call[name[thez_req].json, parameter[]]]
keyword[def] identifier[thesaurus] ( identifier[self] , identifier[token] : identifier[dict] = keyword[None] , identifier[thez_id] : identifier[str] = literal[string] , identifier[prot] : identifier[str] = literal[string] , )-> identifier[dict] : literal[string] identifier[payload] ={ literal[string] : identifier[thez_id] } identifier[thez_url] = literal[string] . identifier[format] ( identifier[prot] , identifier[self] . identifier[api_url] , identifier[thez_id] ) identifier[thez_req] = identifier[self] . identifier[get] ( identifier[thez_url] , identifier[headers] = identifier[self] . identifier[header] , identifier[params] = identifier[payload] , identifier[proxies] = identifier[self] . identifier[proxies] , identifier[verify] = identifier[self] . identifier[ssl] , ) identifier[checker] . identifier[check_api_response] ( identifier[thez_req] ) keyword[return] identifier[thez_req] . identifier[json] ()
def thesaurus(self, token: dict=None, thez_id: str='1616597fbc4348c8b11ef9d59cf594c8', prot: str='https') -> dict: """Get a thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ # handling request parameters payload = {'tid': thez_id} # passing auth parameter thez_url = '{}://v1.{}.isogeo.com/thesauri/{}'.format(prot, self.api_url, thez_id) thez_req = self.get(thez_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl) # checking response checker.check_api_response(thez_req) # end of method return thez_req.json()
def getShocks(self): ''' Determine which agents switch from employment to unemployment. All unemployed agents remain unemployed until death. Parameters ---------- None Returns ------- None ''' employed = self.eStateNow == 1.0 N = int(np.sum(employed)) newly_unemployed = drawBernoulli(N,p=self.UnempPrb,seed=self.RNG.randint(0,2**31-1)) self.eStateNow[employed] = 1.0 - newly_unemployed
def function[getShocks, parameter[self]]: constant[ Determine which agents switch from employment to unemployment. All unemployed agents remain unemployed until death. Parameters ---------- None Returns ------- None ] variable[employed] assign[=] compare[name[self].eStateNow equal[==] constant[1.0]] variable[N] assign[=] call[name[int], parameter[call[name[np].sum, parameter[name[employed]]]]] variable[newly_unemployed] assign[=] call[name[drawBernoulli], parameter[name[N]]] call[name[self].eStateNow][name[employed]] assign[=] binary_operation[constant[1.0] - name[newly_unemployed]]
keyword[def] identifier[getShocks] ( identifier[self] ): literal[string] identifier[employed] = identifier[self] . identifier[eStateNow] == literal[int] identifier[N] = identifier[int] ( identifier[np] . identifier[sum] ( identifier[employed] )) identifier[newly_unemployed] = identifier[drawBernoulli] ( identifier[N] , identifier[p] = identifier[self] . identifier[UnempPrb] , identifier[seed] = identifier[self] . identifier[RNG] . identifier[randint] ( literal[int] , literal[int] ** literal[int] - literal[int] )) identifier[self] . identifier[eStateNow] [ identifier[employed] ]= literal[int] - identifier[newly_unemployed]
def getShocks(self): """ Determine which agents switch from employment to unemployment. All unemployed agents remain unemployed until death. Parameters ---------- None Returns ------- None """ employed = self.eStateNow == 1.0 N = int(np.sum(employed)) newly_unemployed = drawBernoulli(N, p=self.UnempPrb, seed=self.RNG.randint(0, 2 ** 31 - 1)) self.eStateNow[employed] = 1.0 - newly_unemployed
def _find_cgroup_mounts(): """ Return the information which subsystems are mounted where. @return a generator of tuples (subsystem, mountpoint) """ try: with open('/proc/mounts', 'rt') as mountsFile: for mount in mountsFile: mount = mount.split(' ') if mount[2] == 'cgroup': mountpoint = mount[1] options = mount[3] for option in options.split(','): if option in ALL_KNOWN_SUBSYSTEMS: yield (option, mountpoint) except IOError: logging.exception('Cannot read /proc/mounts')
def function[_find_cgroup_mounts, parameter[]]: constant[ Return the information which subsystems are mounted where. @return a generator of tuples (subsystem, mountpoint) ] <ast.Try object at 0x7da18dc99000>
keyword[def] identifier[_find_cgroup_mounts] (): literal[string] keyword[try] : keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[mountsFile] : keyword[for] identifier[mount] keyword[in] identifier[mountsFile] : identifier[mount] = identifier[mount] . identifier[split] ( literal[string] ) keyword[if] identifier[mount] [ literal[int] ]== literal[string] : identifier[mountpoint] = identifier[mount] [ literal[int] ] identifier[options] = identifier[mount] [ literal[int] ] keyword[for] identifier[option] keyword[in] identifier[options] . identifier[split] ( literal[string] ): keyword[if] identifier[option] keyword[in] identifier[ALL_KNOWN_SUBSYSTEMS] : keyword[yield] ( identifier[option] , identifier[mountpoint] ) keyword[except] identifier[IOError] : identifier[logging] . identifier[exception] ( literal[string] )
def _find_cgroup_mounts(): """ Return the information which subsystems are mounted where. @return a generator of tuples (subsystem, mountpoint) """ try: with open('/proc/mounts', 'rt') as mountsFile: for mount in mountsFile: mount = mount.split(' ') if mount[2] == 'cgroup': mountpoint = mount[1] options = mount[3] for option in options.split(','): if option in ALL_KNOWN_SUBSYSTEMS: yield (option, mountpoint) # depends on [control=['if'], data=['option']] # depends on [control=['for'], data=['option']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mount']] # depends on [control=['with'], data=['mountsFile']] # depends on [control=['try'], data=[]] except IOError: logging.exception('Cannot read /proc/mounts') # depends on [control=['except'], data=[]]
def get_image_output(self, arch): """ Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. For metadata-only builds, an empty file is used instead of the output of 'docker save'. :param arch: str, architecture for this output :return: tuple, (metadata dict, Output instance) """ saved_image = self.workflow.exported_image_sequence[-1].get('path') image_name = get_image_upload_filename(self.workflow.exported_image_sequence[-1], self.workflow.builder.image_id, arch) if self.metadata_only: metadata = self.get_output_metadata(os.path.devnull, image_name) output = Output(file=None, metadata=metadata) else: metadata = self.get_output_metadata(saved_image, image_name) output = Output(file=open(saved_image), metadata=metadata) return metadata, output
def function[get_image_output, parameter[self, arch]]: constant[ Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. For metadata-only builds, an empty file is used instead of the output of 'docker save'. :param arch: str, architecture for this output :return: tuple, (metadata dict, Output instance) ] variable[saved_image] assign[=] call[call[name[self].workflow.exported_image_sequence][<ast.UnaryOp object at 0x7da18dc06800>].get, parameter[constant[path]]] variable[image_name] assign[=] call[name[get_image_upload_filename], parameter[call[name[self].workflow.exported_image_sequence][<ast.UnaryOp object at 0x7da20c990670>], name[self].workflow.builder.image_id, name[arch]]] if name[self].metadata_only begin[:] variable[metadata] assign[=] call[name[self].get_output_metadata, parameter[name[os].path.devnull, name[image_name]]] variable[output] assign[=] call[name[Output], parameter[]] return[tuple[[<ast.Name object at 0x7da20c990ac0>, <ast.Name object at 0x7da20c993760>]]]
keyword[def] identifier[get_image_output] ( identifier[self] , identifier[arch] ): literal[string] identifier[saved_image] = identifier[self] . identifier[workflow] . identifier[exported_image_sequence] [- literal[int] ]. identifier[get] ( literal[string] ) identifier[image_name] = identifier[get_image_upload_filename] ( identifier[self] . identifier[workflow] . identifier[exported_image_sequence] [- literal[int] ], identifier[self] . identifier[workflow] . identifier[builder] . identifier[image_id] , identifier[arch] ) keyword[if] identifier[self] . identifier[metadata_only] : identifier[metadata] = identifier[self] . identifier[get_output_metadata] ( identifier[os] . identifier[path] . identifier[devnull] , identifier[image_name] ) identifier[output] = identifier[Output] ( identifier[file] = keyword[None] , identifier[metadata] = identifier[metadata] ) keyword[else] : identifier[metadata] = identifier[self] . identifier[get_output_metadata] ( identifier[saved_image] , identifier[image_name] ) identifier[output] = identifier[Output] ( identifier[file] = identifier[open] ( identifier[saved_image] ), identifier[metadata] = identifier[metadata] ) keyword[return] identifier[metadata] , identifier[output]
def get_image_output(self, arch): """ Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. For metadata-only builds, an empty file is used instead of the output of 'docker save'. :param arch: str, architecture for this output :return: tuple, (metadata dict, Output instance) """ saved_image = self.workflow.exported_image_sequence[-1].get('path') image_name = get_image_upload_filename(self.workflow.exported_image_sequence[-1], self.workflow.builder.image_id, arch) if self.metadata_only: metadata = self.get_output_metadata(os.path.devnull, image_name) output = Output(file=None, metadata=metadata) # depends on [control=['if'], data=[]] else: metadata = self.get_output_metadata(saved_image, image_name) output = Output(file=open(saved_image), metadata=metadata) return (metadata, output)
def clean_total_refund_amount(self): ''' The Javascript should ensure that the hidden input is updated, but double check it here. ''' initial = self.cleaned_data.get('initial_refund_amount', 0) total = self.cleaned_data['total_refund_amount'] summed_refunds = sum([v for k,v in self.cleaned_data.items() if k.startswith('item_refundamount_')]) if not self.cleaned_data.get('id'): raise ValidationError('ID not in cleaned data') if summed_refunds != total: raise ValidationError(_('Passed value does not match sum of allocated refunds.')) elif summed_refunds > self.cleaned_data['id'].amountPaid + self.cleaned_data['id'].refunds: raise ValidationError(_('Total refunds allocated exceed revenue received.')) elif total < initial: raise ValidationError(_('Cannot reduce the total amount of the refund.')) return total
def function[clean_total_refund_amount, parameter[self]]: constant[ The Javascript should ensure that the hidden input is updated, but double check it here. ] variable[initial] assign[=] call[name[self].cleaned_data.get, parameter[constant[initial_refund_amount], constant[0]]] variable[total] assign[=] call[name[self].cleaned_data][constant[total_refund_amount]] variable[summed_refunds] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b13d46a0>]] if <ast.UnaryOp object at 0x7da1b13d6170> begin[:] <ast.Raise object at 0x7da1b13d5d80> if compare[name[summed_refunds] not_equal[!=] name[total]] begin[:] <ast.Raise object at 0x7da1b13a6f20> return[name[total]]
keyword[def] identifier[clean_total_refund_amount] ( identifier[self] ): literal[string] identifier[initial] = identifier[self] . identifier[cleaned_data] . identifier[get] ( literal[string] , literal[int] ) identifier[total] = identifier[self] . identifier[cleaned_data] [ literal[string] ] identifier[summed_refunds] = identifier[sum] ([ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[cleaned_data] . identifier[items] () keyword[if] identifier[k] . identifier[startswith] ( literal[string] )]) keyword[if] keyword[not] identifier[self] . identifier[cleaned_data] . identifier[get] ( literal[string] ): keyword[raise] identifier[ValidationError] ( literal[string] ) keyword[if] identifier[summed_refunds] != identifier[total] : keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] )) keyword[elif] identifier[summed_refunds] > identifier[self] . identifier[cleaned_data] [ literal[string] ]. identifier[amountPaid] + identifier[self] . identifier[cleaned_data] [ literal[string] ]. identifier[refunds] : keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] )) keyword[elif] identifier[total] < identifier[initial] : keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] )) keyword[return] identifier[total]
def clean_total_refund_amount(self): """ The Javascript should ensure that the hidden input is updated, but double check it here. """ initial = self.cleaned_data.get('initial_refund_amount', 0) total = self.cleaned_data['total_refund_amount'] summed_refunds = sum([v for (k, v) in self.cleaned_data.items() if k.startswith('item_refundamount_')]) if not self.cleaned_data.get('id'): raise ValidationError('ID not in cleaned data') # depends on [control=['if'], data=[]] if summed_refunds != total: raise ValidationError(_('Passed value does not match sum of allocated refunds.')) # depends on [control=['if'], data=[]] elif summed_refunds > self.cleaned_data['id'].amountPaid + self.cleaned_data['id'].refunds: raise ValidationError(_('Total refunds allocated exceed revenue received.')) # depends on [control=['if'], data=[]] elif total < initial: raise ValidationError(_('Cannot reduce the total amount of the refund.')) # depends on [control=['if'], data=[]] return total
def get_auth_token_login_url( self, auth_token_ticket, authenticator, private_key, service_url, username, ): ''' Build an auth token login URL. See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details. ''' auth_token, auth_token_signature = self._build_auth_token_data( auth_token_ticket, authenticator, private_key, username=username, ) logging.debug('[CAS] AuthToken: {}'.format(auth_token)) url = self._get_auth_token_login_url( auth_token=auth_token, auth_token_signature=auth_token_signature, service_url=service_url, ) logging.debug('[CAS] AuthToken Login URL: {}'.format(url)) return url
def function[get_auth_token_login_url, parameter[self, auth_token_ticket, authenticator, private_key, service_url, username]]: constant[ Build an auth token login URL. See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details. ] <ast.Tuple object at 0x7da18bccbd90> assign[=] call[name[self]._build_auth_token_data, parameter[name[auth_token_ticket], name[authenticator], name[private_key]]] call[name[logging].debug, parameter[call[constant[[CAS] AuthToken: {}].format, parameter[name[auth_token]]]]] variable[url] assign[=] call[name[self]._get_auth_token_login_url, parameter[]] call[name[logging].debug, parameter[call[constant[[CAS] AuthToken Login URL: {}].format, parameter[name[url]]]]] return[name[url]]
keyword[def] identifier[get_auth_token_login_url] ( identifier[self] , identifier[auth_token_ticket] , identifier[authenticator] , identifier[private_key] , identifier[service_url] , identifier[username] , ): literal[string] identifier[auth_token] , identifier[auth_token_signature] = identifier[self] . identifier[_build_auth_token_data] ( identifier[auth_token_ticket] , identifier[authenticator] , identifier[private_key] , identifier[username] = identifier[username] , ) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[auth_token] )) identifier[url] = identifier[self] . identifier[_get_auth_token_login_url] ( identifier[auth_token] = identifier[auth_token] , identifier[auth_token_signature] = identifier[auth_token_signature] , identifier[service_url] = identifier[service_url] , ) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[url] )) keyword[return] identifier[url]
def get_auth_token_login_url(self, auth_token_ticket, authenticator, private_key, service_url, username): """ Build an auth token login URL. See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details. """ (auth_token, auth_token_signature) = self._build_auth_token_data(auth_token_ticket, authenticator, private_key, username=username) logging.debug('[CAS] AuthToken: {}'.format(auth_token)) url = self._get_auth_token_login_url(auth_token=auth_token, auth_token_signature=auth_token_signature, service_url=service_url) logging.debug('[CAS] AuthToken Login URL: {}'.format(url)) return url
def processClubAttendance(f, clubs): """Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. """ try: # Skip as many empty lines as necessary (file format inconsistent) line = f.next() while line == ',,,,,,,,,,,,,,,,,,,\n': line = f.next() # The first non-empty line should have the name as the first field name = line.split(',')[0] # Create a new club object if needed if name not in clubs: clubs[name] = Club(name) # Get the named club c = clubs[name] c.processAttendance(f) return True except StopIteration: return False
def function[processClubAttendance, parameter[f, clubs]]: constant[Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. ] <ast.Try object at 0x7da20e9b2ad0>
keyword[def] identifier[processClubAttendance] ( identifier[f] , identifier[clubs] ): literal[string] keyword[try] : identifier[line] = identifier[f] . identifier[next] () keyword[while] identifier[line] == literal[string] : identifier[line] = identifier[f] . identifier[next] () identifier[name] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[name] keyword[not] keyword[in] identifier[clubs] : identifier[clubs] [ identifier[name] ]= identifier[Club] ( identifier[name] ) identifier[c] = identifier[clubs] [ identifier[name] ] identifier[c] . identifier[processAttendance] ( identifier[f] ) keyword[return] keyword[True] keyword[except] identifier[StopIteration] : keyword[return] keyword[False]
def processClubAttendance(f, clubs): """Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. """ try: # Skip as many empty lines as necessary (file format inconsistent) line = f.next() while line == ',,,,,,,,,,,,,,,,,,,\n': line = f.next() # depends on [control=['while'], data=['line']] # The first non-empty line should have the name as the first field name = line.split(',')[0] # Create a new club object if needed if name not in clubs: clubs[name] = Club(name) # depends on [control=['if'], data=['name', 'clubs']] # Get the named club c = clubs[name] c.processAttendance(f) return True # depends on [control=['try'], data=[]] except StopIteration: return False # depends on [control=['except'], data=[]]
def get_notes(): """ By convention : D is a length of the element, d is a gap """ notes = {'DPhi':{}, 'dPhi':{}} # Toroidal width (mm, inner outer) notes['DPhi']['In'] = 26.370 notes['DPhi']['Out'] = 31.929 # Inter tiles distance (mm, uniform) notes['dl'] = 0.500 # Poloidal/Radial total length (mm) notes['DL'] = 437.000 # Number of tiles radially notes['nb'] = 35 notes['nbPhi'] = 19*2*12 # Radial length of a tile (mm, uniform) notes['Dl'] = 12.000 # Vertical height of tiles (mm, uniform) notes['DZ'] = 26.000 # Toroidal space between needles (mm, inner outer) notes['dPhi']['In'] = 0.588 notes['dPhi']['Out'] = 0.612 # (X,Z,Y) polygon of one needle (mm) !!!!!! (X,Z,Y) # 1 mm should be added towards Z>0 in the direction normal to the divertor's upper surface notes['sampleXZY'] = [[-759.457, -625.500, -1797.591], # Old start point [-759.603, -624.572, -1797.936], # Only for pattern [-772.277, -620.864, -1794.112], [-761.681, -610.036, -1769.498], # Computed,tube/plane [-761.895, -620.231, -1764.921], [-751.095, -609.687, -1741.154], [-755.613, -580.944, -1751.852], [-766.413, -591.488, -1775.620], # Edge of plane [-763.902, -596.129, -1774.659], # Computed,tube/plane [-774.498, -606.956, -1799.274], # Middle top of tube [-763.246, -601.395, -1806.563], [-767.575, -605.891, -1816.813], [-763.932, -629.068, -1808.186], [-764.112, -629.255, -1808.613], [-767.755, -606.078, -1817.240], [-772.084, -610.573, -1827.490], [-768.441, -633.750, -1818.863], [-768.622, -633.938, -1819.290], [-772.265, -610.760, -1827.917], [-776.594, -615.256, -1838.167], [-772.950, -638.433, -1829.540], [-773.131, -638.620, -1829.967], [-776.774, -615.443, -1838.594], [-781.103, -619.938, -1848.844], [-777.460, -643.115, -1840.217], [-777.640, -643.303, -1840.644], [-781.283, -620.126, -1849.271], [-785.612, -624.621, -1859.520], [-781.969, -647.798, -1850.894], [-782.149, -647.985, -1851.321], [-785.793, -624.808, -1859.948], [-790.122, -629.303, -1870.197], [-786.478, -652.481, -1861.571], 
[-786.659, -652.668, -1861.998], [-790.302, -629.491, -1870.624], [-794.631, -633.986, -1880.874], [-790.988, -657.163, -1872.248], [-791.168, -657.351, -1872.675], [-794.811, -634.173, -1881.301]] notes['sampleXZY'] = np.array(notes['sampleXZY']) for kk in notes.keys(): if type(notes[kk]) is dict: notes[kk]['In'] = notes[kk]['In']*1.e-3 notes[kk]['Out'] = notes[kk]['Out']*1.e-3 elif not 'nb' in kk: notes[kk] = notes[kk]*1.e-3 return notes
def function[get_notes, parameter[]]: constant[ By convention : D is a length of the element, d is a gap ] variable[notes] assign[=] dictionary[[<ast.Constant object at 0x7da2047e8c10>, <ast.Constant object at 0x7da2047e8d60>], [<ast.Dict object at 0x7da2047ea140>, <ast.Dict object at 0x7da2047e81c0>]] call[call[name[notes]][constant[DPhi]]][constant[In]] assign[=] constant[26.37] call[call[name[notes]][constant[DPhi]]][constant[Out]] assign[=] constant[31.929] call[name[notes]][constant[dl]] assign[=] constant[0.5] call[name[notes]][constant[DL]] assign[=] constant[437.0] call[name[notes]][constant[nb]] assign[=] constant[35] call[name[notes]][constant[nbPhi]] assign[=] binary_operation[binary_operation[constant[19] * constant[2]] * constant[12]] call[name[notes]][constant[Dl]] assign[=] constant[12.0] call[name[notes]][constant[DZ]] assign[=] constant[26.0] call[call[name[notes]][constant[dPhi]]][constant[In]] assign[=] constant[0.588] call[call[name[notes]][constant[dPhi]]][constant[Out]] assign[=] constant[0.612] call[name[notes]][constant[sampleXZY]] assign[=] list[[<ast.List object at 0x7da2047e8ac0>, <ast.List object at 0x7da2047e9780>, <ast.List object at 0x7da2047eb4f0>, <ast.List object at 0x7da2047eb970>, <ast.List object at 0x7da2047e8760>, <ast.List object at 0x7da2047e9480>, <ast.List object at 0x7da2047e8340>, <ast.List object at 0x7da2047e97b0>, <ast.List object at 0x7da2047e8850>, <ast.List object at 0x7da2047e87f0>, <ast.List object at 0x7da2047eaa70>, <ast.List object at 0x7da2047e8880>, <ast.List object at 0x7da2047ea920>, <ast.List object at 0x7da2047e8d90>, <ast.List object at 0x7da2047e8b80>, <ast.List object at 0x7da18f720e20>, <ast.List object at 0x7da18f721360>, <ast.List object at 0x7da18f720af0>, <ast.List object at 0x7da2054a5630>, <ast.List object at 0x7da2054a6ce0>, <ast.List object at 0x7da2054a7280>, <ast.List object at 0x7da2054a79d0>, <ast.List object at 0x7da2054a6ef0>, <ast.List object at 0x7da2054a41f0>, <ast.List object at 
0x7da2054a6980>, <ast.List object at 0x7da2054a5180>, <ast.List object at 0x7da2054a44f0>, <ast.List object at 0x7da2054a5540>, <ast.List object at 0x7da2054a4e20>, <ast.List object at 0x7da2054a6a10>, <ast.List object at 0x7da2054a56c0>, <ast.List object at 0x7da2054a6f50>, <ast.List object at 0x7da2054a51b0>, <ast.List object at 0x7da2054a48e0>, <ast.List object at 0x7da2054a5660>, <ast.List object at 0x7da2054a5de0>, <ast.List object at 0x7da2054a69e0>, <ast.List object at 0x7da2054a6da0>, <ast.List object at 0x7da2054a6fe0>]] call[name[notes]][constant[sampleXZY]] assign[=] call[name[np].array, parameter[call[name[notes]][constant[sampleXZY]]]] for taget[name[kk]] in starred[call[name[notes].keys, parameter[]]] begin[:] if compare[call[name[type], parameter[call[name[notes]][name[kk]]]] is name[dict]] begin[:] call[call[name[notes]][name[kk]]][constant[In]] assign[=] binary_operation[call[call[name[notes]][name[kk]]][constant[In]] * constant[0.001]] call[call[name[notes]][name[kk]]][constant[Out]] assign[=] binary_operation[call[call[name[notes]][name[kk]]][constant[Out]] * constant[0.001]] return[name[notes]]
keyword[def] identifier[get_notes] (): literal[string] identifier[notes] ={ literal[string] :{}, literal[string] :{}} identifier[notes] [ literal[string] ][ literal[string] ]= literal[int] identifier[notes] [ literal[string] ][ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]= literal[int] * literal[int] * literal[int] identifier[notes] [ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]= literal[int] identifier[notes] [ literal[string] ][ literal[string] ]= literal[int] identifier[notes] [ literal[string] ][ literal[string] ]= literal[int] identifier[notes] [ literal[string] ]=[[- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- 
literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ], [- literal[int] ,- literal[int] ,- literal[int] ]] identifier[notes] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[notes] [ literal[string] ]) keyword[for] identifier[kk] keyword[in] identifier[notes] . identifier[keys] (): keyword[if] identifier[type] ( identifier[notes] [ identifier[kk] ]) keyword[is] identifier[dict] : identifier[notes] [ identifier[kk] ][ literal[string] ]= identifier[notes] [ identifier[kk] ][ literal[string] ]* literal[int] identifier[notes] [ identifier[kk] ][ literal[string] ]= identifier[notes] [ identifier[kk] ][ literal[string] ]* literal[int] keyword[elif] keyword[not] literal[string] keyword[in] identifier[kk] : identifier[notes] [ identifier[kk] ]= identifier[notes] [ identifier[kk] ]* literal[int] keyword[return] identifier[notes]
def get_notes(): """ By convention : D is a length of the element, d is a gap """ notes = {'DPhi': {}, 'dPhi': {}} # Toroidal width (mm, inner outer) notes['DPhi']['In'] = 26.37 notes['DPhi']['Out'] = 31.929 # Inter tiles distance (mm, uniform) notes['dl'] = 0.5 # Poloidal/Radial total length (mm) notes['DL'] = 437.0 # Number of tiles radially notes['nb'] = 35 notes['nbPhi'] = 19 * 2 * 12 # Radial length of a tile (mm, uniform) notes['Dl'] = 12.0 # Vertical height of tiles (mm, uniform) notes['DZ'] = 26.0 # Toroidal space between needles (mm, inner outer) notes['dPhi']['In'] = 0.588 notes['dPhi']['Out'] = 0.612 # (X,Z,Y) polygon of one needle (mm) !!!!!! (X,Z,Y) # 1 mm should be added towards Z>0 in the direction normal to the divertor's upper surface # Old start point # Only for pattern # Computed,tube/plane # Edge of plane # Computed,tube/plane # Middle top of tube notes['sampleXZY'] = [[-759.457, -625.5, -1797.591], [-759.603, -624.572, -1797.936], [-772.277, -620.864, -1794.112], [-761.681, -610.036, -1769.498], [-761.895, -620.231, -1764.921], [-751.095, -609.687, -1741.154], [-755.613, -580.944, -1751.852], [-766.413, -591.488, -1775.62], [-763.902, -596.129, -1774.659], [-774.498, -606.956, -1799.274], [-763.246, -601.395, -1806.563], [-767.575, -605.891, -1816.813], [-763.932, -629.068, -1808.186], [-764.112, -629.255, -1808.613], [-767.755, -606.078, -1817.24], [-772.084, -610.573, -1827.49], [-768.441, -633.75, -1818.863], [-768.622, -633.938, -1819.29], [-772.265, -610.76, -1827.917], [-776.594, -615.256, -1838.167], [-772.95, -638.433, -1829.54], [-773.131, -638.62, -1829.967], [-776.774, -615.443, -1838.594], [-781.103, -619.938, -1848.844], [-777.46, -643.115, -1840.217], [-777.64, -643.303, -1840.644], [-781.283, -620.126, -1849.271], [-785.612, -624.621, -1859.52], [-781.969, -647.798, -1850.894], [-782.149, -647.985, -1851.321], [-785.793, -624.808, -1859.948], [-790.122, -629.303, -1870.197], [-786.478, -652.481, -1861.571], [-786.659, -652.668, 
-1861.998], [-790.302, -629.491, -1870.624], [-794.631, -633.986, -1880.874], [-790.988, -657.163, -1872.248], [-791.168, -657.351, -1872.675], [-794.811, -634.173, -1881.301]] notes['sampleXZY'] = np.array(notes['sampleXZY']) for kk in notes.keys(): if type(notes[kk]) is dict: notes[kk]['In'] = notes[kk]['In'] * 0.001 notes[kk]['Out'] = notes[kk]['Out'] * 0.001 # depends on [control=['if'], data=[]] elif not 'nb' in kk: notes[kk] = notes[kk] * 0.001 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['kk']] return notes
def sleep(self): """Send the controller to sleep""" logger.debug("Sleep the controller") self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP))
def function[sleep, parameter[self]]: constant[Send the controller to sleep] call[name[logger].debug, parameter[constant[Sleep the controller]]] call[name[self].write, parameter[name[Registers].MODE_1, binary_operation[name[self].mode_1 <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[Mode1].SLEEP]]]]
keyword[def] identifier[sleep] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[write] ( identifier[Registers] . identifier[MODE_1] , identifier[self] . identifier[mode_1] |( literal[int] << identifier[Mode1] . identifier[SLEEP] ))
def sleep(self): """Send the controller to sleep""" logger.debug('Sleep the controller') self.write(Registers.MODE_1, self.mode_1 | 1 << Mode1.SLEEP)
def __Languages_comboBox_set_default_view_state(self): """ Sets the **Languages_comboBox** Widget default View state. """ if not self.__container.has_editor_tab(): return editor = self.__container.get_current_editor() index = self.Languages_comboBox.findText(editor.language.name) self.Languages_comboBox.setCurrentIndex(index)
def function[__Languages_comboBox_set_default_view_state, parameter[self]]: constant[ Sets the **Languages_comboBox** Widget default View state. ] if <ast.UnaryOp object at 0x7da18bcc95d0> begin[:] return[None] variable[editor] assign[=] call[name[self].__container.get_current_editor, parameter[]] variable[index] assign[=] call[name[self].Languages_comboBox.findText, parameter[name[editor].language.name]] call[name[self].Languages_comboBox.setCurrentIndex, parameter[name[index]]]
keyword[def] identifier[__Languages_comboBox_set_default_view_state] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[__container] . identifier[has_editor_tab] (): keyword[return] identifier[editor] = identifier[self] . identifier[__container] . identifier[get_current_editor] () identifier[index] = identifier[self] . identifier[Languages_comboBox] . identifier[findText] ( identifier[editor] . identifier[language] . identifier[name] ) identifier[self] . identifier[Languages_comboBox] . identifier[setCurrentIndex] ( identifier[index] )
def __Languages_comboBox_set_default_view_state(self): """ Sets the **Languages_comboBox** Widget default View state. """ if not self.__container.has_editor_tab(): return # depends on [control=['if'], data=[]] editor = self.__container.get_current_editor() index = self.Languages_comboBox.findText(editor.language.name) self.Languages_comboBox.setCurrentIndex(index)
def fast_cov(x, y=None, destination=None): """calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows are observations). Args: x (numpy array-like) MxN in shape y (numpy array-like) MxP in shape destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provided, shape is NxP """ validate_inputs(x, y, destination) if y is None: y = x if destination is None: destination = numpy.zeros((x.shape[1], y.shape[1])) mean_x = numpy.mean(x, axis=0) mean_y = numpy.mean(y, axis=0) mean_centered_x = (x - mean_x).astype(destination.dtype) mean_centered_y = (y - mean_y).astype(destination.dtype) numpy.dot(mean_centered_x.T, mean_centered_y, out=destination) numpy.divide(destination, (x.shape[0] - 1), out=destination) return destination
def function[fast_cov, parameter[x, y, destination]]: constant[calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows are observations). Args: x (numpy array-like) MxN in shape y (numpy array-like) MxP in shape destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provided, shape is NxP ] call[name[validate_inputs], parameter[name[x], name[y], name[destination]]] if compare[name[y] is constant[None]] begin[:] variable[y] assign[=] name[x] if compare[name[destination] is constant[None]] begin[:] variable[destination] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b0e33610>, <ast.Subscript object at 0x7da207f00400>]]]] variable[mean_x] assign[=] call[name[numpy].mean, parameter[name[x]]] variable[mean_y] assign[=] call[name[numpy].mean, parameter[name[y]]] variable[mean_centered_x] assign[=] call[binary_operation[name[x] - name[mean_x]].astype, parameter[name[destination].dtype]] variable[mean_centered_y] assign[=] call[binary_operation[name[y] - name[mean_y]].astype, parameter[name[destination].dtype]] call[name[numpy].dot, parameter[name[mean_centered_x].T, name[mean_centered_y]]] call[name[numpy].divide, parameter[name[destination], binary_operation[call[name[x].shape][constant[0]] - constant[1]]]] return[name[destination]]
keyword[def] identifier[fast_cov] ( identifier[x] , identifier[y] = keyword[None] , identifier[destination] = keyword[None] ): literal[string] identifier[validate_inputs] ( identifier[x] , identifier[y] , identifier[destination] ) keyword[if] identifier[y] keyword[is] keyword[None] : identifier[y] = identifier[x] keyword[if] identifier[destination] keyword[is] keyword[None] : identifier[destination] = identifier[numpy] . identifier[zeros] (( identifier[x] . identifier[shape] [ literal[int] ], identifier[y] . identifier[shape] [ literal[int] ])) identifier[mean_x] = identifier[numpy] . identifier[mean] ( identifier[x] , identifier[axis] = literal[int] ) identifier[mean_y] = identifier[numpy] . identifier[mean] ( identifier[y] , identifier[axis] = literal[int] ) identifier[mean_centered_x] =( identifier[x] - identifier[mean_x] ). identifier[astype] ( identifier[destination] . identifier[dtype] ) identifier[mean_centered_y] =( identifier[y] - identifier[mean_y] ). identifier[astype] ( identifier[destination] . identifier[dtype] ) identifier[numpy] . identifier[dot] ( identifier[mean_centered_x] . identifier[T] , identifier[mean_centered_y] , identifier[out] = identifier[destination] ) identifier[numpy] . identifier[divide] ( identifier[destination] ,( identifier[x] . identifier[shape] [ literal[int] ]- literal[int] ), identifier[out] = identifier[destination] ) keyword[return] identifier[destination]
def fast_cov(x, y=None, destination=None): """calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows are observations). Args: x (numpy array-like) MxN in shape y (numpy array-like) MxP in shape destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provided, shape is NxP """ validate_inputs(x, y, destination) if y is None: y = x # depends on [control=['if'], data=['y']] if destination is None: destination = numpy.zeros((x.shape[1], y.shape[1])) # depends on [control=['if'], data=['destination']] mean_x = numpy.mean(x, axis=0) mean_y = numpy.mean(y, axis=0) mean_centered_x = (x - mean_x).astype(destination.dtype) mean_centered_y = (y - mean_y).astype(destination.dtype) numpy.dot(mean_centered_x.T, mean_centered_y, out=destination) numpy.divide(destination, x.shape[0] - 1, out=destination) return destination
def check_interface_is_subset(circuit1, circuit2): """ Checks that the interface of circuit1 is a subset of circuit2 Subset is defined as circuit2 contains all the ports of circuit1. Ports are matched by name comparison, then the types are checked to see if one could be converted to another. """ circuit1_port_names = circuit1.interface.ports.keys() for name in circuit1_port_names: if name not in circuit2.interface.ports: raise ValueError(f"{circuit2} (circuit2) does not have port {name}") circuit1_kind = type(type(getattr(circuit1, name))) circuit2_kind = type(type(getattr(circuit2, name))) circuit1_sub_circuit2 = issubclass(circuit1_kind, circuit2_kind) circuit2_sub_circuit1 = issubclass(circuit2_kind, circuit1_kind) # Check that the type of one could be converted to the other if not (circuit1_sub_circuit2 or circuit2_sub_circuit1): raise ValueError(f"Port {name} types don't match:" f" Type0={circuit1_kind}," f" Type1={circuit2_kind}")
def function[check_interface_is_subset, parameter[circuit1, circuit2]]: constant[ Checks that the interface of circuit1 is a subset of circuit2 Subset is defined as circuit2 contains all the ports of circuit1. Ports are matched by name comparison, then the types are checked to see if one could be converted to another. ] variable[circuit1_port_names] assign[=] call[name[circuit1].interface.ports.keys, parameter[]] for taget[name[name]] in starred[name[circuit1_port_names]] begin[:] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[circuit2].interface.ports] begin[:] <ast.Raise object at 0x7da18bcc9360> variable[circuit1_kind] assign[=] call[name[type], parameter[call[name[type], parameter[call[name[getattr], parameter[name[circuit1], name[name]]]]]]] variable[circuit2_kind] assign[=] call[name[type], parameter[call[name[type], parameter[call[name[getattr], parameter[name[circuit2], name[name]]]]]]] variable[circuit1_sub_circuit2] assign[=] call[name[issubclass], parameter[name[circuit1_kind], name[circuit2_kind]]] variable[circuit2_sub_circuit1] assign[=] call[name[issubclass], parameter[name[circuit2_kind], name[circuit1_kind]]] if <ast.UnaryOp object at 0x7da18bccb6a0> begin[:] <ast.Raise object at 0x7da18bcc9810>
keyword[def] identifier[check_interface_is_subset] ( identifier[circuit1] , identifier[circuit2] ): literal[string] identifier[circuit1_port_names] = identifier[circuit1] . identifier[interface] . identifier[ports] . identifier[keys] () keyword[for] identifier[name] keyword[in] identifier[circuit1_port_names] : keyword[if] identifier[name] keyword[not] keyword[in] identifier[circuit2] . identifier[interface] . identifier[ports] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[circuit1_kind] = identifier[type] ( identifier[type] ( identifier[getattr] ( identifier[circuit1] , identifier[name] ))) identifier[circuit2_kind] = identifier[type] ( identifier[type] ( identifier[getattr] ( identifier[circuit2] , identifier[name] ))) identifier[circuit1_sub_circuit2] = identifier[issubclass] ( identifier[circuit1_kind] , identifier[circuit2_kind] ) identifier[circuit2_sub_circuit1] = identifier[issubclass] ( identifier[circuit2_kind] , identifier[circuit1_kind] ) keyword[if] keyword[not] ( identifier[circuit1_sub_circuit2] keyword[or] identifier[circuit2_sub_circuit1] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] )
def check_interface_is_subset(circuit1, circuit2): """ Checks that the interface of circuit1 is a subset of circuit2 Subset is defined as circuit2 contains all the ports of circuit1. Ports are matched by name comparison, then the types are checked to see if one could be converted to another. """ circuit1_port_names = circuit1.interface.ports.keys() for name in circuit1_port_names: if name not in circuit2.interface.ports: raise ValueError(f'{circuit2} (circuit2) does not have port {name}') # depends on [control=['if'], data=['name']] circuit1_kind = type(type(getattr(circuit1, name))) circuit2_kind = type(type(getattr(circuit2, name))) circuit1_sub_circuit2 = issubclass(circuit1_kind, circuit2_kind) circuit2_sub_circuit1 = issubclass(circuit2_kind, circuit1_kind) # Check that the type of one could be converted to the other if not (circuit1_sub_circuit2 or circuit2_sub_circuit1): raise ValueError(f"Port {name} types don't match: Type0={circuit1_kind}, Type1={circuit2_kind}") # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
def submit_form_action(self, url): """ Submit the form with the given action URL (i.e. the form that submits to ``/post/my/data``). """ form = ElementSelector( world.browser, str('//form[@action="%s"]' % url), ) assert form, \ "Cannot find a form with action '{}' on the page.".format(url) form.submit()
def function[submit_form_action, parameter[self, url]]: constant[ Submit the form with the given action URL (i.e. the form that submits to ``/post/my/data``). ] variable[form] assign[=] call[name[ElementSelector], parameter[name[world].browser, call[name[str], parameter[binary_operation[constant[//form[@action="%s"]] <ast.Mod object at 0x7da2590d6920> name[url]]]]]] assert[name[form]] call[name[form].submit, parameter[]]
keyword[def] identifier[submit_form_action] ( identifier[self] , identifier[url] ): literal[string] identifier[form] = identifier[ElementSelector] ( identifier[world] . identifier[browser] , identifier[str] ( literal[string] % identifier[url] ), ) keyword[assert] identifier[form] , literal[string] . identifier[format] ( identifier[url] ) identifier[form] . identifier[submit] ()
def submit_form_action(self, url): """ Submit the form with the given action URL (i.e. the form that submits to ``/post/my/data``). """ form = ElementSelector(world.browser, str('//form[@action="%s"]' % url)) assert form, "Cannot find a form with action '{}' on the page.".format(url) form.submit()
def configure(self, args): """Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list. """ for plug in self._plugins: plug_name = self.plugin_name(plug) plug.enabled = getattr(args, "plugin_%s" % plug_name, False) if plug.enabled and getattr(plug, "configure", None): if callable(getattr(plug, "configure", None)): plug.configure(args) LOG.debug("Available plugins: %s", self._plugins) self.plugins = [plugin for plugin in self._plugins if getattr(plugin, "enabled", False)] LOG.debug("Enabled plugins: %s", self.plugins)
def function[configure, parameter[self, args]]: constant[Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list. ] for taget[name[plug]] in starred[name[self]._plugins] begin[:] variable[plug_name] assign[=] call[name[self].plugin_name, parameter[name[plug]]] name[plug].enabled assign[=] call[name[getattr], parameter[name[args], binary_operation[constant[plugin_%s] <ast.Mod object at 0x7da2590d6920> name[plug_name]], constant[False]]] if <ast.BoolOp object at 0x7da18c4cf610> begin[:] if call[name[callable], parameter[call[name[getattr], parameter[name[plug], constant[configure], constant[None]]]]] begin[:] call[name[plug].configure, parameter[name[args]]] call[name[LOG].debug, parameter[constant[Available plugins: %s], name[self]._plugins]] name[self].plugins assign[=] <ast.ListComp object at 0x7da18c4cd510> call[name[LOG].debug, parameter[constant[Enabled plugins: %s], name[self].plugins]]
keyword[def] identifier[configure] ( identifier[self] , identifier[args] ): literal[string] keyword[for] identifier[plug] keyword[in] identifier[self] . identifier[_plugins] : identifier[plug_name] = identifier[self] . identifier[plugin_name] ( identifier[plug] ) identifier[plug] . identifier[enabled] = identifier[getattr] ( identifier[args] , literal[string] % identifier[plug_name] , keyword[False] ) keyword[if] identifier[plug] . identifier[enabled] keyword[and] identifier[getattr] ( identifier[plug] , literal[string] , keyword[None] ): keyword[if] identifier[callable] ( identifier[getattr] ( identifier[plug] , literal[string] , keyword[None] )): identifier[plug] . identifier[configure] ( identifier[args] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[self] . identifier[_plugins] ) identifier[self] . identifier[plugins] =[ identifier[plugin] keyword[for] identifier[plugin] keyword[in] identifier[self] . identifier[_plugins] keyword[if] identifier[getattr] ( identifier[plugin] , literal[string] , keyword[False] )] identifier[LOG] . identifier[debug] ( literal[string] , identifier[self] . identifier[plugins] )
def configure(self, args): """Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list. """ for plug in self._plugins: plug_name = self.plugin_name(plug) plug.enabled = getattr(args, 'plugin_%s' % plug_name, False) if plug.enabled and getattr(plug, 'configure', None): if callable(getattr(plug, 'configure', None)): plug.configure(args) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['plug']] LOG.debug('Available plugins: %s', self._plugins) self.plugins = [plugin for plugin in self._plugins if getattr(plugin, 'enabled', False)] LOG.debug('Enabled plugins: %s', self.plugins)
def filter_line(line: str, context: RunContext) -> typing.Optional[str]: """ Filters out lines that match a given regex :param line: line to filter :type line: str :param context: run context :type context: _RunContext :return: line if it doesn't match the filter :rtype: optional str """ if context.filters is not None: for filter_ in context.filters: if re.match(filter_, line): return None return line
def function[filter_line, parameter[line, context]]: constant[ Filters out lines that match a given regex :param line: line to filter :type line: str :param context: run context :type context: _RunContext :return: line if it doesn't match the filter :rtype: optional str ] if compare[name[context].filters is_not constant[None]] begin[:] for taget[name[filter_]] in starred[name[context].filters] begin[:] if call[name[re].match, parameter[name[filter_], name[line]]] begin[:] return[constant[None]] return[name[line]]
keyword[def] identifier[filter_line] ( identifier[line] : identifier[str] , identifier[context] : identifier[RunContext] )-> identifier[typing] . identifier[Optional] [ identifier[str] ]: literal[string] keyword[if] identifier[context] . identifier[filters] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[filter_] keyword[in] identifier[context] . identifier[filters] : keyword[if] identifier[re] . identifier[match] ( identifier[filter_] , identifier[line] ): keyword[return] keyword[None] keyword[return] identifier[line]
def filter_line(line: str, context: RunContext) -> typing.Optional[str]: """ Filters out lines that match a given regex :param line: line to filter :type line: str :param context: run context :type context: _RunContext :return: line if it doesn't match the filter :rtype: optional str """ if context.filters is not None: for filter_ in context.filters: if re.match(filter_, line): return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filter_']] # depends on [control=['if'], data=[]] return line
def get_hash(self) -> str: """ Returns a hash code representing the current state of this ``Params`` object. We don't want to implement ``__hash__`` because that has deeper python implications (and this is a mutable object), but this will give you a representation of the current state. """ return str(hash(json.dumps(self.params, sort_keys=True)))
def function[get_hash, parameter[self]]: constant[ Returns a hash code representing the current state of this ``Params`` object. We don't want to implement ``__hash__`` because that has deeper python implications (and this is a mutable object), but this will give you a representation of the current state. ] return[call[name[str], parameter[call[name[hash], parameter[call[name[json].dumps, parameter[name[self].params]]]]]]]
keyword[def] identifier[get_hash] ( identifier[self] )-> identifier[str] : literal[string] keyword[return] identifier[str] ( identifier[hash] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[params] , identifier[sort_keys] = keyword[True] )))
def get_hash(self) -> str: """ Returns a hash code representing the current state of this ``Params`` object. We don't want to implement ``__hash__`` because that has deeper python implications (and this is a mutable object), but this will give you a representation of the current state. """ return str(hash(json.dumps(self.params, sort_keys=True)))
def filter_desc(self, graintype=None, group=None, reference=None, size=None, phase=None): ''' This routine is to filter for description elements. You can check what is available in the description by running, >>> i.header_desc() where i is the instance you loaded. You can run the filter multiple times! You can filter for the following types: Parameters ---------- graintype : string or list Give graintypes as either 'M' for only mainstream or more than one ['M','Z']. group : integer or list Group of graintypes, important for oxides and silicates, since they are split into groups and not into types. Example 1, or give a list [1,3]. reference : string or list Give the reference you want to filter for, try an i.info() to pick the right name! You can select a single referennce as string or multiple references in as a list. size : string Filter for grain sizes, give '<5.0' or '>5.0' as a string for larger or smaller than a given grainsize in um. Only data with known grainsizes are chosen. Often grain sizes are given in a times b, where a and b are the minumum and maximum measurements from an image. If you give a >5.0 then grains with the smaller dimension >5um are taken into account. If you want <5.0 then grains with the upper dimension <5um are taken into account. 
''' # filter for graintype if graintype != None: indexing = [] # index file on which lines to pick if type(graintype) == str: graintype = [graintype] # filter for typ in graintype: for i in range(len(self.desc)): if self.desc[i][self.descdict['Type']] == typ: indexing.append(i) # filter: self._filter_desc(indexing) # filter for graintype if phase != None: indexing = [] # index file on which lines to pick if type(phase) == str: phase = [phase] # filter for typ in phase: for i in range(len(self.desc)): if self.desc[i][self.descdict['Phase']] == typ: indexing.append(i) # filter: self._filter_desc(indexing) # filter for group (oxides and silicates) if group != None: indexing = [] # index file on which lines to pick if type(group) != list: group = [group] # filter for grp in group: for i in range(len(self.desc)): if self.desc[i][self.descdict['Group']] == str(int(grp)): indexing.append(i) # filter: self._filter_desc(indexing) # filter for reference if reference != None: indexing = [] # index file on which lines to pick if type(reference) != list: reference = [reference] # filter for ri in range(len(reference)): for i in range(len(self.desc)): if self.desc[i][self.descdict['Reference']] == reference[ri]: indexing.append(i) # filter: self._filter_desc(indexing) # filter for grainzise if size != None: indexing = [] # index file on which lines to pick # filter operator = size[0:1] size = float(size[1:len(size)]) for i in range(len(self.desc)): if self.desc[i][self.descdict['Size (microns)']] != '': try: # print self.desc[i][self.descdict['Size (microns)']] comperator1 = self.desc[i][self.descdict['Size (microns)']].split('x')[0] comperator2 = self.desc[i][self.descdict['Size (microns)']].split('x')[1] comperator = [float(comperator1),float(comperator2)] if operator == '<': comperator = np.min(comperator) else: comperator = np.max(comperator) except IndexError or AttributeError: try: comperator = float(self.desc[i][self.descdict['Size (microns)']]) except ValueError: 
continue if operator == '>': if comperator > size: indexing.append(i) elif operator == '<': if comperator < size: indexing.append(i) else: continue # filter: self._filter_desc(indexing)
def function[filter_desc, parameter[self, graintype, group, reference, size, phase]]: constant[ This routine is to filter for description elements. You can check what is available in the description by running, >>> i.header_desc() where i is the instance you loaded. You can run the filter multiple times! You can filter for the following types: Parameters ---------- graintype : string or list Give graintypes as either 'M' for only mainstream or more than one ['M','Z']. group : integer or list Group of graintypes, important for oxides and silicates, since they are split into groups and not into types. Example 1, or give a list [1,3]. reference : string or list Give the reference you want to filter for, try an i.info() to pick the right name! You can select a single referennce as string or multiple references in as a list. size : string Filter for grain sizes, give '<5.0' or '>5.0' as a string for larger or smaller than a given grainsize in um. Only data with known grainsizes are chosen. Often grain sizes are given in a times b, where a and b are the minumum and maximum measurements from an image. If you give a >5.0 then grains with the smaller dimension >5um are taken into account. If you want <5.0 then grains with the upper dimension <5um are taken into account. 
] if compare[name[graintype] not_equal[!=] constant[None]] begin[:] variable[indexing] assign[=] list[[]] if compare[call[name[type], parameter[name[graintype]]] equal[==] name[str]] begin[:] variable[graintype] assign[=] list[[<ast.Name object at 0x7da1b1913910>]] for taget[name[typ]] in starred[name[graintype]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].desc]]]]] begin[:] if compare[call[call[name[self].desc][name[i]]][call[name[self].descdict][constant[Type]]] equal[==] name[typ]] begin[:] call[name[indexing].append, parameter[name[i]]] call[name[self]._filter_desc, parameter[name[indexing]]] if compare[name[phase] not_equal[!=] constant[None]] begin[:] variable[indexing] assign[=] list[[]] if compare[call[name[type], parameter[name[phase]]] equal[==] name[str]] begin[:] variable[phase] assign[=] list[[<ast.Name object at 0x7da1b1912f50>]] for taget[name[typ]] in starred[name[phase]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].desc]]]]] begin[:] if compare[call[call[name[self].desc][name[i]]][call[name[self].descdict][constant[Phase]]] equal[==] name[typ]] begin[:] call[name[indexing].append, parameter[name[i]]] call[name[self]._filter_desc, parameter[name[indexing]]] if compare[name[group] not_equal[!=] constant[None]] begin[:] variable[indexing] assign[=] list[[]] if compare[call[name[type], parameter[name[group]]] not_equal[!=] name[list]] begin[:] variable[group] assign[=] list[[<ast.Name object at 0x7da1b1912590>]] for taget[name[grp]] in starred[name[group]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].desc]]]]] begin[:] if compare[call[call[name[self].desc][name[i]]][call[name[self].descdict][constant[Group]]] equal[==] call[name[str], parameter[call[name[int], parameter[name[grp]]]]]] begin[:] call[name[indexing].append, parameter[name[i]]] call[name[self]._filter_desc, 
parameter[name[indexing]]] if compare[name[reference] not_equal[!=] constant[None]] begin[:] variable[indexing] assign[=] list[[]] if compare[call[name[type], parameter[name[reference]]] not_equal[!=] name[list]] begin[:] variable[reference] assign[=] list[[<ast.Name object at 0x7da1b1910a30>]] for taget[name[ri]] in starred[call[name[range], parameter[call[name[len], parameter[name[reference]]]]]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].desc]]]]] begin[:] if compare[call[call[name[self].desc][name[i]]][call[name[self].descdict][constant[Reference]]] equal[==] call[name[reference]][name[ri]]] begin[:] call[name[indexing].append, parameter[name[i]]] call[name[self]._filter_desc, parameter[name[indexing]]] if compare[name[size] not_equal[!=] constant[None]] begin[:] variable[indexing] assign[=] list[[]] variable[operator] assign[=] call[name[size]][<ast.Slice object at 0x7da1b1ab36a0>] variable[size] assign[=] call[name[float], parameter[call[name[size]][<ast.Slice object at 0x7da1b1ab3d90>]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].desc]]]]] begin[:] if compare[call[call[name[self].desc][name[i]]][call[name[self].descdict][constant[Size (microns)]]] not_equal[!=] constant[]] begin[:] <ast.Try object at 0x7da1b1ab3d30> if compare[name[operator] equal[==] constant[>]] begin[:] if compare[name[comperator] greater[>] name[size]] begin[:] call[name[indexing].append, parameter[name[i]]] call[name[self]._filter_desc, parameter[name[indexing]]]
keyword[def] identifier[filter_desc] ( identifier[self] , identifier[graintype] = keyword[None] , identifier[group] = keyword[None] , identifier[reference] = keyword[None] , identifier[size] = keyword[None] , identifier[phase] = keyword[None] ): literal[string] keyword[if] identifier[graintype] != keyword[None] : identifier[indexing] =[] keyword[if] identifier[type] ( identifier[graintype] )== identifier[str] : identifier[graintype] =[ identifier[graintype] ] keyword[for] identifier[typ] keyword[in] identifier[graintype] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[desc] )): keyword[if] identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]== identifier[typ] : identifier[indexing] . identifier[append] ( identifier[i] ) identifier[self] . identifier[_filter_desc] ( identifier[indexing] ) keyword[if] identifier[phase] != keyword[None] : identifier[indexing] =[] keyword[if] identifier[type] ( identifier[phase] )== identifier[str] : identifier[phase] =[ identifier[phase] ] keyword[for] identifier[typ] keyword[in] identifier[phase] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[desc] )): keyword[if] identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]== identifier[typ] : identifier[indexing] . identifier[append] ( identifier[i] ) identifier[self] . identifier[_filter_desc] ( identifier[indexing] ) keyword[if] identifier[group] != keyword[None] : identifier[indexing] =[] keyword[if] identifier[type] ( identifier[group] )!= identifier[list] : identifier[group] =[ identifier[group] ] keyword[for] identifier[grp] keyword[in] identifier[group] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[desc] )): keyword[if] identifier[self] . 
identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]== identifier[str] ( identifier[int] ( identifier[grp] )): identifier[indexing] . identifier[append] ( identifier[i] ) identifier[self] . identifier[_filter_desc] ( identifier[indexing] ) keyword[if] identifier[reference] != keyword[None] : identifier[indexing] =[] keyword[if] identifier[type] ( identifier[reference] )!= identifier[list] : identifier[reference] =[ identifier[reference] ] keyword[for] identifier[ri] keyword[in] identifier[range] ( identifier[len] ( identifier[reference] )): keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[desc] )): keyword[if] identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]== identifier[reference] [ identifier[ri] ]: identifier[indexing] . identifier[append] ( identifier[i] ) identifier[self] . identifier[_filter_desc] ( identifier[indexing] ) keyword[if] identifier[size] != keyword[None] : identifier[indexing] =[] identifier[operator] = identifier[size] [ literal[int] : literal[int] ] identifier[size] = identifier[float] ( identifier[size] [ literal[int] : identifier[len] ( identifier[size] )]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[desc] )): keyword[if] identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]!= literal[string] : keyword[try] : identifier[comperator1] = identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]. identifier[split] ( literal[string] )[ literal[int] ] identifier[comperator2] = identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]. 
identifier[split] ( literal[string] )[ literal[int] ] identifier[comperator] =[ identifier[float] ( identifier[comperator1] ), identifier[float] ( identifier[comperator2] )] keyword[if] identifier[operator] == literal[string] : identifier[comperator] = identifier[np] . identifier[min] ( identifier[comperator] ) keyword[else] : identifier[comperator] = identifier[np] . identifier[max] ( identifier[comperator] ) keyword[except] identifier[IndexError] keyword[or] identifier[AttributeError] : keyword[try] : identifier[comperator] = identifier[float] ( identifier[self] . identifier[desc] [ identifier[i] ][ identifier[self] . identifier[descdict] [ literal[string] ]]) keyword[except] identifier[ValueError] : keyword[continue] keyword[if] identifier[operator] == literal[string] : keyword[if] identifier[comperator] > identifier[size] : identifier[indexing] . identifier[append] ( identifier[i] ) keyword[elif] identifier[operator] == literal[string] : keyword[if] identifier[comperator] < identifier[size] : identifier[indexing] . identifier[append] ( identifier[i] ) keyword[else] : keyword[continue] identifier[self] . identifier[_filter_desc] ( identifier[indexing] )
def filter_desc(self, graintype=None, group=None, reference=None, size=None, phase=None): """ This routine is to filter for description elements. You can check what is available in the description by running, >>> i.header_desc() where i is the instance you loaded. You can run the filter multiple times! You can filter for the following types: Parameters ---------- graintype : string or list Give graintypes as either 'M' for only mainstream or more than one ['M','Z']. group : integer or list Group of graintypes, important for oxides and silicates, since they are split into groups and not into types. Example 1, or give a list [1,3]. reference : string or list Give the reference you want to filter for, try an i.info() to pick the right name! You can select a single referennce as string or multiple references in as a list. size : string Filter for grain sizes, give '<5.0' or '>5.0' as a string for larger or smaller than a given grainsize in um. Only data with known grainsizes are chosen. Often grain sizes are given in a times b, where a and b are the minumum and maximum measurements from an image. If you give a >5.0 then grains with the smaller dimension >5um are taken into account. If you want <5.0 then grains with the upper dimension <5um are taken into account. 
""" # filter for graintype if graintype != None: indexing = [] # index file on which lines to pick if type(graintype) == str: graintype = [graintype] # depends on [control=['if'], data=[]] # filter for typ in graintype: for i in range(len(self.desc)): if self.desc[i][self.descdict['Type']] == typ: indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['typ']] # filter: self._filter_desc(indexing) # depends on [control=['if'], data=['graintype']] # filter for graintype if phase != None: indexing = [] # index file on which lines to pick if type(phase) == str: phase = [phase] # depends on [control=['if'], data=[]] # filter for typ in phase: for i in range(len(self.desc)): if self.desc[i][self.descdict['Phase']] == typ: indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['typ']] # filter: self._filter_desc(indexing) # depends on [control=['if'], data=['phase']] # filter for group (oxides and silicates) if group != None: indexing = [] # index file on which lines to pick if type(group) != list: group = [group] # depends on [control=['if'], data=[]] # filter for grp in group: for i in range(len(self.desc)): if self.desc[i][self.descdict['Group']] == str(int(grp)): indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['grp']] # filter: self._filter_desc(indexing) # depends on [control=['if'], data=['group']] # filter for reference if reference != None: indexing = [] # index file on which lines to pick if type(reference) != list: reference = [reference] # depends on [control=['if'], data=[]] # filter for ri in range(len(reference)): for i in range(len(self.desc)): if self.desc[i][self.descdict['Reference']] == reference[ri]: indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # 
depends on [control=['for'], data=['ri']] # filter: self._filter_desc(indexing) # depends on [control=['if'], data=['reference']] # filter for grainzise if size != None: indexing = [] # index file on which lines to pick # filter operator = size[0:1] size = float(size[1:len(size)]) for i in range(len(self.desc)): if self.desc[i][self.descdict['Size (microns)']] != '': try: # print self.desc[i][self.descdict['Size (microns)']] comperator1 = self.desc[i][self.descdict['Size (microns)']].split('x')[0] comperator2 = self.desc[i][self.descdict['Size (microns)']].split('x')[1] comperator = [float(comperator1), float(comperator2)] if operator == '<': comperator = np.min(comperator) # depends on [control=['if'], data=[]] else: comperator = np.max(comperator) # depends on [control=['try'], data=[]] except IndexError or AttributeError: try: comperator = float(self.desc[i][self.descdict['Size (microns)']]) # depends on [control=['try'], data=[]] except ValueError: continue # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] if operator == '>': if comperator > size: indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif operator == '<': if comperator < size: indexing.append(i) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # filter: self._filter_desc(indexing) # depends on [control=['if'], data=['size']]
def create_definition(self, definition, project, definition_to_clone_id=None, definition_to_clone_revision=None): """CreateDefinition. Creates a new definition. :param :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` definition: The definition. :param str project: Project ID or project name :param int definition_to_clone_id: :param int definition_to_clone_revision: :rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if definition_to_clone_id is not None: query_parameters['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int') if definition_to_clone_revision is not None: query_parameters['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int') content = self._serialize.body(definition, 'BuildDefinition') response = self._send(http_method='POST', location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('BuildDefinition', response)
def function[create_definition, parameter[self, definition, project, definition_to_clone_id, definition_to_clone_revision]]: constant[CreateDefinition. Creates a new definition. :param :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` definition: The definition. :param str project: Project ID or project name :param int definition_to_clone_id: :param int definition_to_clone_revision: :rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` ] variable[route_values] assign[=] dictionary[[], []] if compare[name[project] is_not constant[None]] begin[:] call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]] variable[query_parameters] assign[=] dictionary[[], []] if compare[name[definition_to_clone_id] is_not constant[None]] begin[:] call[name[query_parameters]][constant[definitionToCloneId]] assign[=] call[name[self]._serialize.query, parameter[constant[definition_to_clone_id], name[definition_to_clone_id], constant[int]]] if compare[name[definition_to_clone_revision] is_not constant[None]] begin[:] call[name[query_parameters]][constant[definitionToCloneRevision]] assign[=] call[name[self]._serialize.query, parameter[constant[definition_to_clone_revision], name[definition_to_clone_revision], constant[int]]] variable[content] assign[=] call[name[self]._serialize.body, parameter[name[definition], constant[BuildDefinition]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[BuildDefinition], name[response]]]]
keyword[def] identifier[create_definition] ( identifier[self] , identifier[definition] , identifier[project] , identifier[definition_to_clone_id] = keyword[None] , identifier[definition_to_clone_revision] = keyword[None] ): literal[string] identifier[route_values] ={} keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] ) identifier[query_parameters] ={} keyword[if] identifier[definition_to_clone_id] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[definition_to_clone_id] , literal[string] ) keyword[if] identifier[definition_to_clone_revision] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[definition_to_clone_revision] , literal[string] ) identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[definition] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[query_parameters] = identifier[query_parameters] , identifier[content] = identifier[content] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
def create_definition(self, definition, project, definition_to_clone_id=None, definition_to_clone_revision=None): """CreateDefinition. Creates a new definition. :param :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` definition: The definition. :param str project: Project ID or project name :param int definition_to_clone_id: :param int definition_to_clone_revision: :rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']] query_parameters = {} if definition_to_clone_id is not None: query_parameters['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int') # depends on [control=['if'], data=['definition_to_clone_id']] if definition_to_clone_revision is not None: query_parameters['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int') # depends on [control=['if'], data=['definition_to_clone_revision']] content = self._serialize.body(definition, 'BuildDefinition') response = self._send(http_method='POST', location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('BuildDefinition', response)
def _get_dcd(self, alias): """ Get the Docker-Content-Digest header for an alias. :param alias: Alias name. :type alias: str :rtype: str :returns: DCD header for the alias. """ # https://docs.docker.com/registry/spec/api/#deleting-an-image # Note When deleting a manifest from a registry version 2.3 or later, # the following header must be used when HEAD or GET-ing the manifest # to obtain the correct digest to delete: # Accept: application/vnd.docker.distribution.manifest.v2+json return self._request( 'head', 'manifests/{}'.format(alias), headers={'Accept': _schema2_mimetype}, ).headers.get('Docker-Content-Digest')
def function[_get_dcd, parameter[self, alias]]: constant[ Get the Docker-Content-Digest header for an alias. :param alias: Alias name. :type alias: str :rtype: str :returns: DCD header for the alias. ] return[call[call[name[self]._request, parameter[constant[head], call[constant[manifests/{}].format, parameter[name[alias]]]]].headers.get, parameter[constant[Docker-Content-Digest]]]]
keyword[def] identifier[_get_dcd] ( identifier[self] , identifier[alias] ): literal[string] keyword[return] identifier[self] . identifier[_request] ( literal[string] , literal[string] . identifier[format] ( identifier[alias] ), identifier[headers] ={ literal[string] : identifier[_schema2_mimetype] }, ). identifier[headers] . identifier[get] ( literal[string] )
def _get_dcd(self, alias): """ Get the Docker-Content-Digest header for an alias. :param alias: Alias name. :type alias: str :rtype: str :returns: DCD header for the alias. """ # https://docs.docker.com/registry/spec/api/#deleting-an-image # Note When deleting a manifest from a registry version 2.3 or later, # the following header must be used when HEAD or GET-ing the manifest # to obtain the correct digest to delete: # Accept: application/vnd.docker.distribution.manifest.v2+json return self._request('head', 'manifests/{}'.format(alias), headers={'Accept': _schema2_mimetype}).headers.get('Docker-Content-Digest')
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None): ''' Parse EEP based on FUNC and TYPE ''' # set EEP profile, if demanded if rorg_func is not None and rorg_type is not None: self.select_eep(rorg_func, rorg_type, direction, command) # parse data provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status) self.parsed.update(values) return list(provides)
def function[parse_eep, parameter[self, rorg_func, rorg_type, direction, command]]: constant[ Parse EEP based on FUNC and TYPE ] if <ast.BoolOp object at 0x7da1b07ba7d0> begin[:] call[name[self].select_eep, parameter[name[rorg_func], name[rorg_type], name[direction], name[command]]] <ast.Tuple object at 0x7da1b07bafe0> assign[=] call[name[self].eep.get_values, parameter[name[self]._profile, name[self]._bit_data, name[self]._bit_status]] call[name[self].parsed.update, parameter[name[values]]] return[call[name[list], parameter[name[provides]]]]
keyword[def] identifier[parse_eep] ( identifier[self] , identifier[rorg_func] = keyword[None] , identifier[rorg_type] = keyword[None] , identifier[direction] = keyword[None] , identifier[command] = keyword[None] ): literal[string] keyword[if] identifier[rorg_func] keyword[is] keyword[not] keyword[None] keyword[and] identifier[rorg_type] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[select_eep] ( identifier[rorg_func] , identifier[rorg_type] , identifier[direction] , identifier[command] ) identifier[provides] , identifier[values] = identifier[self] . identifier[eep] . identifier[get_values] ( identifier[self] . identifier[_profile] , identifier[self] . identifier[_bit_data] , identifier[self] . identifier[_bit_status] ) identifier[self] . identifier[parsed] . identifier[update] ( identifier[values] ) keyword[return] identifier[list] ( identifier[provides] )
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None): """ Parse EEP based on FUNC and TYPE """ # set EEP profile, if demanded if rorg_func is not None and rorg_type is not None: self.select_eep(rorg_func, rorg_type, direction, command) # depends on [control=['if'], data=[]] # parse data (provides, values) = self.eep.get_values(self._profile, self._bit_data, self._bit_status) self.parsed.update(values) return list(provides)
def step_note_that(context, remark): """ Used as generic step that provides an additional remark/hint and enhance the readability/understanding without performing any check. .. code-block:: gherkin Given that today is "April 1st" But note that "April 1st is Fools day (and beware)" """ log = getattr(context, "log", None) if log: log.info(u"NOTE: %s;" % remark)
def function[step_note_that, parameter[context, remark]]: constant[ Used as generic step that provides an additional remark/hint and enhance the readability/understanding without performing any check. .. code-block:: gherkin Given that today is "April 1st" But note that "April 1st is Fools day (and beware)" ] variable[log] assign[=] call[name[getattr], parameter[name[context], constant[log], constant[None]]] if name[log] begin[:] call[name[log].info, parameter[binary_operation[constant[NOTE: %s;] <ast.Mod object at 0x7da2590d6920> name[remark]]]]
keyword[def] identifier[step_note_that] ( identifier[context] , identifier[remark] ): literal[string] identifier[log] = identifier[getattr] ( identifier[context] , literal[string] , keyword[None] ) keyword[if] identifier[log] : identifier[log] . identifier[info] ( literal[string] % identifier[remark] )
def step_note_that(context, remark): """ Used as generic step that provides an additional remark/hint and enhance the readability/understanding without performing any check. .. code-block:: gherkin Given that today is "April 1st" But note that "April 1st is Fools day (and beware)" """ log = getattr(context, 'log', None) if log: log.info(u'NOTE: %s;' % remark) # depends on [control=['if'], data=[]]
def get_getter(self, kind, sid): """ :param kind: 'poe' or 'gmf' :param sid: a site ID :returns: a PmapGetter or GmfDataGetter """ hdf5cache = getattr(self, 'hdf5cache', None) if hdf5cache: dstore = hdf5cache elif (self.oqparam.hazard_calculation_id and 'gmf_data' not in self.datastore): # 'gmf_data' in self.datastore happens for ShakeMap calculations self.datastore.parent.close() # make sure it is closed dstore = self.datastore.parent else: dstore = self.datastore if kind == 'poe': # hcurves, shape (R, N) getter = getters.PmapGetter(dstore, self.rlzs_assoc, [sid]) else: # gmf getter = getters.GmfDataGetter(dstore, [sid], self.R) if dstore is self.datastore: getter.init() return getter
def function[get_getter, parameter[self, kind, sid]]: constant[ :param kind: 'poe' or 'gmf' :param sid: a site ID :returns: a PmapGetter or GmfDataGetter ] variable[hdf5cache] assign[=] call[name[getattr], parameter[name[self], constant[hdf5cache], constant[None]]] if name[hdf5cache] begin[:] variable[dstore] assign[=] name[hdf5cache] if compare[name[kind] equal[==] constant[poe]] begin[:] variable[getter] assign[=] call[name[getters].PmapGetter, parameter[name[dstore], name[self].rlzs_assoc, list[[<ast.Name object at 0x7da20c794670>]]]] if compare[name[dstore] is name[self].datastore] begin[:] call[name[getter].init, parameter[]] return[name[getter]]
keyword[def] identifier[get_getter] ( identifier[self] , identifier[kind] , identifier[sid] ): literal[string] identifier[hdf5cache] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] identifier[hdf5cache] : identifier[dstore] = identifier[hdf5cache] keyword[elif] ( identifier[self] . identifier[oqparam] . identifier[hazard_calculation_id] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[datastore] ): identifier[self] . identifier[datastore] . identifier[parent] . identifier[close] () identifier[dstore] = identifier[self] . identifier[datastore] . identifier[parent] keyword[else] : identifier[dstore] = identifier[self] . identifier[datastore] keyword[if] identifier[kind] == literal[string] : identifier[getter] = identifier[getters] . identifier[PmapGetter] ( identifier[dstore] , identifier[self] . identifier[rlzs_assoc] ,[ identifier[sid] ]) keyword[else] : identifier[getter] = identifier[getters] . identifier[GmfDataGetter] ( identifier[dstore] ,[ identifier[sid] ], identifier[self] . identifier[R] ) keyword[if] identifier[dstore] keyword[is] identifier[self] . identifier[datastore] : identifier[getter] . identifier[init] () keyword[return] identifier[getter]
def get_getter(self, kind, sid): """ :param kind: 'poe' or 'gmf' :param sid: a site ID :returns: a PmapGetter or GmfDataGetter """ hdf5cache = getattr(self, 'hdf5cache', None) if hdf5cache: dstore = hdf5cache # depends on [control=['if'], data=[]] elif self.oqparam.hazard_calculation_id and 'gmf_data' not in self.datastore: # 'gmf_data' in self.datastore happens for ShakeMap calculations self.datastore.parent.close() # make sure it is closed dstore = self.datastore.parent # depends on [control=['if'], data=[]] else: dstore = self.datastore if kind == 'poe': # hcurves, shape (R, N) getter = getters.PmapGetter(dstore, self.rlzs_assoc, [sid]) # depends on [control=['if'], data=[]] else: # gmf getter = getters.GmfDataGetter(dstore, [sid], self.R) if dstore is self.datastore: getter.init() # depends on [control=['if'], data=[]] return getter
def set_scene_color(self, scene_id, color): """reconfigure a scene by scene ID""" if not scene_id in self.state.scenes: # does that scene_id exist? err_msg = "Requested to recolor scene {sceneNum}, which does not exist".format(sceneNum=scene_id) logging.info(err_msg) return(False, 0, err_msg) self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(color=color) sequence_number = self.zmq_publisher.publish_scene_color(scene_id, color) logging.debug("Recolored scene {sceneNum}".format(sceneNum=scene_id)) if scene_id == self.state.activeSceneId: self.state.activeAnimation.set_color(color) self._do_next_frame() # TODO: make it more sensible, e.g. call only if static scene return (True, sequence_number, "OK")
def function[set_scene_color, parameter[self, scene_id, color]]: constant[reconfigure a scene by scene ID] if <ast.UnaryOp object at 0x7da1b15b17b0> begin[:] variable[err_msg] assign[=] call[constant[Requested to recolor scene {sceneNum}, which does not exist].format, parameter[]] call[name[logging].info, parameter[name[err_msg]]] return[tuple[[<ast.Constant object at 0x7da1b15fb1f0>, <ast.Constant object at 0x7da1b15fbca0>, <ast.Name object at 0x7da1b15fb310>]]] call[name[self].state.scenes][name[scene_id]] assign[=] call[call[name[self].state.scenes][name[scene_id]]._replace, parameter[]] variable[sequence_number] assign[=] call[name[self].zmq_publisher.publish_scene_color, parameter[name[scene_id], name[color]]] call[name[logging].debug, parameter[call[constant[Recolored scene {sceneNum}].format, parameter[]]]] if compare[name[scene_id] equal[==] name[self].state.activeSceneId] begin[:] call[name[self].state.activeAnimation.set_color, parameter[name[color]]] call[name[self]._do_next_frame, parameter[]] return[tuple[[<ast.Constant object at 0x7da2041da110>, <ast.Name object at 0x7da2041d8b80>, <ast.Constant object at 0x7da2041d9630>]]]
keyword[def] identifier[set_scene_color] ( identifier[self] , identifier[scene_id] , identifier[color] ): literal[string] keyword[if] keyword[not] identifier[scene_id] keyword[in] identifier[self] . identifier[state] . identifier[scenes] : identifier[err_msg] = literal[string] . identifier[format] ( identifier[sceneNum] = identifier[scene_id] ) identifier[logging] . identifier[info] ( identifier[err_msg] ) keyword[return] ( keyword[False] , literal[int] , identifier[err_msg] ) identifier[self] . identifier[state] . identifier[scenes] [ identifier[scene_id] ]= identifier[self] . identifier[state] . identifier[scenes] [ identifier[scene_id] ]. identifier[_replace] ( identifier[color] = identifier[color] ) identifier[sequence_number] = identifier[self] . identifier[zmq_publisher] . identifier[publish_scene_color] ( identifier[scene_id] , identifier[color] ) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sceneNum] = identifier[scene_id] )) keyword[if] identifier[scene_id] == identifier[self] . identifier[state] . identifier[activeSceneId] : identifier[self] . identifier[state] . identifier[activeAnimation] . identifier[set_color] ( identifier[color] ) identifier[self] . identifier[_do_next_frame] () keyword[return] ( keyword[True] , identifier[sequence_number] , literal[string] )
def set_scene_color(self, scene_id, color): """reconfigure a scene by scene ID""" if not scene_id in self.state.scenes: # does that scene_id exist? err_msg = 'Requested to recolor scene {sceneNum}, which does not exist'.format(sceneNum=scene_id) logging.info(err_msg) return (False, 0, err_msg) # depends on [control=['if'], data=[]] self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(color=color) sequence_number = self.zmq_publisher.publish_scene_color(scene_id, color) logging.debug('Recolored scene {sceneNum}'.format(sceneNum=scene_id)) if scene_id == self.state.activeSceneId: self.state.activeAnimation.set_color(color) self._do_next_frame() # TODO: make it more sensible, e.g. call only if static scene # depends on [control=['if'], data=[]] return (True, sequence_number, 'OK')
def set_to_numa_nodemask(mask): """ Conver Python set to NUMA nodemask. """ result = nodemask_t() __nodemask_zero(result) for i in range(0, get_max_node() + 1): if i in mask: __nodemask_set(result, i) return result
def function[set_to_numa_nodemask, parameter[mask]]: constant[ Conver Python set to NUMA nodemask. ] variable[result] assign[=] call[name[nodemask_t], parameter[]] call[name[__nodemask_zero], parameter[name[result]]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[call[name[get_max_node], parameter[]] + constant[1]]]]] begin[:] if compare[name[i] in name[mask]] begin[:] call[name[__nodemask_set], parameter[name[result], name[i]]] return[name[result]]
keyword[def] identifier[set_to_numa_nodemask] ( identifier[mask] ): literal[string] identifier[result] = identifier[nodemask_t] () identifier[__nodemask_zero] ( identifier[result] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[get_max_node] ()+ literal[int] ): keyword[if] identifier[i] keyword[in] identifier[mask] : identifier[__nodemask_set] ( identifier[result] , identifier[i] ) keyword[return] identifier[result]
def set_to_numa_nodemask(mask): """ Conver Python set to NUMA nodemask. """ result = nodemask_t() __nodemask_zero(result) for i in range(0, get_max_node() + 1): if i in mask: __nodemask_set(result, i) # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=['i']] return result
def memoized_concentric_hexagons(radius): """A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons` which memoizes the coordinates and stores them as a tuple. Note that the caller must manually offset the coordinates as required. This wrapper is used to avoid the need to repeatedly call :py:func:`rig.geometry.concentric_hexagons` for every sink in a network. This results in a relatively minor speedup (but at equally minor cost) in large networks. """ out = _concentric_hexagons.get(radius) if out is None: out = tuple(concentric_hexagons(radius)) _concentric_hexagons[radius] = out return out
def function[memoized_concentric_hexagons, parameter[radius]]: constant[A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons` which memoizes the coordinates and stores them as a tuple. Note that the caller must manually offset the coordinates as required. This wrapper is used to avoid the need to repeatedly call :py:func:`rig.geometry.concentric_hexagons` for every sink in a network. This results in a relatively minor speedup (but at equally minor cost) in large networks. ] variable[out] assign[=] call[name[_concentric_hexagons].get, parameter[name[radius]]] if compare[name[out] is constant[None]] begin[:] variable[out] assign[=] call[name[tuple], parameter[call[name[concentric_hexagons], parameter[name[radius]]]]] call[name[_concentric_hexagons]][name[radius]] assign[=] name[out] return[name[out]]
keyword[def] identifier[memoized_concentric_hexagons] ( identifier[radius] ): literal[string] identifier[out] = identifier[_concentric_hexagons] . identifier[get] ( identifier[radius] ) keyword[if] identifier[out] keyword[is] keyword[None] : identifier[out] = identifier[tuple] ( identifier[concentric_hexagons] ( identifier[radius] )) identifier[_concentric_hexagons] [ identifier[radius] ]= identifier[out] keyword[return] identifier[out]
def memoized_concentric_hexagons(radius): """A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons` which memoizes the coordinates and stores them as a tuple. Note that the caller must manually offset the coordinates as required. This wrapper is used to avoid the need to repeatedly call :py:func:`rig.geometry.concentric_hexagons` for every sink in a network. This results in a relatively minor speedup (but at equally minor cost) in large networks. """ out = _concentric_hexagons.get(radius) if out is None: out = tuple(concentric_hexagons(radius)) _concentric_hexagons[radius] = out # depends on [control=['if'], data=['out']] return out
def main(): """Entry point to dcgan""" print("|------- new changes!!!!!!!!!") # to get the dataset and net configuration train_data, val_data = get_dataset(dataset) netG = get_netG() netD = get_netD() loss, trainerG, trainerD = get_configurations(netG, netD) # set labels real_label = mx.nd.ones((opt.batch_size,), ctx=ctx) fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx) metric = mx.metric.Accuracy() print('Training... ') stamp = datetime.now().strftime('%Y_%m_%d-%H_%M') iter = 0 # to metric the network loss_d = [] loss_g = [] inception_score = [] for epoch in range(opt.nepoch): tic = time.time() btic = time.time() for data, _ in train_data: ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### # train with real_t data = data.as_in_context(ctx) noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx) with autograd.record(): output = netD(data) # reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2) output = output.reshape((opt.batch_size, 2)) errD_real = loss(output, real_label) metric.update([real_label, ], [output, ]) with autograd.record(): fake = netG(noise) output = netD(fake.detach()) output = output.reshape((opt.batch_size, 2)) errD_fake = loss(output, fake_label) errD = errD_real + errD_fake errD.backward() metric.update([fake_label,], [output,]) trainerD.step(opt.batch_size) ############################ # (2) Update G network: maximize log(D(G(z))) ########################### with autograd.record(): output = netD(fake) output = output.reshape((-1, 2)) errG = loss(output, real_label) errG.backward() trainerG.step(opt.batch_size) name, acc = metric.get() logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' , mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch) if iter % niter == 0: visual('gout', fake.asnumpy(), name=os.path.join(outf, 'fake_img_iter_%d.png' % iter)) visual('data', 
data.asnumpy(), name=os.path.join(outf, 'real_img_iter_%d.png' % iter)) # record the metric data loss_d.append(errD) loss_g.append(errG) if opt.inception_score: score, _ = get_inception_score(fake) inception_score.append(score) iter = iter + 1 btic = time.time() name, acc = metric.get() metric.reset() logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name, acc) logging.info('time: %f', time.time() - tic) # save check_point if check_point: netG.save_parameters(os.path.join(outf, 'generator_epoch_%d.params' %epoch)) netD.save_parameters(os.path.join(outf, 'discriminator_epoch_%d.params' % epoch)) # save parameter netG.save_parameters(os.path.join(outf, 'generator.params')) netD.save_parameters(os.path.join(outf, 'discriminator.params')) # visualization the inception_score as a picture if opt.inception_score: ins_save(inception_score)
def function[main, parameter[]]: constant[Entry point to dcgan] call[name[print], parameter[constant[|------- new changes!!!!!!!!!]]] <ast.Tuple object at 0x7da1b1fbb940> assign[=] call[name[get_dataset], parameter[name[dataset]]] variable[netG] assign[=] call[name[get_netG], parameter[]] variable[netD] assign[=] call[name[get_netD], parameter[]] <ast.Tuple object at 0x7da1b1fbbc70> assign[=] call[name[get_configurations], parameter[name[netG], name[netD]]] variable[real_label] assign[=] call[name[mx].nd.ones, parameter[tuple[[<ast.Attribute object at 0x7da1b1fb8a00>]]]] variable[fake_label] assign[=] call[name[mx].nd.zeros, parameter[tuple[[<ast.Attribute object at 0x7da1b1fb80a0>]]]] variable[metric] assign[=] call[name[mx].metric.Accuracy, parameter[]] call[name[print], parameter[constant[Training... ]]] variable[stamp] assign[=] call[call[name[datetime].now, parameter[]].strftime, parameter[constant[%Y_%m_%d-%H_%M]]] variable[iter] assign[=] constant[0] variable[loss_d] assign[=] list[[]] variable[loss_g] assign[=] list[[]] variable[inception_score] assign[=] list[[]] for taget[name[epoch]] in starred[call[name[range], parameter[name[opt].nepoch]]] begin[:] variable[tic] assign[=] call[name[time].time, parameter[]] variable[btic] assign[=] call[name[time].time, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1fb81f0>, <ast.Name object at 0x7da1b1fb8220>]]] in starred[name[train_data]] begin[:] variable[data] assign[=] call[name[data].as_in_context, parameter[name[ctx]]] variable[noise] assign[=] call[name[mx].nd.random.normal, parameter[constant[0], constant[1]]] with call[name[autograd].record, parameter[]] begin[:] variable[output] assign[=] call[name[netD], parameter[name[data]]] variable[output] assign[=] call[name[output].reshape, parameter[tuple[[<ast.Attribute object at 0x7da1b1f75d50>, <ast.Constant object at 0x7da1b1f74460>]]]] variable[errD_real] assign[=] call[name[loss], parameter[name[output], name[real_label]]] call[name[metric].update, 
parameter[list[[<ast.Name object at 0x7da1b1f76740>]], list[[<ast.Name object at 0x7da1b1f74d60>]]]] with call[name[autograd].record, parameter[]] begin[:] variable[fake] assign[=] call[name[netG], parameter[name[noise]]] variable[output] assign[=] call[name[netD], parameter[call[name[fake].detach, parameter[]]]] variable[output] assign[=] call[name[output].reshape, parameter[tuple[[<ast.Attribute object at 0x7da1b1f74f10>, <ast.Constant object at 0x7da1b1f75180>]]]] variable[errD_fake] assign[=] call[name[loss], parameter[name[output], name[fake_label]]] variable[errD] assign[=] binary_operation[name[errD_real] + name[errD_fake]] call[name[errD].backward, parameter[]] call[name[metric].update, parameter[list[[<ast.Name object at 0x7da1b1f75990>]], list[[<ast.Name object at 0x7da1b1f74b80>]]]] call[name[trainerD].step, parameter[name[opt].batch_size]] with call[name[autograd].record, parameter[]] begin[:] variable[output] assign[=] call[name[netD], parameter[name[fake]]] variable[output] assign[=] call[name[output].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1f749d0>, <ast.Constant object at 0x7da1b1f74ca0>]]]] variable[errG] assign[=] call[name[loss], parameter[name[output], name[real_label]]] call[name[errG].backward, parameter[]] call[name[trainerG].step, parameter[name[opt].batch_size]] <ast.Tuple object at 0x7da1b1f77100> assign[=] call[name[metric].get, parameter[]] call[name[logging].info, parameter[constant[discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d], call[call[name[mx].nd.mean, parameter[name[errD]]].asscalar, parameter[]], call[call[name[mx].nd.mean, parameter[name[errG]]].asscalar, parameter[]], name[acc], name[iter], name[epoch]]] if compare[binary_operation[name[iter] <ast.Mod object at 0x7da2590d6920> name[niter]] equal[==] constant[0]] begin[:] call[name[visual], parameter[constant[gout], call[name[fake].asnumpy, parameter[]]]] call[name[visual], parameter[constant[data], 
call[name[data].asnumpy, parameter[]]]] call[name[loss_d].append, parameter[name[errD]]] call[name[loss_g].append, parameter[name[errG]]] if name[opt].inception_score begin[:] <ast.Tuple object at 0x7da1b1f77cd0> assign[=] call[name[get_inception_score], parameter[name[fake]]] call[name[inception_score].append, parameter[name[score]]] variable[iter] assign[=] binary_operation[name[iter] + constant[1]] variable[btic] assign[=] call[name[time].time, parameter[]] <ast.Tuple object at 0x7da1b1f76cb0> assign[=] call[name[metric].get, parameter[]] call[name[metric].reset, parameter[]] call[name[logging].info, parameter[constant[ binary training acc at epoch %d: %s=%f], name[epoch], name[name], name[acc]]] call[name[logging].info, parameter[constant[time: %f], binary_operation[call[name[time].time, parameter[]] - name[tic]]]] if name[check_point] begin[:] call[name[netG].save_parameters, parameter[call[name[os].path.join, parameter[name[outf], binary_operation[constant[generator_epoch_%d.params] <ast.Mod object at 0x7da2590d6920> name[epoch]]]]]] call[name[netD].save_parameters, parameter[call[name[os].path.join, parameter[name[outf], binary_operation[constant[discriminator_epoch_%d.params] <ast.Mod object at 0x7da2590d6920> name[epoch]]]]]] call[name[netG].save_parameters, parameter[call[name[os].path.join, parameter[name[outf], constant[generator.params]]]]] call[name[netD].save_parameters, parameter[call[name[os].path.join, parameter[name[outf], constant[discriminator.params]]]]] if name[opt].inception_score begin[:] call[name[ins_save], parameter[name[inception_score]]]
keyword[def] identifier[main] (): literal[string] identifier[print] ( literal[string] ) identifier[train_data] , identifier[val_data] = identifier[get_dataset] ( identifier[dataset] ) identifier[netG] = identifier[get_netG] () identifier[netD] = identifier[get_netD] () identifier[loss] , identifier[trainerG] , identifier[trainerD] = identifier[get_configurations] ( identifier[netG] , identifier[netD] ) identifier[real_label] = identifier[mx] . identifier[nd] . identifier[ones] (( identifier[opt] . identifier[batch_size] ,), identifier[ctx] = identifier[ctx] ) identifier[fake_label] = identifier[mx] . identifier[nd] . identifier[zeros] (( identifier[opt] . identifier[batch_size] ,), identifier[ctx] = identifier[ctx] ) identifier[metric] = identifier[mx] . identifier[metric] . identifier[Accuracy] () identifier[print] ( literal[string] ) identifier[stamp] = identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] ) identifier[iter] = literal[int] identifier[loss_d] =[] identifier[loss_g] =[] identifier[inception_score] =[] keyword[for] identifier[epoch] keyword[in] identifier[range] ( identifier[opt] . identifier[nepoch] ): identifier[tic] = identifier[time] . identifier[time] () identifier[btic] = identifier[time] . identifier[time] () keyword[for] identifier[data] , identifier[_] keyword[in] identifier[train_data] : identifier[data] = identifier[data] . identifier[as_in_context] ( identifier[ctx] ) identifier[noise] = identifier[mx] . identifier[nd] . identifier[random] . identifier[normal] ( literal[int] , literal[int] , identifier[shape] =( identifier[opt] . identifier[batch_size] , identifier[nz] , literal[int] , literal[int] ), identifier[ctx] = identifier[ctx] ) keyword[with] identifier[autograd] . identifier[record] (): identifier[output] = identifier[netD] ( identifier[data] ) identifier[output] = identifier[output] . identifier[reshape] (( identifier[opt] . 
identifier[batch_size] , literal[int] )) identifier[errD_real] = identifier[loss] ( identifier[output] , identifier[real_label] ) identifier[metric] . identifier[update] ([ identifier[real_label] ,],[ identifier[output] ,]) keyword[with] identifier[autograd] . identifier[record] (): identifier[fake] = identifier[netG] ( identifier[noise] ) identifier[output] = identifier[netD] ( identifier[fake] . identifier[detach] ()) identifier[output] = identifier[output] . identifier[reshape] (( identifier[opt] . identifier[batch_size] , literal[int] )) identifier[errD_fake] = identifier[loss] ( identifier[output] , identifier[fake_label] ) identifier[errD] = identifier[errD_real] + identifier[errD_fake] identifier[errD] . identifier[backward] () identifier[metric] . identifier[update] ([ identifier[fake_label] ,],[ identifier[output] ,]) identifier[trainerD] . identifier[step] ( identifier[opt] . identifier[batch_size] ) keyword[with] identifier[autograd] . identifier[record] (): identifier[output] = identifier[netD] ( identifier[fake] ) identifier[output] = identifier[output] . identifier[reshape] ((- literal[int] , literal[int] )) identifier[errG] = identifier[loss] ( identifier[output] , identifier[real_label] ) identifier[errG] . identifier[backward] () identifier[trainerG] . identifier[step] ( identifier[opt] . identifier[batch_size] ) identifier[name] , identifier[acc] = identifier[metric] . identifier[get] () identifier[logging] . identifier[info] ( literal[string] , identifier[mx] . identifier[nd] . identifier[mean] ( identifier[errD] ). identifier[asscalar] (), identifier[mx] . identifier[nd] . identifier[mean] ( identifier[errG] ). identifier[asscalar] (), identifier[acc] , identifier[iter] , identifier[epoch] ) keyword[if] identifier[iter] % identifier[niter] == literal[int] : identifier[visual] ( literal[string] , identifier[fake] . identifier[asnumpy] (), identifier[name] = identifier[os] . identifier[path] . 
identifier[join] ( identifier[outf] , literal[string] % identifier[iter] )) identifier[visual] ( literal[string] , identifier[data] . identifier[asnumpy] (), identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[outf] , literal[string] % identifier[iter] )) identifier[loss_d] . identifier[append] ( identifier[errD] ) identifier[loss_g] . identifier[append] ( identifier[errG] ) keyword[if] identifier[opt] . identifier[inception_score] : identifier[score] , identifier[_] = identifier[get_inception_score] ( identifier[fake] ) identifier[inception_score] . identifier[append] ( identifier[score] ) identifier[iter] = identifier[iter] + literal[int] identifier[btic] = identifier[time] . identifier[time] () identifier[name] , identifier[acc] = identifier[metric] . identifier[get] () identifier[metric] . identifier[reset] () identifier[logging] . identifier[info] ( literal[string] , identifier[epoch] , identifier[name] , identifier[acc] ) identifier[logging] . identifier[info] ( literal[string] , identifier[time] . identifier[time] ()- identifier[tic] ) keyword[if] identifier[check_point] : identifier[netG] . identifier[save_parameters] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outf] , literal[string] % identifier[epoch] )) identifier[netD] . identifier[save_parameters] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outf] , literal[string] % identifier[epoch] )) identifier[netG] . identifier[save_parameters] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outf] , literal[string] )) identifier[netD] . identifier[save_parameters] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outf] , literal[string] )) keyword[if] identifier[opt] . identifier[inception_score] : identifier[ins_save] ( identifier[inception_score] )
def main(): """Entry point to dcgan""" print('|------- new changes!!!!!!!!!') # to get the dataset and net configuration (train_data, val_data) = get_dataset(dataset) netG = get_netG() netD = get_netD() (loss, trainerG, trainerD) = get_configurations(netG, netD) # set labels real_label = mx.nd.ones((opt.batch_size,), ctx=ctx) fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx) metric = mx.metric.Accuracy() print('Training... ') stamp = datetime.now().strftime('%Y_%m_%d-%H_%M') iter = 0 # to metric the network loss_d = [] loss_g = [] inception_score = [] for epoch in range(opt.nepoch): tic = time.time() btic = time.time() for (data, _) in train_data: ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### # train with real_t data = data.as_in_context(ctx) noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx) with autograd.record(): output = netD(data) # reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2) output = output.reshape((opt.batch_size, 2)) errD_real = loss(output, real_label) # depends on [control=['with'], data=[]] metric.update([real_label], [output]) with autograd.record(): fake = netG(noise) output = netD(fake.detach()) output = output.reshape((opt.batch_size, 2)) errD_fake = loss(output, fake_label) errD = errD_real + errD_fake # depends on [control=['with'], data=[]] errD.backward() metric.update([fake_label], [output]) trainerD.step(opt.batch_size) ############################ # (2) Update G network: maximize log(D(G(z))) ########################### with autograd.record(): output = netD(fake) output = output.reshape((-1, 2)) errG = loss(output, real_label) # depends on [control=['with'], data=[]] errG.backward() trainerG.step(opt.batch_size) (name, acc) = metric.get() logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d', mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch) 
if iter % niter == 0: visual('gout', fake.asnumpy(), name=os.path.join(outf, 'fake_img_iter_%d.png' % iter)) visual('data', data.asnumpy(), name=os.path.join(outf, 'real_img_iter_%d.png' % iter)) # record the metric data loss_d.append(errD) loss_g.append(errG) if opt.inception_score: (score, _) = get_inception_score(fake) inception_score.append(score) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] iter = iter + 1 btic = time.time() # depends on [control=['for'], data=[]] (name, acc) = metric.get() metric.reset() logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name, acc) logging.info('time: %f', time.time() - tic) # save check_point if check_point: netG.save_parameters(os.path.join(outf, 'generator_epoch_%d.params' % epoch)) netD.save_parameters(os.path.join(outf, 'discriminator_epoch_%d.params' % epoch)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['epoch']] # save parameter netG.save_parameters(os.path.join(outf, 'generator.params')) netD.save_parameters(os.path.join(outf, 'discriminator.params')) # visualization the inception_score as a picture if opt.inception_score: ins_save(inception_score) # depends on [control=['if'], data=[]]
def _text_image(page): """ returns text image URL """ img = None alt = page.data.get('label') or page.data.get('title') source = _image(page) if source: img = "![%s](%s)" % (alt, source) return img
def function[_text_image, parameter[page]]: constant[ returns text image URL ] variable[img] assign[=] constant[None] variable[alt] assign[=] <ast.BoolOp object at 0x7da1b12a8f10> variable[source] assign[=] call[name[_image], parameter[name[page]]] if name[source] begin[:] variable[img] assign[=] binary_operation[constant[![%s](%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b138eb30>, <ast.Name object at 0x7da1b138d300>]]] return[name[img]]
keyword[def] identifier[_text_image] ( identifier[page] ): literal[string] identifier[img] = keyword[None] identifier[alt] = identifier[page] . identifier[data] . identifier[get] ( literal[string] ) keyword[or] identifier[page] . identifier[data] . identifier[get] ( literal[string] ) identifier[source] = identifier[_image] ( identifier[page] ) keyword[if] identifier[source] : identifier[img] = literal[string] %( identifier[alt] , identifier[source] ) keyword[return] identifier[img]
def _text_image(page): """ returns text image URL """ img = None alt = page.data.get('label') or page.data.get('title') source = _image(page) if source: img = '![%s](%s)' % (alt, source) # depends on [control=['if'], data=[]] return img
def input_to_fastq( self, input_file, sample_name, paired_end, fastq_folder, output_file=None, multiclass=False): """ Builds a command to convert input file to fastq, for various inputs. Takes either .bam, .fastq.gz, or .fastq input and returns commands that will create the .fastq file, regardless of input type. This is useful to made your pipeline easily accept any of these input types seamlessly, standardizing you to the fastq which is still the most common format for adapter trimmers, etc. It will place the output fastq file in given `fastq_folder`. :param str input_file: filename of input you want to convert to fastq :return str: A command (to be run with PipelineManager) that will ensure your fastq file exists. """ fastq_prefix = os.path.join(fastq_folder, sample_name) self.make_sure_path_exists(fastq_folder) # this expects a list; if it gets a string, convert it to a list. if type(input_file) != list: input_file = [input_file] if len(input_file) > 1: cmd = [] output_file = [] for in_i, in_arg in enumerate(input_file): output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq" result_cmd, uf, result_file = \ self.input_to_fastq(in_arg, sample_name, paired_end, fastq_folder, output, multiclass=True) cmd.append(result_cmd) output_file.append(result_file) else: # There was only 1 input class. # Convert back into a string input_file = input_file[0] if not output_file: output_file = fastq_prefix + "_R1.fastq" input_ext = self.get_input_ext(input_file) if input_ext == ".bam": print("Found .bam file") #cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end) cmd, fq1, fq2 = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end) # pm.run(cmd, output_file, follow=check_fastq) elif input_ext == ".fastq.gz": print("Found .fastq.gz file") if paired_end and not multiclass: # For paired-end reads in one fastq file, we must split the file into 2. 
script_path = os.path.join( self.tools.scripts_dir, "fastq_split.py") cmd = self.tools.python + " -u " + script_path cmd += " -i " + input_file cmd += " -o " + fastq_prefix # Must also return the set of output files output_file = [fastq_prefix + "_R1.fastq", fastq_prefix + "_R2.fastq"] else: # For single-end reads, we just unzip the fastq.gz file. # or, paired-end reads that were already split. cmd = self.ziptool + " -d -c " + input_file + " > " + output_file # a non-shell version # cmd1 = "gunzip --force " + input_file # cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file # cmd = [cmd1, cmd2] elif input_ext == ".fastq": cmd = "ln -sf " + input_file + " " + output_file print("Found .fastq file; no conversion necessary") return [cmd, fastq_prefix, output_file]
def function[input_to_fastq, parameter[self, input_file, sample_name, paired_end, fastq_folder, output_file, multiclass]]: constant[ Builds a command to convert input file to fastq, for various inputs. Takes either .bam, .fastq.gz, or .fastq input and returns commands that will create the .fastq file, regardless of input type. This is useful to made your pipeline easily accept any of these input types seamlessly, standardizing you to the fastq which is still the most common format for adapter trimmers, etc. It will place the output fastq file in given `fastq_folder`. :param str input_file: filename of input you want to convert to fastq :return str: A command (to be run with PipelineManager) that will ensure your fastq file exists. ] variable[fastq_prefix] assign[=] call[name[os].path.join, parameter[name[fastq_folder], name[sample_name]]] call[name[self].make_sure_path_exists, parameter[name[fastq_folder]]] if compare[call[name[type], parameter[name[input_file]]] not_equal[!=] name[list]] begin[:] variable[input_file] assign[=] list[[<ast.Name object at 0x7da1b032e800>]] if compare[call[name[len], parameter[name[input_file]]] greater[>] constant[1]] begin[:] variable[cmd] assign[=] list[[]] variable[output_file] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b0309090>, <ast.Name object at 0x7da1b030a830>]]] in starred[call[name[enumerate], parameter[name[input_file]]]] begin[:] variable[output] assign[=] binary_operation[binary_operation[binary_operation[name[fastq_prefix] + constant[_R]] + call[name[str], parameter[binary_operation[name[in_i] + constant[1]]]]] + constant[.fastq]] <ast.Tuple object at 0x7da1b030bc40> assign[=] call[name[self].input_to_fastq, parameter[name[in_arg], name[sample_name], name[paired_end], name[fastq_folder], name[output]]] call[name[cmd].append, parameter[name[result_cmd]]] call[name[output_file].append, parameter[name[result_file]]] return[list[[<ast.Name object at 0x7da20c990d30>, <ast.Name object at 0x7da20c990250>, 
<ast.Name object at 0x7da20c991540>]]]
keyword[def] identifier[input_to_fastq] ( identifier[self] , identifier[input_file] , identifier[sample_name] , identifier[paired_end] , identifier[fastq_folder] , identifier[output_file] = keyword[None] , identifier[multiclass] = keyword[False] ): literal[string] identifier[fastq_prefix] = identifier[os] . identifier[path] . identifier[join] ( identifier[fastq_folder] , identifier[sample_name] ) identifier[self] . identifier[make_sure_path_exists] ( identifier[fastq_folder] ) keyword[if] identifier[type] ( identifier[input_file] )!= identifier[list] : identifier[input_file] =[ identifier[input_file] ] keyword[if] identifier[len] ( identifier[input_file] )> literal[int] : identifier[cmd] =[] identifier[output_file] =[] keyword[for] identifier[in_i] , identifier[in_arg] keyword[in] identifier[enumerate] ( identifier[input_file] ): identifier[output] = identifier[fastq_prefix] + literal[string] + identifier[str] ( identifier[in_i] + literal[int] )+ literal[string] identifier[result_cmd] , identifier[uf] , identifier[result_file] = identifier[self] . identifier[input_to_fastq] ( identifier[in_arg] , identifier[sample_name] , identifier[paired_end] , identifier[fastq_folder] , identifier[output] , identifier[multiclass] = keyword[True] ) identifier[cmd] . identifier[append] ( identifier[result_cmd] ) identifier[output_file] . identifier[append] ( identifier[result_file] ) keyword[else] : identifier[input_file] = identifier[input_file] [ literal[int] ] keyword[if] keyword[not] identifier[output_file] : identifier[output_file] = identifier[fastq_prefix] + literal[string] identifier[input_ext] = identifier[self] . identifier[get_input_ext] ( identifier[input_file] ) keyword[if] identifier[input_ext] == literal[string] : identifier[print] ( literal[string] ) identifier[cmd] , identifier[fq1] , identifier[fq2] = identifier[self] . 
identifier[bam_to_fastq_awk] ( identifier[input_file] , identifier[fastq_prefix] , identifier[paired_end] ) keyword[elif] identifier[input_ext] == literal[string] : identifier[print] ( literal[string] ) keyword[if] identifier[paired_end] keyword[and] keyword[not] identifier[multiclass] : identifier[script_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[tools] . identifier[scripts_dir] , literal[string] ) identifier[cmd] = identifier[self] . identifier[tools] . identifier[python] + literal[string] + identifier[script_path] identifier[cmd] += literal[string] + identifier[input_file] identifier[cmd] += literal[string] + identifier[fastq_prefix] identifier[output_file] =[ identifier[fastq_prefix] + literal[string] , identifier[fastq_prefix] + literal[string] ] keyword[else] : identifier[cmd] = identifier[self] . identifier[ziptool] + literal[string] + identifier[input_file] + literal[string] + identifier[output_file] keyword[elif] identifier[input_ext] == literal[string] : identifier[cmd] = literal[string] + identifier[input_file] + literal[string] + identifier[output_file] identifier[print] ( literal[string] ) keyword[return] [ identifier[cmd] , identifier[fastq_prefix] , identifier[output_file] ]
def input_to_fastq(self, input_file, sample_name, paired_end, fastq_folder, output_file=None, multiclass=False): """ Builds a command to convert input file to fastq, for various inputs. Takes either .bam, .fastq.gz, or .fastq input and returns commands that will create the .fastq file, regardless of input type. This is useful to made your pipeline easily accept any of these input types seamlessly, standardizing you to the fastq which is still the most common format for adapter trimmers, etc. It will place the output fastq file in given `fastq_folder`. :param str input_file: filename of input you want to convert to fastq :return str: A command (to be run with PipelineManager) that will ensure your fastq file exists. """ fastq_prefix = os.path.join(fastq_folder, sample_name) self.make_sure_path_exists(fastq_folder) # this expects a list; if it gets a string, convert it to a list. if type(input_file) != list: input_file = [input_file] # depends on [control=['if'], data=[]] if len(input_file) > 1: cmd = [] output_file = [] for (in_i, in_arg) in enumerate(input_file): output = fastq_prefix + '_R' + str(in_i + 1) + '.fastq' (result_cmd, uf, result_file) = self.input_to_fastq(in_arg, sample_name, paired_end, fastq_folder, output, multiclass=True) cmd.append(result_cmd) output_file.append(result_file) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: # There was only 1 input class. 
# Convert back into a string input_file = input_file[0] if not output_file: output_file = fastq_prefix + '_R1.fastq' # depends on [control=['if'], data=[]] input_ext = self.get_input_ext(input_file) if input_ext == '.bam': print('Found .bam file') #cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end) (cmd, fq1, fq2) = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end) # depends on [control=['if'], data=[]] # pm.run(cmd, output_file, follow=check_fastq) elif input_ext == '.fastq.gz': print('Found .fastq.gz file') if paired_end and (not multiclass): # For paired-end reads in one fastq file, we must split the file into 2. script_path = os.path.join(self.tools.scripts_dir, 'fastq_split.py') cmd = self.tools.python + ' -u ' + script_path cmd += ' -i ' + input_file cmd += ' -o ' + fastq_prefix # Must also return the set of output files output_file = [fastq_prefix + '_R1.fastq', fastq_prefix + '_R2.fastq'] # depends on [control=['if'], data=[]] else: # For single-end reads, we just unzip the fastq.gz file. # or, paired-end reads that were already split. cmd = self.ziptool + ' -d -c ' + input_file + ' > ' + output_file # depends on [control=['if'], data=[]] # a non-shell version # cmd1 = "gunzip --force " + input_file # cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file # cmd = [cmd1, cmd2] elif input_ext == '.fastq': cmd = 'ln -sf ' + input_file + ' ' + output_file print('Found .fastq file; no conversion necessary') # depends on [control=['if'], data=[]] return [cmd, fastq_prefix, output_file]
def traceroute(self, destination, source=C.TRACEROUTE_SOURCE, ttl=C.TRACEROUTE_TTL, timeout=C.TRACEROUTE_TIMEOUT, vrf=C.TRACEROUTE_VRF): """Execute traceroute and return results.""" traceroute_result = {} # calling form RPC does not work properly :( # but defined junos_route_instance_table just in case source_str = '' maxttl_str = '' wait_str = '' vrf_str = '' if source: source_str = ' source {source}'.format(source=source) if ttl: maxttl_str = ' ttl {ttl}'.format(ttl=ttl) if timeout: wait_str = ' wait {timeout}'.format(timeout=timeout) if vrf: vrf_str = ' routing-instance {vrf}'.format(vrf=vrf) traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format( destination=destination, source=source_str, maxttl=maxttl_str, wait=wait_str, vrf=vrf_str ) traceroute_rpc = E('command', traceroute_command) rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc # make direct RPC call via NETCONF traceroute_results = rpc_reply.find('.//traceroute-results') traceroute_failure = napalm_base.helpers.find_txt( traceroute_results, 'traceroute-failure', '') error_message = napalm_base.helpers.find_txt( traceroute_results, 'rpc-error/error-message', '') if traceroute_failure and error_message: return {'error': '{}: {}'.format(traceroute_failure, error_message)} traceroute_result['success'] = {} for hop in traceroute_results.findall('hop'): ttl_value = napalm_base.helpers.convert( int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1) if ttl_value not in traceroute_result['success']: traceroute_result['success'][ttl_value] = {'probes': {}} for probe in hop.findall('probe-result'): probe_index = napalm_base.helpers.convert( int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0) ip_address = napalm_base.helpers.convert( napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*') host_name = py23_compat.text_type( napalm_base.helpers.find_txt(probe, 'host-name', '*')) rtt = napalm_base.helpers.convert( float, 
napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms traceroute_result['success'][ttl_value]['probes'][probe_index] = { 'ip_address': ip_address, 'host_name': host_name, 'rtt': rtt } return traceroute_result
def function[traceroute, parameter[self, destination, source, ttl, timeout, vrf]]: constant[Execute traceroute and return results.] variable[traceroute_result] assign[=] dictionary[[], []] variable[source_str] assign[=] constant[] variable[maxttl_str] assign[=] constant[] variable[wait_str] assign[=] constant[] variable[vrf_str] assign[=] constant[] if name[source] begin[:] variable[source_str] assign[=] call[constant[ source {source}].format, parameter[]] if name[ttl] begin[:] variable[maxttl_str] assign[=] call[constant[ ttl {ttl}].format, parameter[]] if name[timeout] begin[:] variable[wait_str] assign[=] call[constant[ wait {timeout}].format, parameter[]] if name[vrf] begin[:] variable[vrf_str] assign[=] call[constant[ routing-instance {vrf}].format, parameter[]] variable[traceroute_command] assign[=] call[constant[traceroute {destination}{source}{maxttl}{wait}{vrf}].format, parameter[]] variable[traceroute_rpc] assign[=] call[name[E], parameter[constant[command], name[traceroute_command]]] variable[rpc_reply] assign[=] call[name[self].device._conn.rpc, parameter[name[traceroute_rpc]]]._NCElement__doc variable[traceroute_results] assign[=] call[name[rpc_reply].find, parameter[constant[.//traceroute-results]]] variable[traceroute_failure] assign[=] call[name[napalm_base].helpers.find_txt, parameter[name[traceroute_results], constant[traceroute-failure], constant[]]] variable[error_message] assign[=] call[name[napalm_base].helpers.find_txt, parameter[name[traceroute_results], constant[rpc-error/error-message], constant[]]] if <ast.BoolOp object at 0x7da1b115ec50> begin[:] return[dictionary[[<ast.Constant object at 0x7da1b115f4c0>], [<ast.Call object at 0x7da1b115e5f0>]]] call[name[traceroute_result]][constant[success]] assign[=] dictionary[[], []] for taget[name[hop]] in starred[call[name[traceroute_results].findall, parameter[constant[hop]]]] begin[:] variable[ttl_value] assign[=] call[name[napalm_base].helpers.convert, parameter[name[int], 
call[name[napalm_base].helpers.find_txt, parameter[name[hop], constant[ttl-value]]], constant[1]]] if compare[name[ttl_value] <ast.NotIn object at 0x7da2590d7190> call[name[traceroute_result]][constant[success]]] begin[:] call[call[name[traceroute_result]][constant[success]]][name[ttl_value]] assign[=] dictionary[[<ast.Constant object at 0x7da1b115e6b0>], [<ast.Dict object at 0x7da1b115f400>]] for taget[name[probe]] in starred[call[name[hop].findall, parameter[constant[probe-result]]]] begin[:] variable[probe_index] assign[=] call[name[napalm_base].helpers.convert, parameter[name[int], call[name[napalm_base].helpers.find_txt, parameter[name[probe], constant[probe-index]]], constant[0]]] variable[ip_address] assign[=] call[name[napalm_base].helpers.convert, parameter[name[napalm_base].helpers.ip, call[name[napalm_base].helpers.find_txt, parameter[name[probe], constant[ip-address]]], constant[*]]] variable[host_name] assign[=] call[name[py23_compat].text_type, parameter[call[name[napalm_base].helpers.find_txt, parameter[name[probe], constant[host-name], constant[*]]]]] variable[rtt] assign[=] binary_operation[call[name[napalm_base].helpers.convert, parameter[name[float], call[name[napalm_base].helpers.find_txt, parameter[name[probe], constant[rtt]]], constant[0]]] * constant[0.001]] call[call[call[call[name[traceroute_result]][constant[success]]][name[ttl_value]]][constant[probes]]][name[probe_index]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f13460>, <ast.Constant object at 0x7da1b0f11510>, <ast.Constant object at 0x7da1b0f11810>], [<ast.Name object at 0x7da1b0f123e0>, <ast.Name object at 0x7da1b0f13f10>, <ast.Name object at 0x7da1b0f136d0>]] return[name[traceroute_result]]
keyword[def] identifier[traceroute] ( identifier[self] , identifier[destination] , identifier[source] = identifier[C] . identifier[TRACEROUTE_SOURCE] , identifier[ttl] = identifier[C] . identifier[TRACEROUTE_TTL] , identifier[timeout] = identifier[C] . identifier[TRACEROUTE_TIMEOUT] , identifier[vrf] = identifier[C] . identifier[TRACEROUTE_VRF] ): literal[string] identifier[traceroute_result] ={} identifier[source_str] = literal[string] identifier[maxttl_str] = literal[string] identifier[wait_str] = literal[string] identifier[vrf_str] = literal[string] keyword[if] identifier[source] : identifier[source_str] = literal[string] . identifier[format] ( identifier[source] = identifier[source] ) keyword[if] identifier[ttl] : identifier[maxttl_str] = literal[string] . identifier[format] ( identifier[ttl] = identifier[ttl] ) keyword[if] identifier[timeout] : identifier[wait_str] = literal[string] . identifier[format] ( identifier[timeout] = identifier[timeout] ) keyword[if] identifier[vrf] : identifier[vrf_str] = literal[string] . identifier[format] ( identifier[vrf] = identifier[vrf] ) identifier[traceroute_command] = literal[string] . identifier[format] ( identifier[destination] = identifier[destination] , identifier[source] = identifier[source_str] , identifier[maxttl] = identifier[maxttl_str] , identifier[wait] = identifier[wait_str] , identifier[vrf] = identifier[vrf_str] ) identifier[traceroute_rpc] = identifier[E] ( literal[string] , identifier[traceroute_command] ) identifier[rpc_reply] = identifier[self] . identifier[device] . identifier[_conn] . identifier[rpc] ( identifier[traceroute_rpc] ). identifier[_NCElement__doc] identifier[traceroute_results] = identifier[rpc_reply] . identifier[find] ( literal[string] ) identifier[traceroute_failure] = identifier[napalm_base] . identifier[helpers] . identifier[find_txt] ( identifier[traceroute_results] , literal[string] , literal[string] ) identifier[error_message] = identifier[napalm_base] . identifier[helpers] . 
identifier[find_txt] ( identifier[traceroute_results] , literal[string] , literal[string] ) keyword[if] identifier[traceroute_failure] keyword[and] identifier[error_message] : keyword[return] { literal[string] : literal[string] . identifier[format] ( identifier[traceroute_failure] , identifier[error_message] )} identifier[traceroute_result] [ literal[string] ]={} keyword[for] identifier[hop] keyword[in] identifier[traceroute_results] . identifier[findall] ( literal[string] ): identifier[ttl_value] = identifier[napalm_base] . identifier[helpers] . identifier[convert] ( identifier[int] , identifier[napalm_base] . identifier[helpers] . identifier[find_txt] ( identifier[hop] , literal[string] ), literal[int] ) keyword[if] identifier[ttl_value] keyword[not] keyword[in] identifier[traceroute_result] [ literal[string] ]: identifier[traceroute_result] [ literal[string] ][ identifier[ttl_value] ]={ literal[string] :{}} keyword[for] identifier[probe] keyword[in] identifier[hop] . identifier[findall] ( literal[string] ): identifier[probe_index] = identifier[napalm_base] . identifier[helpers] . identifier[convert] ( identifier[int] , identifier[napalm_base] . identifier[helpers] . identifier[find_txt] ( identifier[probe] , literal[string] ), literal[int] ) identifier[ip_address] = identifier[napalm_base] . identifier[helpers] . identifier[convert] ( identifier[napalm_base] . identifier[helpers] . identifier[ip] , identifier[napalm_base] . identifier[helpers] . identifier[find_txt] ( identifier[probe] , literal[string] ), literal[string] ) identifier[host_name] = identifier[py23_compat] . identifier[text_type] ( identifier[napalm_base] . identifier[helpers] . identifier[find_txt] ( identifier[probe] , literal[string] , literal[string] )) identifier[rtt] = identifier[napalm_base] . identifier[helpers] . identifier[convert] ( identifier[float] , identifier[napalm_base] . identifier[helpers] . 
identifier[find_txt] ( identifier[probe] , literal[string] ), literal[int] )* literal[int] identifier[traceroute_result] [ literal[string] ][ identifier[ttl_value] ][ literal[string] ][ identifier[probe_index] ]={ literal[string] : identifier[ip_address] , literal[string] : identifier[host_name] , literal[string] : identifier[rtt] } keyword[return] identifier[traceroute_result]
def traceroute(self, destination, source=C.TRACEROUTE_SOURCE, ttl=C.TRACEROUTE_TTL, timeout=C.TRACEROUTE_TIMEOUT, vrf=C.TRACEROUTE_VRF): """Execute traceroute and return results.""" traceroute_result = {} # calling form RPC does not work properly :( # but defined junos_route_instance_table just in case source_str = '' maxttl_str = '' wait_str = '' vrf_str = '' if source: source_str = ' source {source}'.format(source=source) # depends on [control=['if'], data=[]] if ttl: maxttl_str = ' ttl {ttl}'.format(ttl=ttl) # depends on [control=['if'], data=[]] if timeout: wait_str = ' wait {timeout}'.format(timeout=timeout) # depends on [control=['if'], data=[]] if vrf: vrf_str = ' routing-instance {vrf}'.format(vrf=vrf) # depends on [control=['if'], data=[]] traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(destination=destination, source=source_str, maxttl=maxttl_str, wait=wait_str, vrf=vrf_str) traceroute_rpc = E('command', traceroute_command) rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc # make direct RPC call via NETCONF traceroute_results = rpc_reply.find('.//traceroute-results') traceroute_failure = napalm_base.helpers.find_txt(traceroute_results, 'traceroute-failure', '') error_message = napalm_base.helpers.find_txt(traceroute_results, 'rpc-error/error-message', '') if traceroute_failure and error_message: return {'error': '{}: {}'.format(traceroute_failure, error_message)} # depends on [control=['if'], data=[]] traceroute_result['success'] = {} for hop in traceroute_results.findall('hop'): ttl_value = napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1) if ttl_value not in traceroute_result['success']: traceroute_result['success'][ttl_value] = {'probes': {}} # depends on [control=['if'], data=['ttl_value']] for probe in hop.findall('probe-result'): probe_index = napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0) ip_address = 
napalm_base.helpers.convert(napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*') host_name = py23_compat.text_type(napalm_base.helpers.find_txt(probe, 'host-name', '*')) rtt = napalm_base.helpers.convert(float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 0.001 # ms traceroute_result['success'][ttl_value]['probes'][probe_index] = {'ip_address': ip_address, 'host_name': host_name, 'rtt': rtt} # depends on [control=['for'], data=['probe']] # depends on [control=['for'], data=['hop']] return traceroute_result
def create(self, root_url, mount=None, host_network=False, nics=DefaultNetworking, port=None, hostname=None, privileged=False, storage=None, name=None, tags=None, identity=None, env=None, cgroups=None): """ Creater a new container with the given root flist, mount points and zerotier id, and connected to the given bridges :param root_url: The root filesystem flist :param mount: a dict with {host_source: container_target} mount points. where host_source directory must exists. host_source can be a url to a flist to mount. :param host_network: Specify if the container should share the same network stack as the host. if True, container creation ignores both zerotier, bridge and ports arguments below. Not giving errors if provided. :param nics: Configure the attached nics to the container each nic object is a dict of the format { 'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs) 'id': id # depends on the type bridge: bridge name, zerotier: network id, macvlan: the parent link name, passthrough: the link name, vlan: the vlan tag, vxlan: the vxlan id 'name': name of the nic inside the container (ignored in zerotier type) 'hwaddr': Mac address of nic. 'config': { # config is only honored for bridge, vlan, and vxlan types 'dhcp': bool, 'cidr': static_ip # ip/mask 'gateway': gateway 'dns': [dns] } } :param port: A dict of host_port: container_port pairs (only if default networking is enabled) Example: `port={8080: 80, 7000:7000}` :param hostname: Specific hostname you want to give to the container. if None it will automatically be set to core-x, x beeing the ID of the container :param privileged: If true, container runs in privileged mode. :param storage: A Url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs) if not provided, the default one from core0 configuration will be used. 
:param name: Optional name for the container :param identity: Container Zerotier identity, Only used if at least one of the nics is of type zerotier :param env: a dict with the environment variables needed to be set for the container :param cgroups: custom list of cgroups to apply to this container on creation. formated as [(subsystem, name), ...] please refer to the cgroup api for more detailes. """ if nics == self.DefaultNetworking: nics = [{'type': 'default'}] elif nics is None: nics = [] args = { 'root': root_url, 'mount': mount, 'host_network': host_network, 'nics': nics, 'port': port, 'hostname': hostname, 'privileged': privileged, 'storage': storage, 'name': name, 'identity': identity, 'env': env, 'cgroups': cgroups, } # validate input self._create_chk.check(args) response = self._client.raw('corex.create', args, tags=tags) return JSONResponse(response)
def function[create, parameter[self, root_url, mount, host_network, nics, port, hostname, privileged, storage, name, tags, identity, env, cgroups]]: constant[ Creater a new container with the given root flist, mount points and zerotier id, and connected to the given bridges :param root_url: The root filesystem flist :param mount: a dict with {host_source: container_target} mount points. where host_source directory must exists. host_source can be a url to a flist to mount. :param host_network: Specify if the container should share the same network stack as the host. if True, container creation ignores both zerotier, bridge and ports arguments below. Not giving errors if provided. :param nics: Configure the attached nics to the container each nic object is a dict of the format { 'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs) 'id': id # depends on the type bridge: bridge name, zerotier: network id, macvlan: the parent link name, passthrough: the link name, vlan: the vlan tag, vxlan: the vxlan id 'name': name of the nic inside the container (ignored in zerotier type) 'hwaddr': Mac address of nic. 'config': { # config is only honored for bridge, vlan, and vxlan types 'dhcp': bool, 'cidr': static_ip # ip/mask 'gateway': gateway 'dns': [dns] } } :param port: A dict of host_port: container_port pairs (only if default networking is enabled) Example: `port={8080: 80, 7000:7000}` :param hostname: Specific hostname you want to give to the container. if None it will automatically be set to core-x, x beeing the ID of the container :param privileged: If true, container runs in privileged mode. :param storage: A Url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs) if not provided, the default one from core0 configuration will be used. 
:param name: Optional name for the container :param identity: Container Zerotier identity, Only used if at least one of the nics is of type zerotier :param env: a dict with the environment variables needed to be set for the container :param cgroups: custom list of cgroups to apply to this container on creation. formated as [(subsystem, name), ...] please refer to the cgroup api for more detailes. ] if compare[name[nics] equal[==] name[self].DefaultNetworking] begin[:] variable[nics] assign[=] list[[<ast.Dict object at 0x7da1b0416530>]] variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b0417fa0>, <ast.Constant object at 0x7da1b0417c10>, <ast.Constant object at 0x7da1b0414400>, <ast.Constant object at 0x7da1b0415a20>, <ast.Constant object at 0x7da1b0415360>, <ast.Constant object at 0x7da1b0416320>, <ast.Constant object at 0x7da1b04152d0>, <ast.Constant object at 0x7da1b04175e0>, <ast.Constant object at 0x7da1b0417430>, <ast.Constant object at 0x7da1b0415f90>, <ast.Constant object at 0x7da1b04173d0>, <ast.Constant object at 0x7da1b04160e0>], [<ast.Name object at 0x7da1b0415690>, <ast.Name object at 0x7da1b0417280>, <ast.Name object at 0x7da1b04159f0>, <ast.Name object at 0x7da1b0415990>, <ast.Name object at 0x7da1b0417640>, <ast.Name object at 0x7da1b04153f0>, <ast.Name object at 0x7da1b0415570>, <ast.Name object at 0x7da1b0417af0>, <ast.Name object at 0x7da1b0417730>, <ast.Name object at 0x7da1b0417d60>, <ast.Name object at 0x7da1b0416440>, <ast.Name object at 0x7da1b0415db0>]] call[name[self]._create_chk.check, parameter[name[args]]] variable[response] assign[=] call[name[self]._client.raw, parameter[constant[corex.create], name[args]]] return[call[name[JSONResponse], parameter[name[response]]]]
keyword[def] identifier[create] ( identifier[self] , identifier[root_url] , identifier[mount] = keyword[None] , identifier[host_network] = keyword[False] , identifier[nics] = identifier[DefaultNetworking] , identifier[port] = keyword[None] , identifier[hostname] = keyword[None] , identifier[privileged] = keyword[False] , identifier[storage] = keyword[None] , identifier[name] = keyword[None] , identifier[tags] = keyword[None] , identifier[identity] = keyword[None] , identifier[env] = keyword[None] , identifier[cgroups] = keyword[None] ): literal[string] keyword[if] identifier[nics] == identifier[self] . identifier[DefaultNetworking] : identifier[nics] =[{ literal[string] : literal[string] }] keyword[elif] identifier[nics] keyword[is] keyword[None] : identifier[nics] =[] identifier[args] ={ literal[string] : identifier[root_url] , literal[string] : identifier[mount] , literal[string] : identifier[host_network] , literal[string] : identifier[nics] , literal[string] : identifier[port] , literal[string] : identifier[hostname] , literal[string] : identifier[privileged] , literal[string] : identifier[storage] , literal[string] : identifier[name] , literal[string] : identifier[identity] , literal[string] : identifier[env] , literal[string] : identifier[cgroups] , } identifier[self] . identifier[_create_chk] . identifier[check] ( identifier[args] ) identifier[response] = identifier[self] . identifier[_client] . identifier[raw] ( literal[string] , identifier[args] , identifier[tags] = identifier[tags] ) keyword[return] identifier[JSONResponse] ( identifier[response] )
def create(self, root_url, mount=None, host_network=False, nics=DefaultNetworking, port=None, hostname=None, privileged=False, storage=None, name=None, tags=None, identity=None, env=None, cgroups=None): """ Creater a new container with the given root flist, mount points and zerotier id, and connected to the given bridges :param root_url: The root filesystem flist :param mount: a dict with {host_source: container_target} mount points. where host_source directory must exists. host_source can be a url to a flist to mount. :param host_network: Specify if the container should share the same network stack as the host. if True, container creation ignores both zerotier, bridge and ports arguments below. Not giving errors if provided. :param nics: Configure the attached nics to the container each nic object is a dict of the format { 'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs) 'id': id # depends on the type bridge: bridge name, zerotier: network id, macvlan: the parent link name, passthrough: the link name, vlan: the vlan tag, vxlan: the vxlan id 'name': name of the nic inside the container (ignored in zerotier type) 'hwaddr': Mac address of nic. 'config': { # config is only honored for bridge, vlan, and vxlan types 'dhcp': bool, 'cidr': static_ip # ip/mask 'gateway': gateway 'dns': [dns] } } :param port: A dict of host_port: container_port pairs (only if default networking is enabled) Example: `port={8080: 80, 7000:7000}` :param hostname: Specific hostname you want to give to the container. if None it will automatically be set to core-x, x beeing the ID of the container :param privileged: If true, container runs in privileged mode. :param storage: A Url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs) if not provided, the default one from core0 configuration will be used. 
:param name: Optional name for the container :param identity: Container Zerotier identity, Only used if at least one of the nics is of type zerotier :param env: a dict with the environment variables needed to be set for the container :param cgroups: custom list of cgroups to apply to this container on creation. formated as [(subsystem, name), ...] please refer to the cgroup api for more detailes. """ if nics == self.DefaultNetworking: nics = [{'type': 'default'}] # depends on [control=['if'], data=['nics']] elif nics is None: nics = [] # depends on [control=['if'], data=['nics']] args = {'root': root_url, 'mount': mount, 'host_network': host_network, 'nics': nics, 'port': port, 'hostname': hostname, 'privileged': privileged, 'storage': storage, 'name': name, 'identity': identity, 'env': env, 'cgroups': cgroups} # validate input self._create_chk.check(args) response = self._client.raw('corex.create', args, tags=tags) return JSONResponse(response)
def version_from_schema(schema_el): """ returns: API version number <str> raises: <VersionNotFound> NOTE: relies on presence of comment tags in the XSD, which are currently present for both ebaySvc.xsd (TradingAPI) and ShoppingService.xsd (ShoppingAPI) """ vc_el = schema_el while True: vc_el = vc_el.getprevious() if vc_el is None: break if vc_el.tag is etree.Comment: match = VERSION_COMMENT.search(vc_el.text) if match: try: return match.group(1) except IndexError: pass raise VersionNotFound('Version comment not found preceeding schema node')
def function[version_from_schema, parameter[schema_el]]: constant[ returns: API version number <str> raises: <VersionNotFound> NOTE: relies on presence of comment tags in the XSD, which are currently present for both ebaySvc.xsd (TradingAPI) and ShoppingService.xsd (ShoppingAPI) ] variable[vc_el] assign[=] name[schema_el] while constant[True] begin[:] variable[vc_el] assign[=] call[name[vc_el].getprevious, parameter[]] if compare[name[vc_el] is constant[None]] begin[:] break if compare[name[vc_el].tag is name[etree].Comment] begin[:] variable[match] assign[=] call[name[VERSION_COMMENT].search, parameter[name[vc_el].text]] if name[match] begin[:] <ast.Try object at 0x7da207f9a590> <ast.Raise object at 0x7da204564eb0>
keyword[def] identifier[version_from_schema] ( identifier[schema_el] ): literal[string] identifier[vc_el] = identifier[schema_el] keyword[while] keyword[True] : identifier[vc_el] = identifier[vc_el] . identifier[getprevious] () keyword[if] identifier[vc_el] keyword[is] keyword[None] : keyword[break] keyword[if] identifier[vc_el] . identifier[tag] keyword[is] identifier[etree] . identifier[Comment] : identifier[match] = identifier[VERSION_COMMENT] . identifier[search] ( identifier[vc_el] . identifier[text] ) keyword[if] identifier[match] : keyword[try] : keyword[return] identifier[match] . identifier[group] ( literal[int] ) keyword[except] identifier[IndexError] : keyword[pass] keyword[raise] identifier[VersionNotFound] ( literal[string] )
def version_from_schema(schema_el): """ returns: API version number <str> raises: <VersionNotFound> NOTE: relies on presence of comment tags in the XSD, which are currently present for both ebaySvc.xsd (TradingAPI) and ShoppingService.xsd (ShoppingAPI) """ vc_el = schema_el while True: vc_el = vc_el.getprevious() if vc_el is None: break # depends on [control=['if'], data=[]] if vc_el.tag is etree.Comment: match = VERSION_COMMENT.search(vc_el.text) if match: try: return match.group(1) # depends on [control=['try'], data=[]] except IndexError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] raise VersionNotFound('Version comment not found preceeding schema node')
def _set_notification(self, conn, char, enabled, timeout=1.0): """Enable/disable notifications on a GATT characteristic Args: conn (int): The connection handle for the device we should interact with char (dict): The characteristic we should modify enabled (bool): Should we enable or disable notifications timeout (float): How long to wait before failing """ if 'client_configuration' not in char: return False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'} props = char['properties'] if not props.notify: return False, {'reason': 'Cannot enable notification on a characteristic that does not support it'} value = char['client_configuration']['value'] #Check if we don't have to do anything current_state = bool(value & (1 << 0)) if current_state == enabled: return if enabled: value |= 1 << 0 else: value &= ~(1 << 0) char['client_configuration']['value'] = value valarray = struct.pack("<H", value) return self._write_handle(conn, char['client_configuration']['handle'], True, valarray, timeout)
def function[_set_notification, parameter[self, conn, char, enabled, timeout]]: constant[Enable/disable notifications on a GATT characteristic Args: conn (int): The connection handle for the device we should interact with char (dict): The characteristic we should modify enabled (bool): Should we enable or disable notifications timeout (float): How long to wait before failing ] if compare[constant[client_configuration] <ast.NotIn object at 0x7da2590d7190> name[char]] begin[:] return[tuple[[<ast.Constant object at 0x7da207f02e30>, <ast.Dict object at 0x7da207f00910>]]] variable[props] assign[=] call[name[char]][constant[properties]] if <ast.UnaryOp object at 0x7da207f02e90> begin[:] return[tuple[[<ast.Constant object at 0x7da207f01c60>, <ast.Dict object at 0x7da207f01d20>]]] variable[value] assign[=] call[call[name[char]][constant[client_configuration]]][constant[value]] variable[current_state] assign[=] call[name[bool], parameter[binary_operation[name[value] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> constant[0]]]]] if compare[name[current_state] equal[==] name[enabled]] begin[:] return[None] if name[enabled] begin[:] <ast.AugAssign object at 0x7da207f00cd0> call[call[name[char]][constant[client_configuration]]][constant[value]] assign[=] name[value] variable[valarray] assign[=] call[name[struct].pack, parameter[constant[<H], name[value]]] return[call[name[self]._write_handle, parameter[name[conn], call[call[name[char]][constant[client_configuration]]][constant[handle]], constant[True], name[valarray], name[timeout]]]]
keyword[def] identifier[_set_notification] ( identifier[self] , identifier[conn] , identifier[char] , identifier[enabled] , identifier[timeout] = literal[int] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[char] : keyword[return] keyword[False] ,{ literal[string] : literal[string] } identifier[props] = identifier[char] [ literal[string] ] keyword[if] keyword[not] identifier[props] . identifier[notify] : keyword[return] keyword[False] ,{ literal[string] : literal[string] } identifier[value] = identifier[char] [ literal[string] ][ literal[string] ] identifier[current_state] = identifier[bool] ( identifier[value] &( literal[int] << literal[int] )) keyword[if] identifier[current_state] == identifier[enabled] : keyword[return] keyword[if] identifier[enabled] : identifier[value] |= literal[int] << literal[int] keyword[else] : identifier[value] &=~( literal[int] << literal[int] ) identifier[char] [ literal[string] ][ literal[string] ]= identifier[value] identifier[valarray] = identifier[struct] . identifier[pack] ( literal[string] , identifier[value] ) keyword[return] identifier[self] . identifier[_write_handle] ( identifier[conn] , identifier[char] [ literal[string] ][ literal[string] ], keyword[True] , identifier[valarray] , identifier[timeout] )
def _set_notification(self, conn, char, enabled, timeout=1.0): """Enable/disable notifications on a GATT characteristic Args: conn (int): The connection handle for the device we should interact with char (dict): The characteristic we should modify enabled (bool): Should we enable or disable notifications timeout (float): How long to wait before failing """ if 'client_configuration' not in char: return (False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'}) # depends on [control=['if'], data=[]] props = char['properties'] if not props.notify: return (False, {'reason': 'Cannot enable notification on a characteristic that does not support it'}) # depends on [control=['if'], data=[]] value = char['client_configuration']['value'] #Check if we don't have to do anything current_state = bool(value & 1 << 0) if current_state == enabled: return # depends on [control=['if'], data=[]] if enabled: value |= 1 << 0 # depends on [control=['if'], data=[]] else: value &= ~(1 << 0) char['client_configuration']['value'] = value valarray = struct.pack('<H', value) return self._write_handle(conn, char['client_configuration']['handle'], True, valarray, timeout)
def one_of(self, items): """ Check if the value is contained in a list or generator. >>> Query().f1.one_of(['value 1', 'value 2']) :param items: The list of items to check with """ return self._generate_test( lambda value: value in items, ('one_of', self._path, freeze(items)) )
def function[one_of, parameter[self, items]]: constant[ Check if the value is contained in a list or generator. >>> Query().f1.one_of(['value 1', 'value 2']) :param items: The list of items to check with ] return[call[name[self]._generate_test, parameter[<ast.Lambda object at 0x7da1b1a67370>, tuple[[<ast.Constant object at 0x7da18f09e620>, <ast.Attribute object at 0x7da18f09d120>, <ast.Call object at 0x7da18f09f880>]]]]]
keyword[def] identifier[one_of] ( identifier[self] , identifier[items] ): literal[string] keyword[return] identifier[self] . identifier[_generate_test] ( keyword[lambda] identifier[value] : identifier[value] keyword[in] identifier[items] , ( literal[string] , identifier[self] . identifier[_path] , identifier[freeze] ( identifier[items] )) )
def one_of(self, items): """ Check if the value is contained in a list or generator. >>> Query().f1.one_of(['value 1', 'value 2']) :param items: The list of items to check with """ return self._generate_test(lambda value: value in items, ('one_of', self._path, freeze(items)))
def set_group_conditions(self, group_id, conditions, trigger_mode=None): """ Set the group conditions. This replaces any existing conditions on the group and member conditions for all trigger modes. :param group_id: Group to be updated :param conditions: New conditions to replace old ones :param trigger_mode: Optional TriggerMode used :type conditions: GroupConditionsInfo :type trigger_mode: TriggerMode :return: The new Group conditions """ data = self._serialize_object(conditions) if trigger_mode is not None: url = self._service_url(['triggers', 'groups', group_id, 'conditions', trigger_mode]) else: url = self._service_url(['triggers', 'groups', group_id, 'conditions']) response = self._put(url, data) return Condition.list_to_object_list(response)
def function[set_group_conditions, parameter[self, group_id, conditions, trigger_mode]]: constant[ Set the group conditions. This replaces any existing conditions on the group and member conditions for all trigger modes. :param group_id: Group to be updated :param conditions: New conditions to replace old ones :param trigger_mode: Optional TriggerMode used :type conditions: GroupConditionsInfo :type trigger_mode: TriggerMode :return: The new Group conditions ] variable[data] assign[=] call[name[self]._serialize_object, parameter[name[conditions]]] if compare[name[trigger_mode] is_not constant[None]] begin[:] variable[url] assign[=] call[name[self]._service_url, parameter[list[[<ast.Constant object at 0x7da2054a5660>, <ast.Constant object at 0x7da2054a7130>, <ast.Name object at 0x7da2054a4550>, <ast.Constant object at 0x7da2054a5ff0>, <ast.Name object at 0x7da2054a6fe0>]]]] variable[response] assign[=] call[name[self]._put, parameter[name[url], name[data]]] return[call[name[Condition].list_to_object_list, parameter[name[response]]]]
keyword[def] identifier[set_group_conditions] ( identifier[self] , identifier[group_id] , identifier[conditions] , identifier[trigger_mode] = keyword[None] ): literal[string] identifier[data] = identifier[self] . identifier[_serialize_object] ( identifier[conditions] ) keyword[if] identifier[trigger_mode] keyword[is] keyword[not] keyword[None] : identifier[url] = identifier[self] . identifier[_service_url] ([ literal[string] , literal[string] , identifier[group_id] , literal[string] , identifier[trigger_mode] ]) keyword[else] : identifier[url] = identifier[self] . identifier[_service_url] ([ literal[string] , literal[string] , identifier[group_id] , literal[string] ]) identifier[response] = identifier[self] . identifier[_put] ( identifier[url] , identifier[data] ) keyword[return] identifier[Condition] . identifier[list_to_object_list] ( identifier[response] )
def set_group_conditions(self, group_id, conditions, trigger_mode=None): """ Set the group conditions. This replaces any existing conditions on the group and member conditions for all trigger modes. :param group_id: Group to be updated :param conditions: New conditions to replace old ones :param trigger_mode: Optional TriggerMode used :type conditions: GroupConditionsInfo :type trigger_mode: TriggerMode :return: The new Group conditions """ data = self._serialize_object(conditions) if trigger_mode is not None: url = self._service_url(['triggers', 'groups', group_id, 'conditions', trigger_mode]) # depends on [control=['if'], data=['trigger_mode']] else: url = self._service_url(['triggers', 'groups', group_id, 'conditions']) response = self._put(url, data) return Condition.list_to_object_list(response)
def call_actions_parallel(self, service_name, actions, **kwargs):
    """
    Build and send multiple job requests to one service, one action per job,
    to be executed in parallel, and block until every response has been
    received.

    Returns the action responses in the same order the actions were
    provided, or raises if any action response is an error (unless
    `raise_action_errors` is passed as `False`) or if any job response is an
    error (unless `raise_job_errors` is passed as `False`). Expansions are
    performed when the Client is configured with an expansion converter.

    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param actions: `ActionRequest` objects and/or dicts that can be
        converted to `ActionRequest` objects
    :type actions: iterable[union[ActionRequest, dict]]
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_action_errors: Whether to raise a CallActionError if any
        action responses contain errors (defaults to `True`)
    :type raise_action_errors: bool
    :param timeout: If provided, overrides the default transport timeout;
        requests expire after this number of seconds plus a buffer defined
        by the transport, and the client will not block waiting for a
        response longer than this amount of time
    :type timeout: int
    :param switches: A list of switch value integers
    :type switches: list
    :param correlation_id: The request correlation ID
    :type correlation_id: union[str, unicode]
    :param continue_on_error: Whether to continue executing further actions
        once one action has returned errors
    :type continue_on_error: bool
    :param context: A dictionary of extra values to include in the context
        header
    :type context: dict
    :param control_extra: A dictionary of extra values to include in the
        control header
    :type control_extra: dict
    :return: A generator of action responses
    :rtype: Generator[ActionResponse]
    :raise: ConnectionError, InvalidField, MessageSendError,
        MessageSendTimeout, MessageTooLarge, MessageReceiveError,
        MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
    """
    future = self.call_actions_parallel_future(service_name, actions, **kwargs)
    return future.result()
def function[call_actions_parallel, parameter[self, service_name, actions]]: constant[ Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. :param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. 
:type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError ] return[call[call[name[self].call_actions_parallel_future, parameter[name[service_name], name[actions]]].result, parameter[]]]
keyword[def] identifier[call_actions_parallel] ( identifier[self] , identifier[service_name] , identifier[actions] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[call_actions_parallel_future] ( identifier[service_name] , identifier[actions] ,** identifier[kwargs] ). identifier[result] ()
def call_actions_parallel(self, service_name, actions, **kwargs): """ Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. :param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. 
:type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError """ return self.call_actions_parallel_future(service_name, actions, **kwargs).result()
def cmd_quick(action, action_space, ability_id, queued):
    """Do a quick command like 'Stop' or 'Stim'.

    Mutates *action* in place; returns None.
    """
    unit_command = spatial(action, action_space).unit_command
    unit_command.ability_id = ability_id
    unit_command.queue_command = queued
def function[cmd_quick, parameter[action, action_space, ability_id, queued]]: constant[Do a quick command like 'Stop' or 'Stim'.] variable[action_cmd] assign[=] call[name[spatial], parameter[name[action], name[action_space]]].unit_command name[action_cmd].ability_id assign[=] name[ability_id] name[action_cmd].queue_command assign[=] name[queued]
keyword[def] identifier[cmd_quick] ( identifier[action] , identifier[action_space] , identifier[ability_id] , identifier[queued] ): literal[string] identifier[action_cmd] = identifier[spatial] ( identifier[action] , identifier[action_space] ). identifier[unit_command] identifier[action_cmd] . identifier[ability_id] = identifier[ability_id] identifier[action_cmd] . identifier[queue_command] = identifier[queued]
def cmd_quick(action, action_space, ability_id, queued): """Do a quick command like 'Stop' or 'Stim'.""" action_cmd = spatial(action, action_space).unit_command action_cmd.ability_id = ability_id action_cmd.queue_command = queued
def getall(self):
    """Return the configuration of every local user as a resource dict.

    Returns:
        dict: A dict of usernames with a nested resource dict object
    """
    resources = {}
    for entry in self.users_re.findall(self.config, re.M):
        resources.update(self._parse_username(entry))
    return resources
def function[getall, parameter[self]]: constant[Returns all local users configuration as a resource dict Returns: dict: A dict of usernames with a nested resource dict object ] variable[users] assign[=] call[name[self].users_re.findall, parameter[name[self].config, name[re].M]] variable[resources] assign[=] call[name[dict], parameter[]] for taget[name[user]] in starred[name[users]] begin[:] call[name[resources].update, parameter[call[name[self]._parse_username, parameter[name[user]]]]] return[name[resources]]
keyword[def] identifier[getall] ( identifier[self] ): literal[string] identifier[users] = identifier[self] . identifier[users_re] . identifier[findall] ( identifier[self] . identifier[config] , identifier[re] . identifier[M] ) identifier[resources] = identifier[dict] () keyword[for] identifier[user] keyword[in] identifier[users] : identifier[resources] . identifier[update] ( identifier[self] . identifier[_parse_username] ( identifier[user] )) keyword[return] identifier[resources]
def getall(self): """Returns all local users configuration as a resource dict Returns: dict: A dict of usernames with a nested resource dict object """ users = self.users_re.findall(self.config, re.M) resources = dict() for user in users: resources.update(self._parse_username(user)) # depends on [control=['for'], data=['user']] return resources
def lintersects(self, span):
    """
    If this span intersects the left (starting) side of the given span.

    A list argument is filtered down to the spans that intersect; a single
    span yields the boolean result directly.
    """
    if not isinstance(span, list):
        return self._lintersects(span)
    return [candidate for candidate in span if self._lintersects(candidate)]
def function[lintersects, parameter[self, span]]: constant[ If this span intersects the left (starting) side of the given span. ] if call[name[isinstance], parameter[name[span], name[list]]] begin[:] return[<ast.ListComp object at 0x7da204623250>] return[call[name[self]._lintersects, parameter[name[span]]]]
keyword[def] identifier[lintersects] ( identifier[self] , identifier[span] ): literal[string] keyword[if] identifier[isinstance] ( identifier[span] , identifier[list] ): keyword[return] [ identifier[sp] keyword[for] identifier[sp] keyword[in] identifier[span] keyword[if] identifier[self] . identifier[_lintersects] ( identifier[sp] )] keyword[return] identifier[self] . identifier[_lintersects] ( identifier[span] )
def lintersects(self, span): """ If this span intersects the left (starting) side of the given span. """ if isinstance(span, list): return [sp for sp in span if self._lintersects(sp)] # depends on [control=['if'], data=[]] return self._lintersects(span)
def set_speed(self, value):
    """Set the combined movement speed of all axes, in mm/second."""
    self._combined_speed = float(value)
    # G-code speeds are expressed per minute, so convert before sending.
    per_minute = int(self._combined_speed * SEC_PER_MIN)
    command = GCODES['SET_SPEED'] + str(per_minute)
    log.debug("set_speed: {}".format(command))
    self._send_command(command)
def function[set_speed, parameter[self, value]]: constant[ set total axes movement speed in mm/second] name[self]._combined_speed assign[=] call[name[float], parameter[name[value]]] variable[speed_per_min] assign[=] call[name[int], parameter[binary_operation[name[self]._combined_speed * name[SEC_PER_MIN]]]] variable[command] assign[=] binary_operation[call[name[GCODES]][constant[SET_SPEED]] + call[name[str], parameter[name[speed_per_min]]]] call[name[log].debug, parameter[call[constant[set_speed: {}].format, parameter[name[command]]]]] call[name[self]._send_command, parameter[name[command]]]
keyword[def] identifier[set_speed] ( identifier[self] , identifier[value] ): literal[string] identifier[self] . identifier[_combined_speed] = identifier[float] ( identifier[value] ) identifier[speed_per_min] = identifier[int] ( identifier[self] . identifier[_combined_speed] * identifier[SEC_PER_MIN] ) identifier[command] = identifier[GCODES] [ literal[string] ]+ identifier[str] ( identifier[speed_per_min] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[command] )) identifier[self] . identifier[_send_command] ( identifier[command] )
def set_speed(self, value): """ set total axes movement speed in mm/second""" self._combined_speed = float(value) speed_per_min = int(self._combined_speed * SEC_PER_MIN) command = GCODES['SET_SPEED'] + str(speed_per_min) log.debug('set_speed: {}'.format(command)) self._send_command(command)
def precursor_sequence(loci, reference):
    """Get the precursor sequence for *loci* from the reference genome.

    :param loci: indexable whose elements 1-3 are chrom/start/end and
        element 4 the strand (element 0 is unused here)
    :param reference: path to the reference FASTA file
    :return: the precursor sequence string (second line of the FASTA output)
    """
    region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4])
    precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True)
    # Use a context manager so the FASTA handle is closed (the original
    # open(...).read() leaked the file descriptor).
    with open(precursor.seqfn) as in_handle:
        return in_handle.read().split("\n")[1]
def function[precursor_sequence, parameter[loci, reference]]: constant[Get sequence from genome] variable[region] assign[=] binary_operation[constant[%s %s %s . . %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0339810>, <ast.Subscript object at 0x7da1b0338130>, <ast.Subscript object at 0x7da1b033b010>, <ast.Subscript object at 0x7da1b0339e40>]]] variable[precursor] assign[=] call[call[name[pybedtools].BedTool, parameter[call[name[str], parameter[name[region]]]]].sequence, parameter[]] return[call[call[call[call[name[open], parameter[name[precursor].seqfn]].read, parameter[]].split, parameter[constant[ ]]]][constant[1]]]
keyword[def] identifier[precursor_sequence] ( identifier[loci] , identifier[reference] ): literal[string] identifier[region] = literal[string] %( identifier[loci] [ literal[int] ], identifier[loci] [ literal[int] ], identifier[loci] [ literal[int] ], identifier[loci] [ literal[int] ]) identifier[precursor] = identifier[pybedtools] . identifier[BedTool] ( identifier[str] ( identifier[region] ), identifier[from_string] = keyword[True] ). identifier[sequence] ( identifier[fi] = identifier[reference] , identifier[s] = keyword[True] ) keyword[return] identifier[open] ( identifier[precursor] . identifier[seqfn] ). identifier[read] (). identifier[split] ( literal[string] )[ literal[int] ]
def precursor_sequence(loci, reference): """Get sequence from genome""" region = '%s\t%s\t%s\t.\t.\t%s' % (loci[1], loci[2], loci[3], loci[4]) precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True) return open(precursor.seqfn).read().split('\n')[1]
def create(kind, name, **kwargs):
    """Creates an instance of a registered word embedding evaluation function.

    Parameters
    ----------
    kind : ['similarity', 'analogy']
        Return only valid names for similarity, analogy or both kinds of
        functions.
    name : str
        The evaluation function name (case-insensitive).

    Returns
    -------
    An instance of
    :class:`gluonnlp.embedding.evaluation.WordEmbeddingAnalogyFunction`:
    or
    :class:`gluonnlp.embedding.evaluation.WordEmbeddingSimilarityFunction`:
        An instance of the specified evaluation function.

    """
    # Membership test on the dict directly (no .keys() needed).
    if kind not in _REGSITRY_KIND_CLASS_MAP:
        # Fixed missing spaces in the implicitly-concatenated message
        # (previously rendered "...to getall the valid kinds...").
        raise KeyError(
            'Cannot find `kind` {}. Use '
            '`list_evaluation_functions(kind=None).keys()` to get '
            'all the valid kinds of evaluation functions.'.format(kind))

    create_ = registry.get_create_func(
        _REGSITRY_KIND_CLASS_MAP[kind],
        'word embedding {} evaluation function'.format(kind))

    return create_(name, **kwargs)
def function[create, parameter[kind, name]]: constant[Creates an instance of a registered word embedding evaluation function. Parameters ---------- kind : ['similarity', 'analogy'] Return only valid names for similarity, analogy or both kinds of functions. name : str The evaluation function name (case-insensitive). Returns ------- An instance of :class:`gluonnlp.embedding.evaluation.WordEmbeddingAnalogyFunction`: or :class:`gluonnlp.embedding.evaluation.WordEmbeddingSimilarityFunction`: An instance of the specified evaluation function. ] if compare[name[kind] <ast.NotIn object at 0x7da2590d7190> call[name[_REGSITRY_KIND_CLASS_MAP].keys, parameter[]]] begin[:] <ast.Raise object at 0x7da1b1c21f00> variable[create_] assign[=] call[name[registry].get_create_func, parameter[call[name[_REGSITRY_KIND_CLASS_MAP]][name[kind]], call[constant[word embedding {} evaluation function].format, parameter[name[kind]]]]] return[call[name[create_], parameter[name[name]]]]
keyword[def] identifier[create] ( identifier[kind] , identifier[name] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[kind] keyword[not] keyword[in] identifier[_REGSITRY_KIND_CLASS_MAP] . identifier[keys] (): keyword[raise] identifier[KeyError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[kind] )) identifier[create_] = identifier[registry] . identifier[get_create_func] ( identifier[_REGSITRY_KIND_CLASS_MAP] [ identifier[kind] ], literal[string] . identifier[format] ( identifier[kind] )) keyword[return] identifier[create_] ( identifier[name] ,** identifier[kwargs] )
def create(kind, name, **kwargs): """Creates an instance of a registered word embedding evaluation function. Parameters ---------- kind : ['similarity', 'analogy'] Return only valid names for similarity, analogy or both kinds of functions. name : str The evaluation function name (case-insensitive). Returns ------- An instance of :class:`gluonnlp.embedding.evaluation.WordEmbeddingAnalogyFunction`: or :class:`gluonnlp.embedding.evaluation.WordEmbeddingSimilarityFunction`: An instance of the specified evaluation function. """ if kind not in _REGSITRY_KIND_CLASS_MAP.keys(): raise KeyError('Cannot find `kind` {}. Use `list_evaluation_functions(kind=None).keys()` to getall the valid kinds of evaluation functions.'.format(kind)) # depends on [control=['if'], data=['kind']] create_ = registry.get_create_func(_REGSITRY_KIND_CLASS_MAP[kind], 'word embedding {} evaluation function'.format(kind)) return create_(name, **kwargs)
def getShare(self, shareID):
    """
    Retrieve a proxy object for a given shareID, previously shared with
    this role or one of its group roles via L{Role.shareItem}.

    @return: a L{SharedProxy}.  This is a wrapper around the shared item
    which only exposes those interfaces explicitly allowed for the given
    role.

    @raise: L{NoSuchShare} if there is no item shared to the given role for
    the given shareID.
    """
    shares = list(self.store.query(
        Share,
        AND(Share.shareID == shareID,
            Share.sharedTo.oneOf(self.allRoles()))))
    if not shares:
        raise NoSuchShare()
    # Aggregate the allowed interfaces across every matching share.
    interfaces = []
    for share in shares:
        interfaces.extend(share.sharedInterfaces)
    return SharedProxy(shares[0].sharedItem, interfaces, shareID)
def function[getShare, parameter[self, shareID]]: constant[ Retrieve a proxy object for a given shareID, previously shared with this role or one of its group roles via L{Role.shareItem}. @return: a L{SharedProxy}. This is a wrapper around the shared item which only exposes those interfaces explicitly allowed for the given role. @raise: L{NoSuchShare} if there is no item shared to the given role for the given shareID. ] variable[shares] assign[=] call[name[list], parameter[call[name[self].store.query, parameter[name[Share], call[name[AND], parameter[compare[name[Share].shareID equal[==] name[shareID]], call[name[Share].sharedTo.oneOf, parameter[call[name[self].allRoles, parameter[]]]]]]]]]] variable[interfaces] assign[=] list[[]] for taget[name[share]] in starred[name[shares]] begin[:] <ast.AugAssign object at 0x7da1b0ba88e0> if name[shares] begin[:] return[call[name[SharedProxy], parameter[call[name[shares]][constant[0]].sharedItem, name[interfaces], name[shareID]]]] <ast.Raise object at 0x7da1b0ba8c70>
keyword[def] identifier[getShare] ( identifier[self] , identifier[shareID] ): literal[string] identifier[shares] = identifier[list] ( identifier[self] . identifier[store] . identifier[query] ( identifier[Share] , identifier[AND] ( identifier[Share] . identifier[shareID] == identifier[shareID] , identifier[Share] . identifier[sharedTo] . identifier[oneOf] ( identifier[self] . identifier[allRoles] ())))) identifier[interfaces] =[] keyword[for] identifier[share] keyword[in] identifier[shares] : identifier[interfaces] += identifier[share] . identifier[sharedInterfaces] keyword[if] identifier[shares] : keyword[return] identifier[SharedProxy] ( identifier[shares] [ literal[int] ]. identifier[sharedItem] , identifier[interfaces] , identifier[shareID] ) keyword[raise] identifier[NoSuchShare] ()
def getShare(self, shareID): """ Retrieve a proxy object for a given shareID, previously shared with this role or one of its group roles via L{Role.shareItem}. @return: a L{SharedProxy}. This is a wrapper around the shared item which only exposes those interfaces explicitly allowed for the given role. @raise: L{NoSuchShare} if there is no item shared to the given role for the given shareID. """ shares = list(self.store.query(Share, AND(Share.shareID == shareID, Share.sharedTo.oneOf(self.allRoles())))) interfaces = [] for share in shares: interfaces += share.sharedInterfaces # depends on [control=['for'], data=['share']] if shares: return SharedProxy(shares[0].sharedItem, interfaces, shareID) # depends on [control=['if'], data=[]] raise NoSuchShare()
def find_minimum_spanning_tree(graph):
    """Calculate a minimum spanning tree for a graph.

    Returns the list of edges that define the tree, or an empty list for an
    empty or edgeless graph.  Raises DisconnectedGraphError when the graph
    has more than one connected component.
    """
    # Trivial graphs have no spanning edges at all.
    if graph.num_nodes() == 0 or graph.num_edges() == 0:
        return []

    if len(get_connected_components(graph)) > 1:
        raise DisconnectedGraphError

    return kruskal_mst(graph)
def function[find_minimum_spanning_tree, parameter[graph]]: constant[Calculates a minimum spanning tree for a graph. Returns a list of edges that define the tree. Returns an empty list for an empty graph. ] variable[mst] assign[=] list[[]] if compare[call[name[graph].num_nodes, parameter[]] equal[==] constant[0]] begin[:] return[name[mst]] if compare[call[name[graph].num_edges, parameter[]] equal[==] constant[0]] begin[:] return[name[mst]] variable[connected_components] assign[=] call[name[get_connected_components], parameter[name[graph]]] if compare[call[name[len], parameter[name[connected_components]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b2866e00> variable[edge_list] assign[=] call[name[kruskal_mst], parameter[name[graph]]] return[name[edge_list]]
keyword[def] identifier[find_minimum_spanning_tree] ( identifier[graph] ): literal[string] identifier[mst] =[] keyword[if] identifier[graph] . identifier[num_nodes] ()== literal[int] : keyword[return] identifier[mst] keyword[if] identifier[graph] . identifier[num_edges] ()== literal[int] : keyword[return] identifier[mst] identifier[connected_components] = identifier[get_connected_components] ( identifier[graph] ) keyword[if] identifier[len] ( identifier[connected_components] )> literal[int] : keyword[raise] identifier[DisconnectedGraphError] identifier[edge_list] = identifier[kruskal_mst] ( identifier[graph] ) keyword[return] identifier[edge_list]
def find_minimum_spanning_tree(graph): """Calculates a minimum spanning tree for a graph. Returns a list of edges that define the tree. Returns an empty list for an empty graph. """ mst = [] if graph.num_nodes() == 0: return mst # depends on [control=['if'], data=[]] if graph.num_edges() == 0: return mst # depends on [control=['if'], data=[]] connected_components = get_connected_components(graph) if len(connected_components) > 1: raise DisconnectedGraphError # depends on [control=['if'], data=[]] edge_list = kruskal_mst(graph) return edge_list
def update_repository_method(namespace, method, synopsis, wdl, doc=None, comment=""):
    """Create/Update workflow definition.

    FireCloud will create a new snapshot_id for the given workflow.

    Args:
        namespace (str): Methods namespace
        method (str): method name
        synopsis (str): short (<80 char) description of method
        wdl (file): Workflow Description Language file
        doc (file): (Optional) Additional documentation
        comment (str): (Optional) Comment specific to this snapshot

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/post_api_methods
    """
    with open(wdl, 'r') as wdl_handle:
        wdl_payload = wdl_handle.read()
    if doc is not None:
        with open(doc, 'r') as doc_handle:
            doc = doc_handle.read()

    fields = {
        "namespace": namespace,
        "name": method,
        "entityType": "Workflow",
        "payload": wdl_payload,
        "documentation": doc,
        "synopsis": synopsis,
        "snapshotComment": comment,
    }
    # Drop falsy entries (None docs, empty comment) before posting.
    body = {key: value for key, value in fields.items() if value}
    return __post("methods", json=body)
def function[update_repository_method, parameter[namespace, method, synopsis, wdl, doc, comment]]: constant[Create/Update workflow definition. FireCloud will create a new snapshot_id for the given workflow. Args: namespace (str): Methods namespace method (str): method name synopsis (str): short (<80 char) description of method wdl (file): Workflow Description Language file doc (file): (Optional) Additional documentation comment (str): (Optional) Comment specific to this snapshot Swagger: https://api.firecloud.org/#!/Method_Repository/post_api_methods ] with call[name[open], parameter[name[wdl], constant[r]]] begin[:] variable[wdl_payload] assign[=] call[name[wf].read, parameter[]] if compare[name[doc] is_not constant[None]] begin[:] with call[name[open], parameter[name[doc], constant[r]]] begin[:] variable[doc] assign[=] call[name[df].read, parameter[]] variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b03970>, <ast.Constant object at 0x7da1b1b00e80>, <ast.Constant object at 0x7da1b1b01f90>, <ast.Constant object at 0x7da1b1b037c0>, <ast.Constant object at 0x7da1b1b02800>, <ast.Constant object at 0x7da1b1b03850>, <ast.Constant object at 0x7da1b1b009a0>], [<ast.Name object at 0x7da1b1b00370>, <ast.Name object at 0x7da1b1b02950>, <ast.Constant object at 0x7da1b1b01030>, <ast.Name object at 0x7da1b1b03d30>, <ast.Name object at 0x7da1b1b00190>, <ast.Name object at 0x7da1b1b03400>, <ast.Name object at 0x7da1b1b011b0>]] return[call[name[__post], parameter[constant[methods]]]]
keyword[def] identifier[update_repository_method] ( identifier[namespace] , identifier[method] , identifier[synopsis] , identifier[wdl] , identifier[doc] = keyword[None] , identifier[comment] = literal[string] ): literal[string] keyword[with] identifier[open] ( identifier[wdl] , literal[string] ) keyword[as] identifier[wf] : identifier[wdl_payload] = identifier[wf] . identifier[read] () keyword[if] identifier[doc] keyword[is] keyword[not] keyword[None] : keyword[with] identifier[open] ( identifier[doc] , literal[string] ) keyword[as] identifier[df] : identifier[doc] = identifier[df] . identifier[read] () identifier[body] ={ literal[string] : identifier[namespace] , literal[string] : identifier[method] , literal[string] : literal[string] , literal[string] : identifier[wdl_payload] , literal[string] : identifier[doc] , literal[string] : identifier[synopsis] , literal[string] : identifier[comment] } keyword[return] identifier[__post] ( literal[string] , identifier[json] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[body] . identifier[items] () keyword[if] identifier[value] })
def update_repository_method(namespace, method, synopsis, wdl, doc=None, comment=''): """Create/Update workflow definition. FireCloud will create a new snapshot_id for the given workflow. Args: namespace (str): Methods namespace method (str): method name synopsis (str): short (<80 char) description of method wdl (file): Workflow Description Language file doc (file): (Optional) Additional documentation comment (str): (Optional) Comment specific to this snapshot Swagger: https://api.firecloud.org/#!/Method_Repository/post_api_methods """ with open(wdl, 'r') as wf: wdl_payload = wf.read() # depends on [control=['with'], data=['wf']] if doc is not None: with open(doc, 'r') as df: doc = df.read() # depends on [control=['with'], data=['df']] # depends on [control=['if'], data=['doc']] body = {'namespace': namespace, 'name': method, 'entityType': 'Workflow', 'payload': wdl_payload, 'documentation': doc, 'synopsis': synopsis, 'snapshotComment': comment} return __post('methods', json={key: value for (key, value) in body.items() if value})
def _element(cls):
    '''Find and return the element described by cls.__control.

    Waits up to __control["timeout"] seconds for at least one element
    matching the (by, value) selector, then returns the element at
    __control["index"], resetting the index to 0 afterwards.

    :raise Exception: if the selector is invalid, the wait times out, or
        the index is out of range for the matched elements.
    '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." %cls.__control["by"])
    driver = Web.driver
    try:
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(
            lambda driver: getattr(driver, "find_elements")(
                cls.__control["by"], cls.__control["value"]))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any wait failure is reported as timeout.
        raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
    if len(elements) < cls.__control["index"] + 1:
        raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"]))
    if len(elements) > 1:
        print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"]))
    elm = elements[cls.__control["index"]]
    # Index is one-shot: reset so subsequent lookups default to the first hit.
    cls.__control["index"] = 0
    return elm
def function[_element, parameter[cls]]: constant[ find the element with controls ] if <ast.UnaryOp object at 0x7da2054a6a40> begin[:] <ast.Raise object at 0x7da2054a4550> variable[driver] assign[=] name[Web].driver <ast.Try object at 0x7da2054a5f30> if compare[call[name[len], parameter[name[elements]]] less[<] binary_operation[call[name[cls].__control][constant[index]] + constant[1]]] begin[:] <ast.Raise object at 0x7da2054a6cb0> if compare[call[name[len], parameter[name[elements]]] greater[>] constant[1]] begin[:] call[name[print], parameter[binary_operation[constant[Element [%s]: There are [%d] elements, choosed index=%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2054a6950>, <ast.Call object at 0x7da2054a5ea0>, <ast.Subscript object at 0x7da2054a4340>]]]]] variable[elm] assign[=] call[name[elements]][call[name[cls].__control][constant[index]]] call[name[cls].__control][constant[index]] assign[=] constant[0] return[name[elm]]
keyword[def] identifier[_element] ( identifier[cls] ): literal[string] keyword[if] keyword[not] identifier[cls] . identifier[__is_selector] (): keyword[raise] identifier[Exception] ( literal[string] % identifier[cls] . identifier[__control] [ literal[string] ]) identifier[driver] = identifier[Web] . identifier[driver] keyword[try] : identifier[elements] = identifier[WebDriverWait] ( identifier[driver] , identifier[cls] . identifier[__control] [ literal[string] ]). identifier[until] ( keyword[lambda] identifier[driver] : identifier[getattr] ( identifier[driver] , literal[string] )( identifier[cls] . identifier[__control] [ literal[string] ], identifier[cls] . identifier[__control] [ literal[string] ])) keyword[except] : keyword[raise] identifier[Exception] ( literal[string] %( identifier[cls] . identifier[__control] [ literal[string] ], identifier[cls] . identifier[__control] [ literal[string] ])) keyword[if] identifier[len] ( identifier[elements] )< identifier[cls] . identifier[__control] [ literal[string] ]+ literal[int] : keyword[raise] identifier[Exception] ( literal[string] %( identifier[cls] . identifier[__name__] , identifier[len] ( identifier[elements] ), identifier[cls] . identifier[__control] [ literal[string] ])) keyword[if] identifier[len] ( identifier[elements] )> literal[int] : identifier[print] ( literal[string] %( identifier[cls] . identifier[__name__] , identifier[len] ( identifier[elements] ), identifier[cls] . identifier[__control] [ literal[string] ])) identifier[elm] = identifier[elements] [ identifier[cls] . identifier[__control] [ literal[string] ]] identifier[cls] . identifier[__control] [ literal[string] ]= literal[int] keyword[return] identifier[elm]
def _element(cls): """ find the element with controls """ if not cls.__is_selector(): raise Exception('Invalid selector[%s].' % cls.__control['by']) # depends on [control=['if'], data=[]] driver = Web.driver try: elements = WebDriverWait(driver, cls.__control['timeout']).until(lambda driver: getattr(driver, 'find_elements')(cls.__control['by'], cls.__control['value'])) # depends on [control=['try'], data=[]] except: raise Exception('Timeout at %d seconds.Element(%s) not found.' % (cls.__control['timeout'], cls.__control['by'])) # depends on [control=['except'], data=[]] if len(elements) < cls.__control['index'] + 1: raise Exception('Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]' % (cls.__name__, len(elements), cls.__control['index'])) # depends on [control=['if'], data=[]] if len(elements) > 1: print('Element [%s]: There are [%d] elements, choosed index=%d' % (cls.__name__, len(elements), cls.__control['index'])) # depends on [control=['if'], data=[]] elm = elements[cls.__control['index']] cls.__control['index'] = 0 return elm
def __graceful_shutdown(self): """ call shutdown routines """ retcode = 1 self.log.info("Trying to shutdown gracefully...") retcode = self.core.plugins_end_test(retcode) retcode = self.core.plugins_post_process(retcode) self.log.info("Done graceful shutdown") return retcode
def function[__graceful_shutdown, parameter[self]]: constant[ call shutdown routines ] variable[retcode] assign[=] constant[1] call[name[self].log.info, parameter[constant[Trying to shutdown gracefully...]]] variable[retcode] assign[=] call[name[self].core.plugins_end_test, parameter[name[retcode]]] variable[retcode] assign[=] call[name[self].core.plugins_post_process, parameter[name[retcode]]] call[name[self].log.info, parameter[constant[Done graceful shutdown]]] return[name[retcode]]
keyword[def] identifier[__graceful_shutdown] ( identifier[self] ): literal[string] identifier[retcode] = literal[int] identifier[self] . identifier[log] . identifier[info] ( literal[string] ) identifier[retcode] = identifier[self] . identifier[core] . identifier[plugins_end_test] ( identifier[retcode] ) identifier[retcode] = identifier[self] . identifier[core] . identifier[plugins_post_process] ( identifier[retcode] ) identifier[self] . identifier[log] . identifier[info] ( literal[string] ) keyword[return] identifier[retcode]
def __graceful_shutdown(self): """ call shutdown routines """ retcode = 1 self.log.info('Trying to shutdown gracefully...') retcode = self.core.plugins_end_test(retcode) retcode = self.core.plugins_post_process(retcode) self.log.info('Done graceful shutdown') return retcode
def urls(self, version=None): """Returns all URLS that are mapped to this interface""" urls = [] for base_url, routes in self.api.http.routes.items(): for url, methods in routes.items(): for method, versions in methods.items(): for interface_version, interface in versions.items(): if interface_version == version and interface == self: if not url in urls: urls.append(('/v{0}'.format(version) if version else '') + url) return urls
def function[urls, parameter[self, version]]: constant[Returns all URLS that are mapped to this interface] variable[urls] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1b47250>, <ast.Name object at 0x7da1b1b47d30>]]] in starred[call[name[self].api.http.routes.items, parameter[]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1b477f0>, <ast.Name object at 0x7da1b1b47dc0>]]] in starred[call[name[routes].items, parameter[]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1b47c70>, <ast.Name object at 0x7da1b1b47610>]]] in starred[call[name[methods].items, parameter[]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1b44fd0>, <ast.Name object at 0x7da1b1b464d0>]]] in starred[call[name[versions].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b1b44730> begin[:] if <ast.UnaryOp object at 0x7da1b1b47760> begin[:] call[name[urls].append, parameter[binary_operation[<ast.IfExp object at 0x7da1b1b45de0> + name[url]]]] return[name[urls]]
keyword[def] identifier[urls] ( identifier[self] , identifier[version] = keyword[None] ): literal[string] identifier[urls] =[] keyword[for] identifier[base_url] , identifier[routes] keyword[in] identifier[self] . identifier[api] . identifier[http] . identifier[routes] . identifier[items] (): keyword[for] identifier[url] , identifier[methods] keyword[in] identifier[routes] . identifier[items] (): keyword[for] identifier[method] , identifier[versions] keyword[in] identifier[methods] . identifier[items] (): keyword[for] identifier[interface_version] , identifier[interface] keyword[in] identifier[versions] . identifier[items] (): keyword[if] identifier[interface_version] == identifier[version] keyword[and] identifier[interface] == identifier[self] : keyword[if] keyword[not] identifier[url] keyword[in] identifier[urls] : identifier[urls] . identifier[append] (( literal[string] . identifier[format] ( identifier[version] ) keyword[if] identifier[version] keyword[else] literal[string] )+ identifier[url] ) keyword[return] identifier[urls]
def urls(self, version=None): """Returns all URLS that are mapped to this interface""" urls = [] for (base_url, routes) in self.api.http.routes.items(): for (url, methods) in routes.items(): for (method, versions) in methods.items(): for (interface_version, interface) in versions.items(): if interface_version == version and interface == self: if not url in urls: urls.append(('/v{0}'.format(version) if version else '') + url) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] return urls
def start(self): """ Start the WBEM listener threads, if they are not yet running. A thread serving CIM-XML over HTTP is started if an HTTP port was specified for the listener. A thread serving CIM-XML over HTTPS is started if an HTTPS port was specified for the listener. These server threads will handle the ExportIndication export message described in :term:`DSP0200` and they will invoke the registered callback functions for any received CIM indications. The listener must be stopped again in order to free the TCP/IP port it listens on. The listener can be stopped explicitly using the :meth:`~pywbem.WBEMListener.stop` method. The listener will be automatically stopped when the main thread terminates (i.e. when the Python process terminates), or when :class:`~pywbem.WBEMListener` is used as a context manager when leaving its scope. Raises: :exc:`~py:exceptions.OSError`: with :attr:`~OSError.errno` = :data:`py:errno.EADDRINUSE` when the WBEM listener port is already in use. """ if self._http_port: if not self._http_server: try: server = ThreadedHTTPServer((self._host, self._http_port), ListenerRequestHandler) except Exception as exc: # Linux+py2: socket.error; Linux+py3: OSError; # Windows does not raise any exception. if getattr(exc, 'errno', None) == errno.EADDRINUSE: # Reraise with improved error message msg = _format("WBEM listener port {0} already in use", self._http_port) exc_type = OSError six.reraise(exc_type, exc_type(errno.EADDRINUSE, msg), sys.exc_info()[2]) raise # pylint: disable=attribute-defined-outside-init server.listener = self thread = threading.Thread(target=server.serve_forever) thread.daemon = True # Exit server thread upon main thread exit self._http_server = server self._http_thread = thread thread.start() else: # Just in case someone changed self._http_port after init... 
self._http_server = None self._http_thread = None if self._https_port: if not self._https_server: try: server = ThreadedHTTPServer((self._host, self._https_port), ListenerRequestHandler) except Exception as exc: # Linux+py2: socket.error; Linux+py3: OSError; # Windows does not raise any exception. if getattr(exc, 'errno', None) == errno.EADDRINUSE: # Reraise with improved error message msg = _format("WBEM listener port {0} already in use", self._http_port) exc_type = OSError six.reraise(exc_type, exc_type(errno.EADDRINUSE, msg), sys.exc_info()[2]) raise # pylint: disable=attribute-defined-outside-init server.listener = self server.socket = ssl.wrap_socket(server.socket, certfile=self._certfile, keyfile=self._keyfile, server_side=True) thread = threading.Thread(target=server.serve_forever) thread.daemon = True # Exit server thread upon main thread exit self._https_server = server self._https_thread = thread thread.start() else: # Just in case someone changed self._https_port after init... self._https_server = None self._https_thread = None
def function[start, parameter[self]]: constant[ Start the WBEM listener threads, if they are not yet running. A thread serving CIM-XML over HTTP is started if an HTTP port was specified for the listener. A thread serving CIM-XML over HTTPS is started if an HTTPS port was specified for the listener. These server threads will handle the ExportIndication export message described in :term:`DSP0200` and they will invoke the registered callback functions for any received CIM indications. The listener must be stopped again in order to free the TCP/IP port it listens on. The listener can be stopped explicitly using the :meth:`~pywbem.WBEMListener.stop` method. The listener will be automatically stopped when the main thread terminates (i.e. when the Python process terminates), or when :class:`~pywbem.WBEMListener` is used as a context manager when leaving its scope. Raises: :exc:`~py:exceptions.OSError`: with :attr:`~OSError.errno` = :data:`py:errno.EADDRINUSE` when the WBEM listener port is already in use. ] if name[self]._http_port begin[:] if <ast.UnaryOp object at 0x7da18f09c1f0> begin[:] <ast.Try object at 0x7da18f09ea10> name[server].listener assign[=] name[self] variable[thread] assign[=] call[name[threading].Thread, parameter[]] name[thread].daemon assign[=] constant[True] name[self]._http_server assign[=] name[server] name[self]._http_thread assign[=] name[thread] call[name[thread].start, parameter[]] if name[self]._https_port begin[:] if <ast.UnaryOp object at 0x7da20e960280> begin[:] <ast.Try object at 0x7da20e961b40> name[server].listener assign[=] name[self] name[server].socket assign[=] call[name[ssl].wrap_socket, parameter[name[server].socket]] variable[thread] assign[=] call[name[threading].Thread, parameter[]] name[thread].daemon assign[=] constant[True] name[self]._https_server assign[=] name[server] name[self]._https_thread assign[=] name[thread] call[name[thread].start, parameter[]]
keyword[def] identifier[start] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_http_port] : keyword[if] keyword[not] identifier[self] . identifier[_http_server] : keyword[try] : identifier[server] = identifier[ThreadedHTTPServer] (( identifier[self] . identifier[_host] , identifier[self] . identifier[_http_port] ), identifier[ListenerRequestHandler] ) keyword[except] identifier[Exception] keyword[as] identifier[exc] : keyword[if] identifier[getattr] ( identifier[exc] , literal[string] , keyword[None] )== identifier[errno] . identifier[EADDRINUSE] : identifier[msg] = identifier[_format] ( literal[string] , identifier[self] . identifier[_http_port] ) identifier[exc_type] = identifier[OSError] identifier[six] . identifier[reraise] ( identifier[exc_type] , identifier[exc_type] ( identifier[errno] . identifier[EADDRINUSE] , identifier[msg] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ]) keyword[raise] identifier[server] . identifier[listener] = identifier[self] identifier[thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[server] . identifier[serve_forever] ) identifier[thread] . identifier[daemon] = keyword[True] identifier[self] . identifier[_http_server] = identifier[server] identifier[self] . identifier[_http_thread] = identifier[thread] identifier[thread] . identifier[start] () keyword[else] : identifier[self] . identifier[_http_server] = keyword[None] identifier[self] . identifier[_http_thread] = keyword[None] keyword[if] identifier[self] . identifier[_https_port] : keyword[if] keyword[not] identifier[self] . identifier[_https_server] : keyword[try] : identifier[server] = identifier[ThreadedHTTPServer] (( identifier[self] . identifier[_host] , identifier[self] . 
identifier[_https_port] ), identifier[ListenerRequestHandler] ) keyword[except] identifier[Exception] keyword[as] identifier[exc] : keyword[if] identifier[getattr] ( identifier[exc] , literal[string] , keyword[None] )== identifier[errno] . identifier[EADDRINUSE] : identifier[msg] = identifier[_format] ( literal[string] , identifier[self] . identifier[_http_port] ) identifier[exc_type] = identifier[OSError] identifier[six] . identifier[reraise] ( identifier[exc_type] , identifier[exc_type] ( identifier[errno] . identifier[EADDRINUSE] , identifier[msg] ), identifier[sys] . identifier[exc_info] ()[ literal[int] ]) keyword[raise] identifier[server] . identifier[listener] = identifier[self] identifier[server] . identifier[socket] = identifier[ssl] . identifier[wrap_socket] ( identifier[server] . identifier[socket] , identifier[certfile] = identifier[self] . identifier[_certfile] , identifier[keyfile] = identifier[self] . identifier[_keyfile] , identifier[server_side] = keyword[True] ) identifier[thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[server] . identifier[serve_forever] ) identifier[thread] . identifier[daemon] = keyword[True] identifier[self] . identifier[_https_server] = identifier[server] identifier[self] . identifier[_https_thread] = identifier[thread] identifier[thread] . identifier[start] () keyword[else] : identifier[self] . identifier[_https_server] = keyword[None] identifier[self] . identifier[_https_thread] = keyword[None]
def start(self): """ Start the WBEM listener threads, if they are not yet running. A thread serving CIM-XML over HTTP is started if an HTTP port was specified for the listener. A thread serving CIM-XML over HTTPS is started if an HTTPS port was specified for the listener. These server threads will handle the ExportIndication export message described in :term:`DSP0200` and they will invoke the registered callback functions for any received CIM indications. The listener must be stopped again in order to free the TCP/IP port it listens on. The listener can be stopped explicitly using the :meth:`~pywbem.WBEMListener.stop` method. The listener will be automatically stopped when the main thread terminates (i.e. when the Python process terminates), or when :class:`~pywbem.WBEMListener` is used as a context manager when leaving its scope. Raises: :exc:`~py:exceptions.OSError`: with :attr:`~OSError.errno` = :data:`py:errno.EADDRINUSE` when the WBEM listener port is already in use. """ if self._http_port: if not self._http_server: try: server = ThreadedHTTPServer((self._host, self._http_port), ListenerRequestHandler) # depends on [control=['try'], data=[]] except Exception as exc: # Linux+py2: socket.error; Linux+py3: OSError; # Windows does not raise any exception. 
if getattr(exc, 'errno', None) == errno.EADDRINUSE: # Reraise with improved error message msg = _format('WBEM listener port {0} already in use', self._http_port) exc_type = OSError six.reraise(exc_type, exc_type(errno.EADDRINUSE, msg), sys.exc_info()[2]) # depends on [control=['if'], data=[]] raise # depends on [control=['except'], data=['exc']] # pylint: disable=attribute-defined-outside-init server.listener = self thread = threading.Thread(target=server.serve_forever) thread.daemon = True # Exit server thread upon main thread exit self._http_server = server self._http_thread = thread thread.start() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # Just in case someone changed self._http_port after init... self._http_server = None self._http_thread = None if self._https_port: if not self._https_server: try: server = ThreadedHTTPServer((self._host, self._https_port), ListenerRequestHandler) # depends on [control=['try'], data=[]] except Exception as exc: # Linux+py2: socket.error; Linux+py3: OSError; # Windows does not raise any exception. if getattr(exc, 'errno', None) == errno.EADDRINUSE: # Reraise with improved error message msg = _format('WBEM listener port {0} already in use', self._http_port) exc_type = OSError six.reraise(exc_type, exc_type(errno.EADDRINUSE, msg), sys.exc_info()[2]) # depends on [control=['if'], data=[]] raise # depends on [control=['except'], data=['exc']] # pylint: disable=attribute-defined-outside-init server.listener = self server.socket = ssl.wrap_socket(server.socket, certfile=self._certfile, keyfile=self._keyfile, server_side=True) thread = threading.Thread(target=server.serve_forever) thread.daemon = True # Exit server thread upon main thread exit self._https_server = server self._https_thread = thread thread.start() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # Just in case someone changed self._https_port after init... 
self._https_server = None self._https_thread = None
def delete(cls, id): ''' Destroy a Union object ''' client = cls._new_api_client() return client.make_request(cls, 'delete', url_params={'id': id})
def function[delete, parameter[cls, id]]: constant[ Destroy a Union object ] variable[client] assign[=] call[name[cls]._new_api_client, parameter[]] return[call[name[client].make_request, parameter[name[cls], constant[delete]]]]
keyword[def] identifier[delete] ( identifier[cls] , identifier[id] ): literal[string] identifier[client] = identifier[cls] . identifier[_new_api_client] () keyword[return] identifier[client] . identifier[make_request] ( identifier[cls] , literal[string] , identifier[url_params] ={ literal[string] : identifier[id] })
def delete(cls, id): """ Destroy a Union object """ client = cls._new_api_client() return client.make_request(cls, 'delete', url_params={'id': id})
def clone(self, instance): ''' Create a shallow clone of an *instance*. **Note:** the clone and the original instance **does not** have to be part of the same metaclass. ''' metaclass = get_metaclass(instance) metaclass = self.find_metaclass(metaclass.kind) return metaclass.clone(instance)
def function[clone, parameter[self, instance]]: constant[ Create a shallow clone of an *instance*. **Note:** the clone and the original instance **does not** have to be part of the same metaclass. ] variable[metaclass] assign[=] call[name[get_metaclass], parameter[name[instance]]] variable[metaclass] assign[=] call[name[self].find_metaclass, parameter[name[metaclass].kind]] return[call[name[metaclass].clone, parameter[name[instance]]]]
keyword[def] identifier[clone] ( identifier[self] , identifier[instance] ): literal[string] identifier[metaclass] = identifier[get_metaclass] ( identifier[instance] ) identifier[metaclass] = identifier[self] . identifier[find_metaclass] ( identifier[metaclass] . identifier[kind] ) keyword[return] identifier[metaclass] . identifier[clone] ( identifier[instance] )
def clone(self, instance): """ Create a shallow clone of an *instance*. **Note:** the clone and the original instance **does not** have to be part of the same metaclass. """ metaclass = get_metaclass(instance) metaclass = self.find_metaclass(metaclass.kind) return metaclass.clone(instance)
def resnext(units, num_stages, filter_list, num_classes, num_group, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False): """Return ResNeXt symbol of Parameters ---------- units : list Number of units in each stage num_stages : int Number of stage filter_list : list Channel size of each stage num_classes : int Ouput size of symbol num_groupes: int Number of conv groups dataset : str Dataset type, only cifar10 and imagenet supports workspace : int Workspace used in convolution operator dtype : str Precision (float32 or float16) """ num_unit = len(units) assert(num_unit == num_stages) data = mx.sym.Variable(name='data') if dtype == 'float32': data = mx.sym.identity(data=data, name='id') else: if dtype == 'float16': data = mx.sym.Cast(data=data, dtype=np.float16) data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data') (nchannel, height, width) = image_shape if height <= 32: # such as cifar10 body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1), no_bias=True, name="conv0", workspace=workspace) else: # often expected to be 224 such as imagenet body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3), no_bias=True, name="conv0", workspace=workspace) body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0') body = mx.sym.Activation(data=body, act_type='relu', name='relu0') body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max') for i in range(num_stages): body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False, name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger) for j in range(units[i]-1): body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2), bottle_neck=bottle_neck, 
num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger) pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1') flat = mx.sym.Flatten(data=pool1) fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1') if dtype == 'float16': fc1 = mx.sym.Cast(data=fc1, dtype=np.float32) return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def function[resnext, parameter[units, num_stages, filter_list, num_classes, num_group, image_shape, bottle_neck, bn_mom, workspace, dtype, memonger]]: constant[Return ResNeXt symbol of Parameters ---------- units : list Number of units in each stage num_stages : int Number of stage filter_list : list Channel size of each stage num_classes : int Ouput size of symbol num_groupes: int Number of conv groups dataset : str Dataset type, only cifar10 and imagenet supports workspace : int Workspace used in convolution operator dtype : str Precision (float32 or float16) ] variable[num_unit] assign[=] call[name[len], parameter[name[units]]] assert[compare[name[num_unit] equal[==] name[num_stages]]] variable[data] assign[=] call[name[mx].sym.Variable, parameter[]] if compare[name[dtype] equal[==] constant[float32]] begin[:] variable[data] assign[=] call[name[mx].sym.identity, parameter[]] variable[data] assign[=] call[name[mx].sym.BatchNorm, parameter[]] <ast.Tuple object at 0x7da1b200c6a0> assign[=] name[image_shape] if compare[name[height] less_or_equal[<=] constant[32]] begin[:] variable[body] assign[=] call[name[mx].sym.Convolution, parameter[]] for taget[name[i]] in starred[call[name[range], parameter[name[num_stages]]]] begin[:] variable[body] assign[=] call[name[residual_unit], parameter[name[body], call[name[filter_list]][binary_operation[name[i] + constant[1]]], tuple[[<ast.IfExp object at 0x7da1b200e3e0>, <ast.IfExp object at 0x7da1b200d450>]], constant[False]]] for taget[name[j]] in starred[call[name[range], parameter[binary_operation[call[name[units]][name[i]] - constant[1]]]]] begin[:] variable[body] assign[=] call[name[residual_unit], parameter[name[body], call[name[filter_list]][binary_operation[name[i] + constant[1]]], tuple[[<ast.Constant object at 0x7da1b200f340>, <ast.Constant object at 0x7da1b200e770>]], constant[True]]] variable[pool1] assign[=] call[name[mx].sym.Pooling, parameter[]] variable[flat] assign[=] call[name[mx].sym.Flatten, parameter[]] 
variable[fc1] assign[=] call[name[mx].sym.FullyConnected, parameter[]] if compare[name[dtype] equal[==] constant[float16]] begin[:] variable[fc1] assign[=] call[name[mx].sym.Cast, parameter[]] return[call[name[mx].sym.SoftmaxOutput, parameter[]]]
keyword[def] identifier[resnext] ( identifier[units] , identifier[num_stages] , identifier[filter_list] , identifier[num_classes] , identifier[num_group] , identifier[image_shape] , identifier[bottle_neck] = keyword[True] , identifier[bn_mom] = literal[int] , identifier[workspace] = literal[int] , identifier[dtype] = literal[string] , identifier[memonger] = keyword[False] ): literal[string] identifier[num_unit] = identifier[len] ( identifier[units] ) keyword[assert] ( identifier[num_unit] == identifier[num_stages] ) identifier[data] = identifier[mx] . identifier[sym] . identifier[Variable] ( identifier[name] = literal[string] ) keyword[if] identifier[dtype] == literal[string] : identifier[data] = identifier[mx] . identifier[sym] . identifier[identity] ( identifier[data] = identifier[data] , identifier[name] = literal[string] ) keyword[else] : keyword[if] identifier[dtype] == literal[string] : identifier[data] = identifier[mx] . identifier[sym] . identifier[Cast] ( identifier[data] = identifier[data] , identifier[dtype] = identifier[np] . identifier[float16] ) identifier[data] = identifier[mx] . identifier[sym] . identifier[BatchNorm] ( identifier[data] = identifier[data] , identifier[fix_gamma] = keyword[True] , identifier[eps] = literal[int] , identifier[momentum] = identifier[bn_mom] , identifier[name] = literal[string] ) ( identifier[nchannel] , identifier[height] , identifier[width] )= identifier[image_shape] keyword[if] identifier[height] <= literal[int] : identifier[body] = identifier[mx] . identifier[sym] . 
identifier[Convolution] ( identifier[data] = identifier[data] , identifier[num_filter] = identifier[filter_list] [ literal[int] ], identifier[kernel] =( literal[int] , literal[int] ), identifier[stride] =( literal[int] , literal[int] ), identifier[pad] =( literal[int] , literal[int] ), identifier[no_bias] = keyword[True] , identifier[name] = literal[string] , identifier[workspace] = identifier[workspace] ) keyword[else] : identifier[body] = identifier[mx] . identifier[sym] . identifier[Convolution] ( identifier[data] = identifier[data] , identifier[num_filter] = identifier[filter_list] [ literal[int] ], identifier[kernel] =( literal[int] , literal[int] ), identifier[stride] =( literal[int] , literal[int] ), identifier[pad] =( literal[int] , literal[int] ), identifier[no_bias] = keyword[True] , identifier[name] = literal[string] , identifier[workspace] = identifier[workspace] ) identifier[body] = identifier[mx] . identifier[sym] . identifier[BatchNorm] ( identifier[data] = identifier[body] , identifier[fix_gamma] = keyword[False] , identifier[eps] = literal[int] , identifier[momentum] = identifier[bn_mom] , identifier[name] = literal[string] ) identifier[body] = identifier[mx] . identifier[sym] . identifier[Activation] ( identifier[data] = identifier[body] , identifier[act_type] = literal[string] , identifier[name] = literal[string] ) identifier[body] = identifier[mx] . identifier[sym] . 
identifier[Pooling] ( identifier[data] = identifier[body] , identifier[kernel] =( literal[int] , literal[int] ), identifier[stride] =( literal[int] , literal[int] ), identifier[pad] =( literal[int] , literal[int] ), identifier[pool_type] = literal[string] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_stages] ): identifier[body] = identifier[residual_unit] ( identifier[body] , identifier[filter_list] [ identifier[i] + literal[int] ],( literal[int] keyword[if] identifier[i] == literal[int] keyword[else] literal[int] , literal[int] keyword[if] identifier[i] == literal[int] keyword[else] literal[int] ), keyword[False] , identifier[name] = literal[string] %( identifier[i] + literal[int] , literal[int] ), identifier[bottle_neck] = identifier[bottle_neck] , identifier[num_group] = identifier[num_group] , identifier[bn_mom] = identifier[bn_mom] , identifier[workspace] = identifier[workspace] , identifier[memonger] = identifier[memonger] ) keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[units] [ identifier[i] ]- literal[int] ): identifier[body] = identifier[residual_unit] ( identifier[body] , identifier[filter_list] [ identifier[i] + literal[int] ],( literal[int] , literal[int] ), keyword[True] , identifier[name] = literal[string] %( identifier[i] + literal[int] , identifier[j] + literal[int] ), identifier[bottle_neck] = identifier[bottle_neck] , identifier[num_group] = identifier[num_group] , identifier[bn_mom] = identifier[bn_mom] , identifier[workspace] = identifier[workspace] , identifier[memonger] = identifier[memonger] ) identifier[pool1] = identifier[mx] . identifier[sym] . identifier[Pooling] ( identifier[data] = identifier[body] , identifier[global_pool] = keyword[True] , identifier[kernel] =( literal[int] , literal[int] ), identifier[pool_type] = literal[string] , identifier[name] = literal[string] ) identifier[flat] = identifier[mx] . identifier[sym] . 
identifier[Flatten] ( identifier[data] = identifier[pool1] ) identifier[fc1] = identifier[mx] . identifier[sym] . identifier[FullyConnected] ( identifier[data] = identifier[flat] , identifier[num_hidden] = identifier[num_classes] , identifier[name] = literal[string] ) keyword[if] identifier[dtype] == literal[string] : identifier[fc1] = identifier[mx] . identifier[sym] . identifier[Cast] ( identifier[data] = identifier[fc1] , identifier[dtype] = identifier[np] . identifier[float32] ) keyword[return] identifier[mx] . identifier[sym] . identifier[SoftmaxOutput] ( identifier[data] = identifier[fc1] , identifier[name] = literal[string] )
def resnext(units, num_stages, filter_list, num_classes, num_group, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False): """Return ResNeXt symbol of Parameters ---------- units : list Number of units in each stage num_stages : int Number of stage filter_list : list Channel size of each stage num_classes : int Ouput size of symbol num_groupes: int Number of conv groups dataset : str Dataset type, only cifar10 and imagenet supports workspace : int Workspace used in convolution operator dtype : str Precision (float32 or float16) """ num_unit = len(units) assert num_unit == num_stages data = mx.sym.Variable(name='data') if dtype == 'float32': data = mx.sym.identity(data=data, name='id') # depends on [control=['if'], data=[]] elif dtype == 'float16': data = mx.sym.Cast(data=data, dtype=np.float16) # depends on [control=['if'], data=[]] data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-05, momentum=bn_mom, name='bn_data') (nchannel, height, width) = image_shape if height <= 32: # such as cifar10 body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, name='conv0', workspace=workspace) # depends on [control=['if'], data=[]] else: # often expected to be 224 such as imagenet body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3), no_bias=True, name='conv0', workspace=workspace) body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-05, momentum=bn_mom, name='bn0') body = mx.sym.Activation(data=body, act_type='relu', name='relu0') body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max') for i in range(num_stages): body = residual_unit(body, filter_list[i + 1], (1 if i == 0 else 2, 1 if i == 0 else 2), False, name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger) for j in range(units[i] - 1): body = 
residual_unit(body, filter_list[i + 1], (1, 1), True, name='stage%d_unit%d' % (i + 1, j + 2), bottle_neck=bottle_neck, num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1') flat = mx.sym.Flatten(data=pool1) fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1') if dtype == 'float16': fc1 = mx.sym.Cast(data=fc1, dtype=np.float32) # depends on [control=['if'], data=[]] return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def refresh(self, force=False): """Refreshes the `access_token` and sets the praw instance `reddit_client` with a valid one. :param force: Boolean. Refresh will be done only when last refresh was done before `EXPIRY_DURATION`, which is 3500 seconds. However passing `force` will overrides this and refresh operation will be done everytime. """ if self._is_token_expired() or force: tokens = self._get_refresh_access() self.access_token = tokens['access_token'] self.refresh_token = tokens['refresh_token'] self._set_access_credentials()
def function[refresh, parameter[self, force]]: constant[Refreshes the `access_token` and sets the praw instance `reddit_client` with a valid one. :param force: Boolean. Refresh will be done only when last refresh was done before `EXPIRY_DURATION`, which is 3500 seconds. However passing `force` will overrides this and refresh operation will be done everytime. ] if <ast.BoolOp object at 0x7da18fe93e20> begin[:] variable[tokens] assign[=] call[name[self]._get_refresh_access, parameter[]] name[self].access_token assign[=] call[name[tokens]][constant[access_token]] name[self].refresh_token assign[=] call[name[tokens]][constant[refresh_token]] call[name[self]._set_access_credentials, parameter[]]
keyword[def] identifier[refresh] ( identifier[self] , identifier[force] = keyword[False] ): literal[string] keyword[if] identifier[self] . identifier[_is_token_expired] () keyword[or] identifier[force] : identifier[tokens] = identifier[self] . identifier[_get_refresh_access] () identifier[self] . identifier[access_token] = identifier[tokens] [ literal[string] ] identifier[self] . identifier[refresh_token] = identifier[tokens] [ literal[string] ] identifier[self] . identifier[_set_access_credentials] ()
def refresh(self, force=False): """Refreshes the `access_token` and sets the praw instance `reddit_client` with a valid one. :param force: Boolean. Refresh will be done only when last refresh was done before `EXPIRY_DURATION`, which is 3500 seconds. However passing `force` will overrides this and refresh operation will be done everytime. """ if self._is_token_expired() or force: tokens = self._get_refresh_access() self.access_token = tokens['access_token'] self.refresh_token = tokens['refresh_token'] self._set_access_credentials() # depends on [control=['if'], data=[]]
def next(self): """returns the next result :return: The next result. :rtype: dict :raises StopIteration: If no more result is left. """ if self._orderByPQ.size() > 0: targetRangeExContext = self._orderByPQ.pop() res = next(targetRangeExContext) try: """TODO: we can also use more_itertools.peekable to be more python friendly""" targetRangeExContext.peek() self._orderByPQ.push(targetRangeExContext) except StopIteration: pass return res raise StopIteration
def function[next, parameter[self]]: constant[returns the next result :return: The next result. :rtype: dict :raises StopIteration: If no more result is left. ] if compare[call[name[self]._orderByPQ.size, parameter[]] greater[>] constant[0]] begin[:] variable[targetRangeExContext] assign[=] call[name[self]._orderByPQ.pop, parameter[]] variable[res] assign[=] call[name[next], parameter[name[targetRangeExContext]]] <ast.Try object at 0x7da20c6a9180> return[name[res]] <ast.Raise object at 0x7da20c6aa890>
keyword[def] identifier[next] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_orderByPQ] . identifier[size] ()> literal[int] : identifier[targetRangeExContext] = identifier[self] . identifier[_orderByPQ] . identifier[pop] () identifier[res] = identifier[next] ( identifier[targetRangeExContext] ) keyword[try] : literal[string] identifier[targetRangeExContext] . identifier[peek] () identifier[self] . identifier[_orderByPQ] . identifier[push] ( identifier[targetRangeExContext] ) keyword[except] identifier[StopIteration] : keyword[pass] keyword[return] identifier[res] keyword[raise] identifier[StopIteration]
def next(self): """returns the next result :return: The next result. :rtype: dict :raises StopIteration: If no more result is left. """ if self._orderByPQ.size() > 0: targetRangeExContext = self._orderByPQ.pop() res = next(targetRangeExContext) try: 'TODO: we can also use more_itertools.peekable to be more python friendly' targetRangeExContext.peek() self._orderByPQ.push(targetRangeExContext) # depends on [control=['try'], data=[]] except StopIteration: pass # depends on [control=['except'], data=[]] return res # depends on [control=['if'], data=[]] raise StopIteration
def compile_results(self): """Compile all results for the current test """ self._init_dataframes() self.total_transactions = len(self.main_results['raw']) self._init_dates()
def function[compile_results, parameter[self]]: constant[Compile all results for the current test ] call[name[self]._init_dataframes, parameter[]] name[self].total_transactions assign[=] call[name[len], parameter[call[name[self].main_results][constant[raw]]]] call[name[self]._init_dates, parameter[]]
keyword[def] identifier[compile_results] ( identifier[self] ): literal[string] identifier[self] . identifier[_init_dataframes] () identifier[self] . identifier[total_transactions] = identifier[len] ( identifier[self] . identifier[main_results] [ literal[string] ]) identifier[self] . identifier[_init_dates] ()
def compile_results(self): """Compile all results for the current test """ self._init_dataframes() self.total_transactions = len(self.main_results['raw']) self._init_dates()
def _log(self, lvl, msg, type, args, kwargs): """ Internal method to filter into the formatter before being passed to the main Python logger """ extra = kwargs.get('extra', {}) extra.setdefault("fastlog-type", type) extra.setdefault("fastlog-indent", self._indent) kwargs['extra'] = extra self._lastlevel = lvl self.inner.log(lvl, msg, *args, **kwargs)
def function[_log, parameter[self, lvl, msg, type, args, kwargs]]: constant[ Internal method to filter into the formatter before being passed to the main Python logger ] variable[extra] assign[=] call[name[kwargs].get, parameter[constant[extra], dictionary[[], []]]] call[name[extra].setdefault, parameter[constant[fastlog-type], name[type]]] call[name[extra].setdefault, parameter[constant[fastlog-indent], name[self]._indent]] call[name[kwargs]][constant[extra]] assign[=] name[extra] name[self]._lastlevel assign[=] name[lvl] call[name[self].inner.log, parameter[name[lvl], name[msg], <ast.Starred object at 0x7da204344160>]]
keyword[def] identifier[_log] ( identifier[self] , identifier[lvl] , identifier[msg] , identifier[type] , identifier[args] , identifier[kwargs] ): literal[string] identifier[extra] = identifier[kwargs] . identifier[get] ( literal[string] ,{}) identifier[extra] . identifier[setdefault] ( literal[string] , identifier[type] ) identifier[extra] . identifier[setdefault] ( literal[string] , identifier[self] . identifier[_indent] ) identifier[kwargs] [ literal[string] ]= identifier[extra] identifier[self] . identifier[_lastlevel] = identifier[lvl] identifier[self] . identifier[inner] . identifier[log] ( identifier[lvl] , identifier[msg] ,* identifier[args] ,** identifier[kwargs] )
def _log(self, lvl, msg, type, args, kwargs): """ Internal method to filter into the formatter before being passed to the main Python logger """ extra = kwargs.get('extra', {}) extra.setdefault('fastlog-type', type) extra.setdefault('fastlog-indent', self._indent) kwargs['extra'] = extra self._lastlevel = lvl self.inner.log(lvl, msg, *args, **kwargs)
def count(self): """ Returns the number of actions associated with this button. :return <int> """ actions = self._actionGroup.actions() if len(actions) == 1 and actions[0].objectName() == 'place_holder': return 0 return len(actions)
def function[count, parameter[self]]: constant[ Returns the number of actions associated with this button. :return <int> ] variable[actions] assign[=] call[name[self]._actionGroup.actions, parameter[]] if <ast.BoolOp object at 0x7da18f09c0a0> begin[:] return[constant[0]] return[call[name[len], parameter[name[actions]]]]
keyword[def] identifier[count] ( identifier[self] ): literal[string] identifier[actions] = identifier[self] . identifier[_actionGroup] . identifier[actions] () keyword[if] identifier[len] ( identifier[actions] )== literal[int] keyword[and] identifier[actions] [ literal[int] ]. identifier[objectName] ()== literal[string] : keyword[return] literal[int] keyword[return] identifier[len] ( identifier[actions] )
def count(self): """ Returns the number of actions associated with this button. :return <int> """ actions = self._actionGroup.actions() if len(actions) == 1 and actions[0].objectName() == 'place_holder': return 0 # depends on [control=['if'], data=[]] return len(actions)
def columns(self): """List[:class:`~.external_config.BigtableColumn`]: Lists of columns that should be exposed as individual fields. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies.columns https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns """ prop = self._properties.get("columns", []) return [BigtableColumn.from_api_repr(col) for col in prop]
def function[columns, parameter[self]]: constant[List[:class:`~.external_config.BigtableColumn`]: Lists of columns that should be exposed as individual fields. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies.columns https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns ] variable[prop] assign[=] call[name[self]._properties.get, parameter[constant[columns], list[[]]]] return[<ast.ListComp object at 0x7da20e9b0a60>]
keyword[def] identifier[columns] ( identifier[self] ): literal[string] identifier[prop] = identifier[self] . identifier[_properties] . identifier[get] ( literal[string] ,[]) keyword[return] [ identifier[BigtableColumn] . identifier[from_api_repr] ( identifier[col] ) keyword[for] identifier[col] keyword[in] identifier[prop] ]
def columns(self): """List[:class:`~.external_config.BigtableColumn`]: Lists of columns that should be exposed as individual fields. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies.columns https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns """ prop = self._properties.get('columns', []) return [BigtableColumn.from_api_repr(col) for col in prop]
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None): """ convert a ASCII BASIC program listing into tokens. This tokens list can be used to insert it into the Emulator RAM. """ if program_start is None: program_start = self.DEFAULT_PROGRAM_START basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start) program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start) assert isinstance(program_dump, bytearray), ( "is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump)) ) return program_dump
def function[ascii_listing2program_dump, parameter[self, basic_program_ascii, program_start]]: constant[ convert a ASCII BASIC program listing into tokens. This tokens list can be used to insert it into the Emulator RAM. ] if compare[name[program_start] is constant[None]] begin[:] variable[program_start] assign[=] name[self].DEFAULT_PROGRAM_START variable[basic_lines] assign[=] call[name[self].ascii_listing2basic_lines, parameter[name[basic_program_ascii], name[program_start]]] variable[program_dump] assign[=] call[name[self].listing.basic_lines2program_dump, parameter[name[basic_lines], name[program_start]]] assert[call[name[isinstance], parameter[name[program_dump], name[bytearray]]]] return[name[program_dump]]
keyword[def] identifier[ascii_listing2program_dump] ( identifier[self] , identifier[basic_program_ascii] , identifier[program_start] = keyword[None] ): literal[string] keyword[if] identifier[program_start] keyword[is] keyword[None] : identifier[program_start] = identifier[self] . identifier[DEFAULT_PROGRAM_START] identifier[basic_lines] = identifier[self] . identifier[ascii_listing2basic_lines] ( identifier[basic_program_ascii] , identifier[program_start] ) identifier[program_dump] = identifier[self] . identifier[listing] . identifier[basic_lines2program_dump] ( identifier[basic_lines] , identifier[program_start] ) keyword[assert] identifier[isinstance] ( identifier[program_dump] , identifier[bytearray] ),( literal[string] %( identifier[type] ( identifier[program_dump] ), identifier[repr] ( identifier[program_dump] )) ) keyword[return] identifier[program_dump]
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None): """ convert a ASCII BASIC program listing into tokens. This tokens list can be used to insert it into the Emulator RAM. """ if program_start is None: program_start = self.DEFAULT_PROGRAM_START # depends on [control=['if'], data=['program_start']] basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start) program_dump = self.listing.basic_lines2program_dump(basic_lines, program_start) assert isinstance(program_dump, bytearray), 'is type: %s and not bytearray: %s' % (type(program_dump), repr(program_dump)) return program_dump
def _remove_non_serializable_store_entries(store: dict): """ This function is called if there are non-serializable items in the global script storage. This function removes all such items. """ removed_key_list = [] for key, value in store.items(): if not (_is_serializable(key) and _is_serializable(value)): _logger.info("Remove non-serializable item from the global script store. Key: '{}', Value: '{}'. " "This item cannot be saved and therefore will be lost.".format(key, value)) removed_key_list.append(key) for key in removed_key_list: del store[key]
def function[_remove_non_serializable_store_entries, parameter[store]]: constant[ This function is called if there are non-serializable items in the global script storage. This function removes all such items. ] variable[removed_key_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18dc98c10>, <ast.Name object at 0x7da18dc98b80>]]] in starred[call[name[store].items, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da18dc9b6a0> begin[:] call[name[_logger].info, parameter[call[constant[Remove non-serializable item from the global script store. Key: '{}', Value: '{}'. This item cannot be saved and therefore will be lost.].format, parameter[name[key], name[value]]]]] call[name[removed_key_list].append, parameter[name[key]]] for taget[name[key]] in starred[name[removed_key_list]] begin[:] <ast.Delete object at 0x7da204566a70>
keyword[def] identifier[_remove_non_serializable_store_entries] ( identifier[store] : identifier[dict] ): literal[string] identifier[removed_key_list] =[] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[store] . identifier[items] (): keyword[if] keyword[not] ( identifier[_is_serializable] ( identifier[key] ) keyword[and] identifier[_is_serializable] ( identifier[value] )): identifier[_logger] . identifier[info] ( literal[string] literal[string] . identifier[format] ( identifier[key] , identifier[value] )) identifier[removed_key_list] . identifier[append] ( identifier[key] ) keyword[for] identifier[key] keyword[in] identifier[removed_key_list] : keyword[del] identifier[store] [ identifier[key] ]
def _remove_non_serializable_store_entries(store: dict): """ This function is called if there are non-serializable items in the global script storage. This function removes all such items. """ removed_key_list = [] for (key, value) in store.items(): if not (_is_serializable(key) and _is_serializable(value)): _logger.info("Remove non-serializable item from the global script store. Key: '{}', Value: '{}'. This item cannot be saved and therefore will be lost.".format(key, value)) removed_key_list.append(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for key in removed_key_list: del store[key] # depends on [control=['for'], data=['key']]
def relaxNGNewParserCtxt(URL): """Create an XML RelaxNGs parse context for that file/resource expected to contain an XML RelaxNGs file. """ ret = libxml2mod.xmlRelaxNGNewParserCtxt(URL) if ret is None:raise parserError('xmlRelaxNGNewParserCtxt() failed') return relaxNgParserCtxt(_obj=ret)
def function[relaxNGNewParserCtxt, parameter[URL]]: constant[Create an XML RelaxNGs parse context for that file/resource expected to contain an XML RelaxNGs file. ] variable[ret] assign[=] call[name[libxml2mod].xmlRelaxNGNewParserCtxt, parameter[name[URL]]] if compare[name[ret] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1fa6140> return[call[name[relaxNgParserCtxt], parameter[]]]
keyword[def] identifier[relaxNGNewParserCtxt] ( identifier[URL] ): literal[string] identifier[ret] = identifier[libxml2mod] . identifier[xmlRelaxNGNewParserCtxt] ( identifier[URL] ) keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[parserError] ( literal[string] ) keyword[return] identifier[relaxNgParserCtxt] ( identifier[_obj] = identifier[ret] )
def relaxNGNewParserCtxt(URL): """Create an XML RelaxNGs parse context for that file/resource expected to contain an XML RelaxNGs file. """ ret = libxml2mod.xmlRelaxNGNewParserCtxt(URL) if ret is None: raise parserError('xmlRelaxNGNewParserCtxt() failed') # depends on [control=['if'], data=[]] return relaxNgParserCtxt(_obj=ret)
def handle_offchain_secretreveal( mediator_state: MediatorTransferState, mediator_state_change: ReceiveSecretReveal, channelidentifiers_to_channels: ChannelMap, pseudo_random_generator: random.Random, block_number: BlockNumber, block_hash: BlockHash, ) -> TransitionResult[MediatorTransferState]: """ Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. """ is_valid_reveal = is_valid_secret_reveal( state_change=mediator_state_change, transfer_secrethash=mediator_state.secrethash, secret=mediator_state_change.secret, ) is_secret_unknown = mediator_state.secret is None # a SecretReveal should be rejected if the payer transfer # has expired. To check for this, we use the last # transfer pair. transfer_pair = mediator_state.transfers_pair[-1] payer_transfer = transfer_pair.payer_transfer channel_identifier = payer_transfer.balance_proof.channel_identifier payer_channel = channelidentifiers_to_channels.get(channel_identifier) if not payer_channel: return TransitionResult(mediator_state, list()) has_payer_transfer_expired = channel.is_transfer_expired( transfer=transfer_pair.payer_transfer, affected_channel=payer_channel, block_number=block_number, ) if is_secret_unknown and is_valid_reveal and not has_payer_transfer_expired: iteration = secret_learned( state=mediator_state, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash, secret=mediator_state_change.secret, secrethash=mediator_state_change.secrethash, payee_address=mediator_state_change.sender, ) else: iteration = TransitionResult(mediator_state, list()) return iteration
def function[handle_offchain_secretreveal, parameter[mediator_state, mediator_state_change, channelidentifiers_to_channels, pseudo_random_generator, block_number, block_hash]]: constant[ Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. ] variable[is_valid_reveal] assign[=] call[name[is_valid_secret_reveal], parameter[]] variable[is_secret_unknown] assign[=] compare[name[mediator_state].secret is constant[None]] variable[transfer_pair] assign[=] call[name[mediator_state].transfers_pair][<ast.UnaryOp object at 0x7da1b19d9480>] variable[payer_transfer] assign[=] name[transfer_pair].payer_transfer variable[channel_identifier] assign[=] name[payer_transfer].balance_proof.channel_identifier variable[payer_channel] assign[=] call[name[channelidentifiers_to_channels].get, parameter[name[channel_identifier]]] if <ast.UnaryOp object at 0x7da1b19d8460> begin[:] return[call[name[TransitionResult], parameter[name[mediator_state], call[name[list], parameter[]]]]] variable[has_payer_transfer_expired] assign[=] call[name[channel].is_transfer_expired, parameter[]] if <ast.BoolOp object at 0x7da1b19dbac0> begin[:] variable[iteration] assign[=] call[name[secret_learned], parameter[]] return[name[iteration]]
keyword[def] identifier[handle_offchain_secretreveal] ( identifier[mediator_state] : identifier[MediatorTransferState] , identifier[mediator_state_change] : identifier[ReceiveSecretReveal] , identifier[channelidentifiers_to_channels] : identifier[ChannelMap] , identifier[pseudo_random_generator] : identifier[random] . identifier[Random] , identifier[block_number] : identifier[BlockNumber] , identifier[block_hash] : identifier[BlockHash] , )-> identifier[TransitionResult] [ identifier[MediatorTransferState] ]: literal[string] identifier[is_valid_reveal] = identifier[is_valid_secret_reveal] ( identifier[state_change] = identifier[mediator_state_change] , identifier[transfer_secrethash] = identifier[mediator_state] . identifier[secrethash] , identifier[secret] = identifier[mediator_state_change] . identifier[secret] , ) identifier[is_secret_unknown] = identifier[mediator_state] . identifier[secret] keyword[is] keyword[None] identifier[transfer_pair] = identifier[mediator_state] . identifier[transfers_pair] [- literal[int] ] identifier[payer_transfer] = identifier[transfer_pair] . identifier[payer_transfer] identifier[channel_identifier] = identifier[payer_transfer] . identifier[balance_proof] . identifier[channel_identifier] identifier[payer_channel] = identifier[channelidentifiers_to_channels] . identifier[get] ( identifier[channel_identifier] ) keyword[if] keyword[not] identifier[payer_channel] : keyword[return] identifier[TransitionResult] ( identifier[mediator_state] , identifier[list] ()) identifier[has_payer_transfer_expired] = identifier[channel] . identifier[is_transfer_expired] ( identifier[transfer] = identifier[transfer_pair] . 
identifier[payer_transfer] , identifier[affected_channel] = identifier[payer_channel] , identifier[block_number] = identifier[block_number] , ) keyword[if] identifier[is_secret_unknown] keyword[and] identifier[is_valid_reveal] keyword[and] keyword[not] identifier[has_payer_transfer_expired] : identifier[iteration] = identifier[secret_learned] ( identifier[state] = identifier[mediator_state] , identifier[channelidentifiers_to_channels] = identifier[channelidentifiers_to_channels] , identifier[pseudo_random_generator] = identifier[pseudo_random_generator] , identifier[block_number] = identifier[block_number] , identifier[block_hash] = identifier[block_hash] , identifier[secret] = identifier[mediator_state_change] . identifier[secret] , identifier[secrethash] = identifier[mediator_state_change] . identifier[secrethash] , identifier[payee_address] = identifier[mediator_state_change] . identifier[sender] , ) keyword[else] : identifier[iteration] = identifier[TransitionResult] ( identifier[mediator_state] , identifier[list] ()) keyword[return] identifier[iteration]
def handle_offchain_secretreveal(mediator_state: MediatorTransferState, mediator_state_change: ReceiveSecretReveal, channelidentifiers_to_channels: ChannelMap, pseudo_random_generator: random.Random, block_number: BlockNumber, block_hash: BlockHash) -> TransitionResult[MediatorTransferState]: """ Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. """ is_valid_reveal = is_valid_secret_reveal(state_change=mediator_state_change, transfer_secrethash=mediator_state.secrethash, secret=mediator_state_change.secret) is_secret_unknown = mediator_state.secret is None # a SecretReveal should be rejected if the payer transfer # has expired. To check for this, we use the last # transfer pair. transfer_pair = mediator_state.transfers_pair[-1] payer_transfer = transfer_pair.payer_transfer channel_identifier = payer_transfer.balance_proof.channel_identifier payer_channel = channelidentifiers_to_channels.get(channel_identifier) if not payer_channel: return TransitionResult(mediator_state, list()) # depends on [control=['if'], data=[]] has_payer_transfer_expired = channel.is_transfer_expired(transfer=transfer_pair.payer_transfer, affected_channel=payer_channel, block_number=block_number) if is_secret_unknown and is_valid_reveal and (not has_payer_transfer_expired): iteration = secret_learned(state=mediator_state, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash, secret=mediator_state_change.secret, secrethash=mediator_state_change.secrethash, payee_address=mediator_state_change.sender) # depends on [control=['if'], data=[]] else: iteration = TransitionResult(mediator_state, list()) return iteration
def get_k8s_metadata(): """Get kubernetes container metadata, as on GCP GKE.""" k8s_metadata = {} gcp_cluster = (gcp_metadata_config.GcpMetadataConfig .get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY)) if gcp_cluster is not None: k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items(): attribute_value = os.environ.get(attribute_env) if attribute_value is not None: k8s_metadata[attribute_key] = attribute_value return k8s_metadata
def function[get_k8s_metadata, parameter[]]: constant[Get kubernetes container metadata, as on GCP GKE.] variable[k8s_metadata] assign[=] dictionary[[], []] variable[gcp_cluster] assign[=] call[name[gcp_metadata_config].GcpMetadataConfig.get_attribute, parameter[name[gcp_metadata_config].CLUSTER_NAME_KEY]] if compare[name[gcp_cluster] is_not constant[None]] begin[:] call[name[k8s_metadata]][name[CLUSTER_NAME_KEY]] assign[=] name[gcp_cluster] for taget[tuple[[<ast.Name object at 0x7da20e956a40>, <ast.Name object at 0x7da20e957280>]]] in starred[call[name[_K8S_ENV_ATTRIBUTES].items, parameter[]]] begin[:] variable[attribute_value] assign[=] call[name[os].environ.get, parameter[name[attribute_env]]] if compare[name[attribute_value] is_not constant[None]] begin[:] call[name[k8s_metadata]][name[attribute_key]] assign[=] name[attribute_value] return[name[k8s_metadata]]
keyword[def] identifier[get_k8s_metadata] (): literal[string] identifier[k8s_metadata] ={} identifier[gcp_cluster] =( identifier[gcp_metadata_config] . identifier[GcpMetadataConfig] . identifier[get_attribute] ( identifier[gcp_metadata_config] . identifier[CLUSTER_NAME_KEY] )) keyword[if] identifier[gcp_cluster] keyword[is] keyword[not] keyword[None] : identifier[k8s_metadata] [ identifier[CLUSTER_NAME_KEY] ]= identifier[gcp_cluster] keyword[for] identifier[attribute_key] , identifier[attribute_env] keyword[in] identifier[_K8S_ENV_ATTRIBUTES] . identifier[items] (): identifier[attribute_value] = identifier[os] . identifier[environ] . identifier[get] ( identifier[attribute_env] ) keyword[if] identifier[attribute_value] keyword[is] keyword[not] keyword[None] : identifier[k8s_metadata] [ identifier[attribute_key] ]= identifier[attribute_value] keyword[return] identifier[k8s_metadata]
def get_k8s_metadata(): """Get kubernetes container metadata, as on GCP GKE.""" k8s_metadata = {} gcp_cluster = gcp_metadata_config.GcpMetadataConfig.get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY) if gcp_cluster is not None: k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster # depends on [control=['if'], data=['gcp_cluster']] for (attribute_key, attribute_env) in _K8S_ENV_ATTRIBUTES.items(): attribute_value = os.environ.get(attribute_env) if attribute_value is not None: k8s_metadata[attribute_key] = attribute_value # depends on [control=['if'], data=['attribute_value']] # depends on [control=['for'], data=[]] return k8s_metadata
def get_comment_lookup_session_for_book(self, book_id, proxy): """Gets the ``OsidSession`` associated with the comment lookup service for the given book. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: NotFound - no ``Book`` found by the given ``Id`` raise: NullArgument - ``book_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_comment_lookup(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.CommentLookupSession(book_id, proxy, self._runtime)
def function[get_comment_lookup_session_for_book, parameter[self, book_id, proxy]]: constant[Gets the ``OsidSession`` associated with the comment lookup service for the given book. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: NotFound - no ``Book`` found by the given ``Id`` raise: NullArgument - ``book_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` and ``supports_visible_federation()`` are ``true``* ] if <ast.UnaryOp object at 0x7da20c6aafb0> begin[:] <ast.Raise object at 0x7da18dc04160> return[call[name[sessions].CommentLookupSession, parameter[name[book_id], name[proxy], name[self]._runtime]]]
keyword[def] identifier[get_comment_lookup_session_for_book] ( identifier[self] , identifier[book_id] , identifier[proxy] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[supports_comment_lookup] (): keyword[raise] identifier[errors] . identifier[Unimplemented] () keyword[return] identifier[sessions] . identifier[CommentLookupSession] ( identifier[book_id] , identifier[proxy] , identifier[self] . identifier[_runtime] )
def get_comment_lookup_session_for_book(self, book_id, proxy): """Gets the ``OsidSession`` associated with the comment lookup service for the given book. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: NotFound - no ``Book`` found by the given ``Id`` raise: NullArgument - ``book_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_comment_lookup(): raise errors.Unimplemented() # depends on [control=['if'], data=[]] ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.CommentLookupSession(book_id, proxy, self._runtime)
def _decode(self, obj, context): """ Get the python representation of the obj """ return b''.join(map(int2byte, [c + 0x60 for c in bytearray(obj)])).decode("utf8")
def function[_decode, parameter[self, obj, context]]: constant[ Get the python representation of the obj ] return[call[call[constant[b''].join, parameter[call[name[map], parameter[name[int2byte], <ast.ListComp object at 0x7da2047e8d30>]]]].decode, parameter[constant[utf8]]]]
keyword[def] identifier[_decode] ( identifier[self] , identifier[obj] , identifier[context] ): literal[string] keyword[return] literal[string] . identifier[join] ( identifier[map] ( identifier[int2byte] ,[ identifier[c] + literal[int] keyword[for] identifier[c] keyword[in] identifier[bytearray] ( identifier[obj] )])). identifier[decode] ( literal[string] )
def _decode(self, obj, context): """ Get the python representation of the obj """ return b''.join(map(int2byte, [c + 96 for c in bytearray(obj)])).decode('utf8')
def transform(self): # type: () -> _worker.Response """Take a request with input data, deserialize it, make a prediction, and return a serialized response. Returns: sagemaker_containers.beta.framework.worker.Response: a Flask response object with the following args: * response: the serialized data to return * accept: the content type that the data was serialized into """ request = _worker.Request() result = self._transform_fn(self._model, request.content, request.content_type, request.accept) if isinstance(result, tuple): # transforms tuple in Response for backwards compatibility return _worker.Response(response=result[0], mimetype=result[1]) return result
def function[transform, parameter[self]]: constant[Take a request with input data, deserialize it, make a prediction, and return a serialized response. Returns: sagemaker_containers.beta.framework.worker.Response: a Flask response object with the following args: * response: the serialized data to return * accept: the content type that the data was serialized into ] variable[request] assign[=] call[name[_worker].Request, parameter[]] variable[result] assign[=] call[name[self]._transform_fn, parameter[name[self]._model, name[request].content, name[request].content_type, name[request].accept]] if call[name[isinstance], parameter[name[result], name[tuple]]] begin[:] return[call[name[_worker].Response, parameter[]]] return[name[result]]
keyword[def] identifier[transform] ( identifier[self] ): literal[string] identifier[request] = identifier[_worker] . identifier[Request] () identifier[result] = identifier[self] . identifier[_transform_fn] ( identifier[self] . identifier[_model] , identifier[request] . identifier[content] , identifier[request] . identifier[content_type] , identifier[request] . identifier[accept] ) keyword[if] identifier[isinstance] ( identifier[result] , identifier[tuple] ): keyword[return] identifier[_worker] . identifier[Response] ( identifier[response] = identifier[result] [ literal[int] ], identifier[mimetype] = identifier[result] [ literal[int] ]) keyword[return] identifier[result]
def transform(self): # type: () -> _worker.Response 'Take a request with input data, deserialize it, make a prediction, and return a\n serialized response.\n\n Returns:\n sagemaker_containers.beta.framework.worker.Response: a Flask response object with\n the following args:\n\n * response: the serialized data to return\n * accept: the content type that the data was serialized into\n ' request = _worker.Request() result = self._transform_fn(self._model, request.content, request.content_type, request.accept) if isinstance(result, tuple): # transforms tuple in Response for backwards compatibility return _worker.Response(response=result[0], mimetype=result[1]) # depends on [control=['if'], data=[]] return result
def _handle_error(self, data, params): """Handle an error response from the SABnzbd API""" error = data.get('error', 'API call failed') mode = params.get('mode') raise SabnzbdApiException(error, mode=mode)
def function[_handle_error, parameter[self, data, params]]: constant[Handle an error response from the SABnzbd API] variable[error] assign[=] call[name[data].get, parameter[constant[error], constant[API call failed]]] variable[mode] assign[=] call[name[params].get, parameter[constant[mode]]] <ast.Raise object at 0x7da18dc05630>
keyword[def] identifier[_handle_error] ( identifier[self] , identifier[data] , identifier[params] ): literal[string] identifier[error] = identifier[data] . identifier[get] ( literal[string] , literal[string] ) identifier[mode] = identifier[params] . identifier[get] ( literal[string] ) keyword[raise] identifier[SabnzbdApiException] ( identifier[error] , identifier[mode] = identifier[mode] )
def _handle_error(self, data, params): """Handle an error response from the SABnzbd API""" error = data.get('error', 'API call failed') mode = params.get('mode') raise SabnzbdApiException(error, mode=mode)
async def verify_parent_task(chain, link): """Verify the parent task Link. Action task verification is currently in the same verification function as decision tasks, because sometimes we'll have an action task masquerading as a decision task, e.g. in templatized actions for release graphs. To make sure our guess of decision or action task isn't fatal, we call this function; this function uses ``is_action()`` to determine how to verify the task. Args: chain (ChainOfTrust): the chain we're operating on. link (LinkOfTrust): the task link we're checking. Raises: CoTError: on chain of trust verification error. """ worker_type = get_worker_type(link.task) if worker_type not in chain.context.config['valid_decision_worker_types']: raise CoTError("{} is not a valid decision workerType!".format(worker_type)) if chain is not link: # make sure all tasks generated from this parent task match the published # task-graph.json. Not applicable if this link is the ChainOfTrust object, # since this task won't have generated a task-graph.json yet. path = link.get_artifact_full_path('public/task-graph.json') if not os.path.exists(path): raise CoTError("{} {}: {} doesn't exist!".format(link.name, link.task_id, path)) link.task_graph = load_json_or_yaml( path, is_path=True, exception=CoTError, message="Can't load {}! %(exc)s".format(path) ) # This check may want to move to a per-task check? for target_link in chain.get_all_links_in_chain(): # Verify the target's task is in the parent task's task graph, unless # it's this task or a parent task. # (Decision tasks will not exist in a parent task's task-graph.json; # action tasks, which are generated later, will also be missing.) 
# https://github.com/mozilla-releng/scriptworker/issues/77 if target_link.parent_task_id == link.task_id and \ target_link.task_id != link.task_id and \ target_link.task_type not in PARENT_TASK_TYPES: verify_link_in_task_graph(chain, link, target_link) try: await verify_parent_task_definition(chain, link) except (BaseDownloadError, KeyError) as e: raise CoTError(e)
<ast.AsyncFunctionDef object at 0x7da204567a00>
keyword[async] keyword[def] identifier[verify_parent_task] ( identifier[chain] , identifier[link] ): literal[string] identifier[worker_type] = identifier[get_worker_type] ( identifier[link] . identifier[task] ) keyword[if] identifier[worker_type] keyword[not] keyword[in] identifier[chain] . identifier[context] . identifier[config] [ literal[string] ]: keyword[raise] identifier[CoTError] ( literal[string] . identifier[format] ( identifier[worker_type] )) keyword[if] identifier[chain] keyword[is] keyword[not] identifier[link] : identifier[path] = identifier[link] . identifier[get_artifact_full_path] ( literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[raise] identifier[CoTError] ( literal[string] . identifier[format] ( identifier[link] . identifier[name] , identifier[link] . identifier[task_id] , identifier[path] )) identifier[link] . identifier[task_graph] = identifier[load_json_or_yaml] ( identifier[path] , identifier[is_path] = keyword[True] , identifier[exception] = identifier[CoTError] , identifier[message] = literal[string] . identifier[format] ( identifier[path] ) ) keyword[for] identifier[target_link] keyword[in] identifier[chain] . identifier[get_all_links_in_chain] (): keyword[if] identifier[target_link] . identifier[parent_task_id] == identifier[link] . identifier[task_id] keyword[and] identifier[target_link] . identifier[task_id] != identifier[link] . identifier[task_id] keyword[and] identifier[target_link] . identifier[task_type] keyword[not] keyword[in] identifier[PARENT_TASK_TYPES] : identifier[verify_link_in_task_graph] ( identifier[chain] , identifier[link] , identifier[target_link] ) keyword[try] : keyword[await] identifier[verify_parent_task_definition] ( identifier[chain] , identifier[link] ) keyword[except] ( identifier[BaseDownloadError] , identifier[KeyError] ) keyword[as] identifier[e] : keyword[raise] identifier[CoTError] ( identifier[e] )
async def verify_parent_task(chain, link): """Verify the parent task Link. Action task verification is currently in the same verification function as decision tasks, because sometimes we'll have an action task masquerading as a decision task, e.g. in templatized actions for release graphs. To make sure our guess of decision or action task isn't fatal, we call this function; this function uses ``is_action()`` to determine how to verify the task. Args: chain (ChainOfTrust): the chain we're operating on. link (LinkOfTrust): the task link we're checking. Raises: CoTError: on chain of trust verification error. """ worker_type = get_worker_type(link.task) if worker_type not in chain.context.config['valid_decision_worker_types']: raise CoTError('{} is not a valid decision workerType!'.format(worker_type)) # depends on [control=['if'], data=['worker_type']] if chain is not link: # make sure all tasks generated from this parent task match the published # task-graph.json. Not applicable if this link is the ChainOfTrust object, # since this task won't have generated a task-graph.json yet. path = link.get_artifact_full_path('public/task-graph.json') if not os.path.exists(path): raise CoTError("{} {}: {} doesn't exist!".format(link.name, link.task_id, path)) # depends on [control=['if'], data=[]] link.task_graph = load_json_or_yaml(path, is_path=True, exception=CoTError, message="Can't load {}! %(exc)s".format(path)) # This check may want to move to a per-task check? for target_link in chain.get_all_links_in_chain(): # Verify the target's task is in the parent task's task graph, unless # it's this task or a parent task. # (Decision tasks will not exist in a parent task's task-graph.json; # action tasks, which are generated later, will also be missing.) 
# https://github.com/mozilla-releng/scriptworker/issues/77 if target_link.parent_task_id == link.task_id and target_link.task_id != link.task_id and (target_link.task_type not in PARENT_TASK_TYPES): verify_link_in_task_graph(chain, link, target_link) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_link']] # depends on [control=['if'], data=['chain', 'link']] try: await verify_parent_task_definition(chain, link) # depends on [control=['try'], data=[]] except (BaseDownloadError, KeyError) as e: raise CoTError(e) # depends on [control=['except'], data=['e']]
def normalize(self): """ Makes sure this egg distribution is stored only as an egg file. The egg file will be created from another existing distribution format if needed. """ if self.has_egg_file(): if self.has_zip(): self.__remove_zip() else: if self.has_egg_folder(): if not self.has_zip(): self.__zip_egg_folder() self.__remove_egg_folder() self.__rename_zip_to_egg()
def function[normalize, parameter[self]]: constant[ Makes sure this egg distribution is stored only as an egg file. The egg file will be created from another existing distribution format if needed. ] if call[name[self].has_egg_file, parameter[]] begin[:] if call[name[self].has_zip, parameter[]] begin[:] call[name[self].__remove_zip, parameter[]]
keyword[def] identifier[normalize] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[has_egg_file] (): keyword[if] identifier[self] . identifier[has_zip] (): identifier[self] . identifier[__remove_zip] () keyword[else] : keyword[if] identifier[self] . identifier[has_egg_folder] (): keyword[if] keyword[not] identifier[self] . identifier[has_zip] (): identifier[self] . identifier[__zip_egg_folder] () identifier[self] . identifier[__remove_egg_folder] () identifier[self] . identifier[__rename_zip_to_egg] ()
def normalize(self): """ Makes sure this egg distribution is stored only as an egg file. The egg file will be created from another existing distribution format if needed. """ if self.has_egg_file(): if self.has_zip(): self.__remove_zip() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: if self.has_egg_folder(): if not self.has_zip(): self.__zip_egg_folder() # depends on [control=['if'], data=[]] self.__remove_egg_folder() # depends on [control=['if'], data=[]] self.__rename_zip_to_egg()
def get_choice_selected_value(self): """ Returns the default selection from a choice menu Throws an error if this is not a choice parameter. """ if 'choiceInfo' not in self.dto[self.name]: raise GPException('not a choice parameter') choice_info_dto = self.dto[self.name]['choiceInfo'] if 'selectedValue' in choice_info_dto: return self.dto[self.name]['choiceInfo']['selectedValue'] else: return None
def function[get_choice_selected_value, parameter[self]]: constant[ Returns the default selection from a choice menu Throws an error if this is not a choice parameter. ] if compare[constant[choiceInfo] <ast.NotIn object at 0x7da2590d7190> call[name[self].dto][name[self].name]] begin[:] <ast.Raise object at 0x7da2041dbc10> variable[choice_info_dto] assign[=] call[call[name[self].dto][name[self].name]][constant[choiceInfo]] if compare[constant[selectedValue] in name[choice_info_dto]] begin[:] return[call[call[call[name[self].dto][name[self].name]][constant[choiceInfo]]][constant[selectedValue]]]
keyword[def] identifier[get_choice_selected_value] ( identifier[self] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[dto] [ identifier[self] . identifier[name] ]: keyword[raise] identifier[GPException] ( literal[string] ) identifier[choice_info_dto] = identifier[self] . identifier[dto] [ identifier[self] . identifier[name] ][ literal[string] ] keyword[if] literal[string] keyword[in] identifier[choice_info_dto] : keyword[return] identifier[self] . identifier[dto] [ identifier[self] . identifier[name] ][ literal[string] ][ literal[string] ] keyword[else] : keyword[return] keyword[None]
def get_choice_selected_value(self): """ Returns the default selection from a choice menu Throws an error if this is not a choice parameter. """ if 'choiceInfo' not in self.dto[self.name]: raise GPException('not a choice parameter') # depends on [control=['if'], data=[]] choice_info_dto = self.dto[self.name]['choiceInfo'] if 'selectedValue' in choice_info_dto: return self.dto[self.name]['choiceInfo']['selectedValue'] # depends on [control=['if'], data=[]] else: return None
def _generate_sequences(self, primary_label, secondary_label, ngrams): """Generates aligned sequences between each witness labelled `primary_label` and each witness labelled `secondary_label`, based around `ngrams`. :param primary_label: label for one side of the pairs of witnesses to align :type primary_label: `str` :param secondary_label: label for the other side of the pairs of witnesses to align :type secondary_label: `str` :param ngrams: n-grams to base sequences off :type ngrams: `list` of `str` """ cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME] primary_works = self._matches[self._matches[ constants.LABEL_FIELDNAME] == primary_label][ cols].drop_duplicates() secondary_works = self._matches[self._matches[ constants.LABEL_FIELDNAME] == secondary_label][ cols].drop_duplicates() for index, (work1, siglum1) in primary_works.iterrows(): text1 = self._get_text(self._corpus.get_witness(work1, siglum1)) label1 = '{}_{}'.format(work1, siglum1) for index, (work2, siglum2) in secondary_works.iterrows(): text2 = self._get_text(self._corpus.get_witness( work2, siglum2)) label2 = '{}_{}'.format(work2, siglum2) self._generate_sequences_for_texts(label1, text1, label2, text2, ngrams)
def function[_generate_sequences, parameter[self, primary_label, secondary_label, ngrams]]: constant[Generates aligned sequences between each witness labelled `primary_label` and each witness labelled `secondary_label`, based around `ngrams`. :param primary_label: label for one side of the pairs of witnesses to align :type primary_label: `str` :param secondary_label: label for the other side of the pairs of witnesses to align :type secondary_label: `str` :param ngrams: n-grams to base sequences off :type ngrams: `list` of `str` ] variable[cols] assign[=] list[[<ast.Attribute object at 0x7da1b19cf580>, <ast.Attribute object at 0x7da1b19cf670>]] variable[primary_works] assign[=] call[call[call[name[self]._matches][compare[call[name[self]._matches][name[constants].LABEL_FIELDNAME] equal[==] name[primary_label]]]][name[cols]].drop_duplicates, parameter[]] variable[secondary_works] assign[=] call[call[call[name[self]._matches][compare[call[name[self]._matches][name[constants].LABEL_FIELDNAME] equal[==] name[secondary_label]]]][name[cols]].drop_duplicates, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b19cefb0>, <ast.Tuple object at 0x7da1b19cd090>]]] in starred[call[name[primary_works].iterrows, parameter[]]] begin[:] variable[text1] assign[=] call[name[self]._get_text, parameter[call[name[self]._corpus.get_witness, parameter[name[work1], name[siglum1]]]]] variable[label1] assign[=] call[constant[{}_{}].format, parameter[name[work1], name[siglum1]]] for taget[tuple[[<ast.Name object at 0x7da1b19ceb00>, <ast.Tuple object at 0x7da1b19cf5e0>]]] in starred[call[name[secondary_works].iterrows, parameter[]]] begin[:] variable[text2] assign[=] call[name[self]._get_text, parameter[call[name[self]._corpus.get_witness, parameter[name[work2], name[siglum2]]]]] variable[label2] assign[=] call[constant[{}_{}].format, parameter[name[work2], name[siglum2]]] call[name[self]._generate_sequences_for_texts, parameter[name[label1], name[text1], name[label2], name[text2], 
name[ngrams]]]
keyword[def] identifier[_generate_sequences] ( identifier[self] , identifier[primary_label] , identifier[secondary_label] , identifier[ngrams] ): literal[string] identifier[cols] =[ identifier[constants] . identifier[WORK_FIELDNAME] , identifier[constants] . identifier[SIGLUM_FIELDNAME] ] identifier[primary_works] = identifier[self] . identifier[_matches] [ identifier[self] . identifier[_matches] [ identifier[constants] . identifier[LABEL_FIELDNAME] ]== identifier[primary_label] ][ identifier[cols] ]. identifier[drop_duplicates] () identifier[secondary_works] = identifier[self] . identifier[_matches] [ identifier[self] . identifier[_matches] [ identifier[constants] . identifier[LABEL_FIELDNAME] ]== identifier[secondary_label] ][ identifier[cols] ]. identifier[drop_duplicates] () keyword[for] identifier[index] ,( identifier[work1] , identifier[siglum1] ) keyword[in] identifier[primary_works] . identifier[iterrows] (): identifier[text1] = identifier[self] . identifier[_get_text] ( identifier[self] . identifier[_corpus] . identifier[get_witness] ( identifier[work1] , identifier[siglum1] )) identifier[label1] = literal[string] . identifier[format] ( identifier[work1] , identifier[siglum1] ) keyword[for] identifier[index] ,( identifier[work2] , identifier[siglum2] ) keyword[in] identifier[secondary_works] . identifier[iterrows] (): identifier[text2] = identifier[self] . identifier[_get_text] ( identifier[self] . identifier[_corpus] . identifier[get_witness] ( identifier[work2] , identifier[siglum2] )) identifier[label2] = literal[string] . identifier[format] ( identifier[work2] , identifier[siglum2] ) identifier[self] . identifier[_generate_sequences_for_texts] ( identifier[label1] , identifier[text1] , identifier[label2] , identifier[text2] , identifier[ngrams] )
def _generate_sequences(self, primary_label, secondary_label, ngrams): """Generates aligned sequences between each witness labelled `primary_label` and each witness labelled `secondary_label`, based around `ngrams`. :param primary_label: label for one side of the pairs of witnesses to align :type primary_label: `str` :param secondary_label: label for the other side of the pairs of witnesses to align :type secondary_label: `str` :param ngrams: n-grams to base sequences off :type ngrams: `list` of `str` """ cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME] primary_works = self._matches[self._matches[constants.LABEL_FIELDNAME] == primary_label][cols].drop_duplicates() secondary_works = self._matches[self._matches[constants.LABEL_FIELDNAME] == secondary_label][cols].drop_duplicates() for (index, (work1, siglum1)) in primary_works.iterrows(): text1 = self._get_text(self._corpus.get_witness(work1, siglum1)) label1 = '{}_{}'.format(work1, siglum1) for (index, (work2, siglum2)) in secondary_works.iterrows(): text2 = self._get_text(self._corpus.get_witness(work2, siglum2)) label2 = '{}_{}'.format(work2, siglum2) self._generate_sequences_for_texts(label1, text1, label2, text2, ngrams) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
def debconf_set_selections(package, selections): """Given package and map config:(type,value), set selections""" text = '\n'.join(' '.join([package, k, t, v]) for k, (t, v) in selections.iteritems()) sudo('debconf-set-selections <<-HEREDOC\n{}\nHEREDOC'.format(text))
def function[debconf_set_selections, parameter[package, selections]]: constant[Given package and map config:(type,value), set selections] variable[text] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b09d3d60>]] call[name[sudo], parameter[call[constant[debconf-set-selections <<-HEREDOC {} HEREDOC].format, parameter[name[text]]]]]
keyword[def] identifier[debconf_set_selections] ( identifier[package] , identifier[selections] ): literal[string] identifier[text] = literal[string] . identifier[join] ( literal[string] . identifier[join] ([ identifier[package] , identifier[k] , identifier[t] , identifier[v] ]) keyword[for] identifier[k] ,( identifier[t] , identifier[v] ) keyword[in] identifier[selections] . identifier[iteritems] ()) identifier[sudo] ( literal[string] . identifier[format] ( identifier[text] ))
def debconf_set_selections(package, selections): """Given package and map config:(type,value), set selections""" text = '\n'.join((' '.join([package, k, t, v]) for (k, (t, v)) in selections.iteritems())) sudo('debconf-set-selections <<-HEREDOC\n{}\nHEREDOC'.format(text))
def addgroupmember(self, group_id, user_id, access_level): """ Adds a project member to a project :param user_id: user id :param access_level: access level, see gitlab help to know more :return: True if success """ if not isinstance(access_level, int): if access_level.lower() == 'owner': access_level = 50 elif access_level.lower() == 'master': access_level = 40 elif access_level.lower() == 'developer': access_level = 30 elif access_level.lower() == 'reporter': access_level = 20 elif access_level.lower() == 'guest': access_level = 10 else: return False data = {'id': group_id, 'user_id': user_id, 'access_level': access_level} request = requests.post( '{0}/{1}/members'.format(self.groups_url, group_id), headers=self.headers, data=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) return request.status_code == 201
def function[addgroupmember, parameter[self, group_id, user_id, access_level]]: constant[ Adds a project member to a project :param user_id: user id :param access_level: access level, see gitlab help to know more :return: True if success ] if <ast.UnaryOp object at 0x7da2041dadd0> begin[:] if compare[call[name[access_level].lower, parameter[]] equal[==] constant[owner]] begin[:] variable[access_level] assign[=] constant[50] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b28decb0>, <ast.Constant object at 0x7da1b28de410>, <ast.Constant object at 0x7da1b28de650>], [<ast.Name object at 0x7da1b28de6e0>, <ast.Name object at 0x7da1b28dce80>, <ast.Name object at 0x7da1b28dc5b0>]] variable[request] assign[=] call[name[requests].post, parameter[call[constant[{0}/{1}/members].format, parameter[name[self].groups_url, name[group_id]]]]] return[compare[name[request].status_code equal[==] constant[201]]]
keyword[def] identifier[addgroupmember] ( identifier[self] , identifier[group_id] , identifier[user_id] , identifier[access_level] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[access_level] , identifier[int] ): keyword[if] identifier[access_level] . identifier[lower] ()== literal[string] : identifier[access_level] = literal[int] keyword[elif] identifier[access_level] . identifier[lower] ()== literal[string] : identifier[access_level] = literal[int] keyword[elif] identifier[access_level] . identifier[lower] ()== literal[string] : identifier[access_level] = literal[int] keyword[elif] identifier[access_level] . identifier[lower] ()== literal[string] : identifier[access_level] = literal[int] keyword[elif] identifier[access_level] . identifier[lower] ()== literal[string] : identifier[access_level] = literal[int] keyword[else] : keyword[return] keyword[False] identifier[data] ={ literal[string] : identifier[group_id] , literal[string] : identifier[user_id] , literal[string] : identifier[access_level] } identifier[request] = identifier[requests] . identifier[post] ( literal[string] . identifier[format] ( identifier[self] . identifier[groups_url] , identifier[group_id] ), identifier[headers] = identifier[self] . identifier[headers] , identifier[data] = identifier[data] , identifier[verify] = identifier[self] . identifier[verify_ssl] , identifier[auth] = identifier[self] . identifier[auth] , identifier[timeout] = identifier[self] . identifier[timeout] ) keyword[return] identifier[request] . identifier[status_code] == literal[int]
def addgroupmember(self, group_id, user_id, access_level): """ Adds a project member to a project :param user_id: user id :param access_level: access level, see gitlab help to know more :return: True if success """ if not isinstance(access_level, int): if access_level.lower() == 'owner': access_level = 50 # depends on [control=['if'], data=[]] elif access_level.lower() == 'master': access_level = 40 # depends on [control=['if'], data=[]] elif access_level.lower() == 'developer': access_level = 30 # depends on [control=['if'], data=[]] elif access_level.lower() == 'reporter': access_level = 20 # depends on [control=['if'], data=[]] elif access_level.lower() == 'guest': access_level = 10 # depends on [control=['if'], data=[]] else: return False # depends on [control=['if'], data=[]] data = {'id': group_id, 'user_id': user_id, 'access_level': access_level} request = requests.post('{0}/{1}/members'.format(self.groups_url, group_id), headers=self.headers, data=data, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) return request.status_code == 201
def model(self, model, create=True): '''Returns the :class:`SessionModel` for ``model`` which can be :class:`Model`, or a :class:`MetaClass`, or an instance of :class:`Model`.''' manager = self.manager(model) sm = self._models.get(manager) if sm is None and create: sm = SessionModel(manager) self._models[manager] = sm return sm
def function[model, parameter[self, model, create]]: constant[Returns the :class:`SessionModel` for ``model`` which can be :class:`Model`, or a :class:`MetaClass`, or an instance of :class:`Model`.] variable[manager] assign[=] call[name[self].manager, parameter[name[model]]] variable[sm] assign[=] call[name[self]._models.get, parameter[name[manager]]] if <ast.BoolOp object at 0x7da1b0e5b880> begin[:] variable[sm] assign[=] call[name[SessionModel], parameter[name[manager]]] call[name[self]._models][name[manager]] assign[=] name[sm] return[name[sm]]
keyword[def] identifier[model] ( identifier[self] , identifier[model] , identifier[create] = keyword[True] ): literal[string] identifier[manager] = identifier[self] . identifier[manager] ( identifier[model] ) identifier[sm] = identifier[self] . identifier[_models] . identifier[get] ( identifier[manager] ) keyword[if] identifier[sm] keyword[is] keyword[None] keyword[and] identifier[create] : identifier[sm] = identifier[SessionModel] ( identifier[manager] ) identifier[self] . identifier[_models] [ identifier[manager] ]= identifier[sm] keyword[return] identifier[sm]
def model(self, model, create=True): """Returns the :class:`SessionModel` for ``model`` which can be :class:`Model`, or a :class:`MetaClass`, or an instance of :class:`Model`.""" manager = self.manager(model) sm = self._models.get(manager) if sm is None and create: sm = SessionModel(manager) self._models[manager] = sm # depends on [control=['if'], data=[]] return sm
def iter_scripts(scratch): """A generator for all scripts contained in a scratch file. yields stage scripts first, then scripts for each sprite """ for script in scratch.stage.scripts: if not isinstance(script, kurt.Comment): yield script for sprite in scratch.sprites: for script in sprite.scripts: if not isinstance(script, kurt.Comment): yield script
def function[iter_scripts, parameter[scratch]]: constant[A generator for all scripts contained in a scratch file. yields stage scripts first, then scripts for each sprite ] for taget[name[script]] in starred[name[scratch].stage.scripts] begin[:] if <ast.UnaryOp object at 0x7da2041d8dc0> begin[:] <ast.Yield object at 0x7da2041d9570> for taget[name[sprite]] in starred[name[scratch].sprites] begin[:] for taget[name[script]] in starred[name[sprite].scripts] begin[:] if <ast.UnaryOp object at 0x7da18f58f6d0> begin[:] <ast.Yield object at 0x7da18f58ca00>
keyword[def] identifier[iter_scripts] ( identifier[scratch] ): literal[string] keyword[for] identifier[script] keyword[in] identifier[scratch] . identifier[stage] . identifier[scripts] : keyword[if] keyword[not] identifier[isinstance] ( identifier[script] , identifier[kurt] . identifier[Comment] ): keyword[yield] identifier[script] keyword[for] identifier[sprite] keyword[in] identifier[scratch] . identifier[sprites] : keyword[for] identifier[script] keyword[in] identifier[sprite] . identifier[scripts] : keyword[if] keyword[not] identifier[isinstance] ( identifier[script] , identifier[kurt] . identifier[Comment] ): keyword[yield] identifier[script]
def iter_scripts(scratch): """A generator for all scripts contained in a scratch file. yields stage scripts first, then scripts for each sprite """ for script in scratch.stage.scripts: if not isinstance(script, kurt.Comment): yield script # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']] for sprite in scratch.sprites: for script in sprite.scripts: if not isinstance(script, kurt.Comment): yield script # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']] # depends on [control=['for'], data=['sprite']]
def parse_bookmark_data (data): """Parse data string. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. """ for url, name in parse_bookmark_json(json.loads(data)): yield url, name
def function[parse_bookmark_data, parameter[data]]: constant[Parse data string. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. ] for taget[tuple[[<ast.Name object at 0x7da18fe930d0>, <ast.Name object at 0x7da18fe90be0>]]] in starred[call[name[parse_bookmark_json], parameter[call[name[json].loads, parameter[name[data]]]]]] begin[:] <ast.Yield object at 0x7da18fe90910>
keyword[def] identifier[parse_bookmark_data] ( identifier[data] ): literal[string] keyword[for] identifier[url] , identifier[name] keyword[in] identifier[parse_bookmark_json] ( identifier[json] . identifier[loads] ( identifier[data] )): keyword[yield] identifier[url] , identifier[name]
def parse_bookmark_data(data): """Parse data string. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. """ for (url, name) in parse_bookmark_json(json.loads(data)): yield (url, name) # depends on [control=['for'], data=[]]
def learn( network, env, seed=None, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): ''' Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. 
batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs*nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps//nbatch+1): # Get mini batch of experiences obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time()-tstart # Calculate the fps (frame per second) fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() return model
def function[learn, parameter[network, env, seed, nsteps, total_timesteps, vf_coef, ent_coef, max_grad_norm, lr, lrschedule, epsilon, alpha, gamma, log_interval, load_path]]: constant[ Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. ] call[name[set_global_seeds], parameter[name[seed]]] variable[nenvs] assign[=] name[env].num_envs variable[policy] assign[=] call[name[build_policy], parameter[name[env], name[network]]] variable[model] assign[=] call[name[Model], parameter[]] if compare[name[load_path] is_not constant[None]] begin[:] call[name[model].load, parameter[name[load_path]]] variable[runner] assign[=] call[name[Runner], parameter[name[env], name[model]]] variable[epinfobuf] assign[=] call[name[deque], parameter[]] variable[nbatch] assign[=] binary_operation[name[nenvs] * name[nsteps]] variable[tstart] assign[=] call[name[time].time, parameter[]] for taget[name[update]] in starred[call[name[range], parameter[constant[1], binary_operation[binary_operation[name[total_timesteps] <ast.FloorDiv object at 0x7da2590d6bc0> name[nbatch]] + constant[1]]]]] begin[:] <ast.Tuple object at 0x7da18c4ce260> assign[=] call[name[runner].run, parameter[]] call[name[epinfobuf].extend, parameter[name[epinfos]]] <ast.Tuple object at 0x7da18c4cf3d0> assign[=] call[name[model].train, parameter[name[obs], name[states], name[rewards], name[masks], name[actions], name[values]]] variable[nseconds] assign[=] binary_operation[call[name[time].time, parameter[]] - name[tstart]] variable[fps] 
assign[=] call[name[int], parameter[binary_operation[binary_operation[name[update] * name[nbatch]] / name[nseconds]]]] if <ast.BoolOp object at 0x7da18c4cc100> begin[:] variable[ev] assign[=] call[name[explained_variance], parameter[name[values], name[rewards]]] call[name[logger].record_tabular, parameter[constant[nupdates], name[update]]] call[name[logger].record_tabular, parameter[constant[total_timesteps], binary_operation[name[update] * name[nbatch]]]] call[name[logger].record_tabular, parameter[constant[fps], name[fps]]] call[name[logger].record_tabular, parameter[constant[policy_entropy], call[name[float], parameter[name[policy_entropy]]]]] call[name[logger].record_tabular, parameter[constant[value_loss], call[name[float], parameter[name[value_loss]]]]] call[name[logger].record_tabular, parameter[constant[explained_variance], call[name[float], parameter[name[ev]]]]] call[name[logger].record_tabular, parameter[constant[eprewmean], call[name[safemean], parameter[<ast.ListComp object at 0x7da18c4cf130>]]]] call[name[logger].record_tabular, parameter[constant[eplenmean], call[name[safemean], parameter[<ast.ListComp object at 0x7da20c7cac50>]]]] call[name[logger].dump_tabular, parameter[]] return[name[model]]
keyword[def] identifier[learn] ( identifier[network] , identifier[env] , identifier[seed] = keyword[None] , identifier[nsteps] = literal[int] , identifier[total_timesteps] = identifier[int] ( literal[int] ), identifier[vf_coef] = literal[int] , identifier[ent_coef] = literal[int] , identifier[max_grad_norm] = literal[int] , identifier[lr] = literal[int] , identifier[lrschedule] = literal[string] , identifier[epsilon] = literal[int] , identifier[alpha] = literal[int] , identifier[gamma] = literal[int] , identifier[log_interval] = literal[int] , identifier[load_path] = keyword[None] , ** identifier[network_kwargs] ): literal[string] identifier[set_global_seeds] ( identifier[seed] ) identifier[nenvs] = identifier[env] . identifier[num_envs] identifier[policy] = identifier[build_policy] ( identifier[env] , identifier[network] ,** identifier[network_kwargs] ) identifier[model] = identifier[Model] ( identifier[policy] = identifier[policy] , identifier[env] = identifier[env] , identifier[nsteps] = identifier[nsteps] , identifier[ent_coef] = identifier[ent_coef] , identifier[vf_coef] = identifier[vf_coef] , identifier[max_grad_norm] = identifier[max_grad_norm] , identifier[lr] = identifier[lr] , identifier[alpha] = identifier[alpha] , identifier[epsilon] = identifier[epsilon] , identifier[total_timesteps] = identifier[total_timesteps] , identifier[lrschedule] = identifier[lrschedule] ) keyword[if] identifier[load_path] keyword[is] keyword[not] keyword[None] : identifier[model] . identifier[load] ( identifier[load_path] ) identifier[runner] = identifier[Runner] ( identifier[env] , identifier[model] , identifier[nsteps] = identifier[nsteps] , identifier[gamma] = identifier[gamma] ) identifier[epinfobuf] = identifier[deque] ( identifier[maxlen] = literal[int] ) identifier[nbatch] = identifier[nenvs] * identifier[nsteps] identifier[tstart] = identifier[time] . 
identifier[time] () keyword[for] identifier[update] keyword[in] identifier[range] ( literal[int] , identifier[total_timesteps] // identifier[nbatch] + literal[int] ): identifier[obs] , identifier[states] , identifier[rewards] , identifier[masks] , identifier[actions] , identifier[values] , identifier[epinfos] = identifier[runner] . identifier[run] () identifier[epinfobuf] . identifier[extend] ( identifier[epinfos] ) identifier[policy_loss] , identifier[value_loss] , identifier[policy_entropy] = identifier[model] . identifier[train] ( identifier[obs] , identifier[states] , identifier[rewards] , identifier[masks] , identifier[actions] , identifier[values] ) identifier[nseconds] = identifier[time] . identifier[time] ()- identifier[tstart] identifier[fps] = identifier[int] (( identifier[update] * identifier[nbatch] )/ identifier[nseconds] ) keyword[if] identifier[update] % identifier[log_interval] == literal[int] keyword[or] identifier[update] == literal[int] : identifier[ev] = identifier[explained_variance] ( identifier[values] , identifier[rewards] ) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[update] ) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[update] * identifier[nbatch] ) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[fps] ) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[float] ( identifier[policy_entropy] )) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[float] ( identifier[value_loss] )) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[float] ( identifier[ev] )) identifier[logger] . identifier[record_tabular] ( literal[string] , identifier[safemean] ([ identifier[epinfo] [ literal[string] ] keyword[for] identifier[epinfo] keyword[in] identifier[epinfobuf] ])) identifier[logger] . 
identifier[record_tabular] ( literal[string] , identifier[safemean] ([ identifier[epinfo] [ literal[string] ] keyword[for] identifier[epinfo] keyword[in] identifier[epinfobuf] ])) identifier[logger] . identifier[dump_tabular] () keyword[return] identifier[model]
def learn(network, env, seed=None, nsteps=5, total_timesteps=int(80000000.0), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=0.0007, lrschedule='linear', epsilon=1e-05, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): """ Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. 
batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
""" set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # depends on [control=['if'], data=['load_path']] # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs * nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps // nbatch + 1): # Get mini batch of experiences (obs, states, rewards, masks, actions, values, epinfos) = runner.run() epinfobuf.extend(epinfos) (policy_loss, value_loss, policy_entropy) = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time() - tstart # Calculate the fps (frame per second) fps = int(update * nbatch / nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular('nupdates', update) logger.record_tabular('total_timesteps', update * nbatch) logger.record_tabular('fps', fps) logger.record_tabular('policy_entropy', float(policy_entropy)) logger.record_tabular('value_loss', float(value_loss)) logger.record_tabular('explained_variance', float(ev)) logger.record_tabular('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['update']] return model
def participants(self, **kwargs): """List the participants. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of participants """ path = '%s/%s/participants' % (self.manager.path, self.get_id()) return self.manager.gitlab.http_get(path, **kwargs)
def function[participants, parameter[self]]: constant[List the participants. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of participants ] variable[path] assign[=] binary_operation[constant[%s/%s/participants] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2043471c0>, <ast.Call object at 0x7da204344220>]]] return[call[name[self].manager.gitlab.http_get, parameter[name[path]]]]
keyword[def] identifier[participants] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[path] = literal[string] %( identifier[self] . identifier[manager] . identifier[path] , identifier[self] . identifier[get_id] ()) keyword[return] identifier[self] . identifier[manager] . identifier[gitlab] . identifier[http_get] ( identifier[path] ,** identifier[kwargs] )
def participants(self, **kwargs): """List the participants. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of participants """ path = '%s/%s/participants' % (self.manager.path, self.get_id()) return self.manager.gitlab.http_get(path, **kwargs)
def parse_pisa_multimers_xml(pisa_multimers_xml, download_structures=False, outdir=None, force_rerun=False): """Retrieve PISA information from an XML results file See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info XML description of macromolecular assemblies: http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will silently die in the server queue. Args: pisa_multimers_xml (str): Path to PISA XML output file download_structures (bool): If assembly files should be downloaded outdir (str): Directory to output assembly files force_rerun (bool): Redownload files if they already exist Returns: dict: of parsed PISA information """ if not outdir: outdir = os.getcwd() parser = etree.XMLParser(ns_clean=True) tree = etree.parse(pisa_multimers_xml, parser) root = tree.getroot() pisa = defaultdict(dict) for pdb in root.findall('pdb_entry'): # Get the PDB ID pdb_id = pdb.find('pdb_code').text # Check the assembly status status = pdb.find('status').text errors = ['Entry not found', 'Overlapping structures', 'No symmetry operations'] if status in errors: pisa[pdb_id]['status'] = status continue # Check monomer status num_complexes = int(pdb.find('total_asm').text) if num_complexes == 0: pisa[pdb_id]['status'] = 'MONOMER' continue elif num_complexes > 0: # All "assembly sets" (see PISA sets for more info) sets = pdb.findall('asm_set') for s in sets: set_id = int(s.find('ser_no').text) # All assemblies assemblies = 
s.findall('assembly') for cplx in assemblies: ############################################################################################ # This part tells you the actual composition of the predicted complex (chains and ligands) parts = cplx.findall('molecule') chains = defaultdict(int) for part in parts: part_id = part.find('chain_id').text if part_id.startswith('['): part_id = 'LIG_' + part_id.split(']')[0].strip('[') chains[str(part_id)] += 1 ligands = {} for key in deepcopy(chains).keys(): if key.startswith('LIG_'): ligands[str(key.split('_')[1])] = chains.pop(key) ############################################################################################ adder = {} cplx_id = int(cplx.find('id').text) cplx_composition = str(cplx.find('composition').text) d_g_diss = float(cplx.find('diss_energy').text) d_g_int = float(cplx.find('int_energy').text) pdb_biomol = int(cplx.find('R350').text) if d_g_diss >= 0: stable = True else: stable = False adder['cplx_composition'] = cplx_composition.strip() adder['cplx_chains'] = chains adder['cplx_ligands'] = ligands adder['stable'] = stable adder['d_g_diss'] = d_g_diss adder['d_g_int'] = d_g_int adder['pdb_biomol'] = pdb_biomol pisa[pdb_id][(set_id, cplx_id)] = adder if download_structures: ident = '{}:{},{}'.format(pdb_id, set_id, cplx_id) filename = op.join(outdir, ident + '.pdb') if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename): download_structure_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimer.pdb?{}'.format( ident) r = requests.get(download_structure_link) with open(filename, 'w') as f: f.write(r.text) log.debug('{}: downloaded structure file'.format(ident)) else: log.debug('{}: structure file already downloaded'.format(ident)) pisa[pdb_id][(set_id, cplx_id)]['structure_file'] = filename return pisa
def function[parse_pisa_multimers_xml, parameter[pisa_multimers_xml, download_structures, outdir, force_rerun]]: constant[Retrieve PISA information from an XML results file See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info XML description of macromolecular assemblies: http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will silently die in the server queue. Args: pisa_multimers_xml (str): Path to PISA XML output file download_structures (bool): If assembly files should be downloaded outdir (str): Directory to output assembly files force_rerun (bool): Redownload files if they already exist Returns: dict: of parsed PISA information ] if <ast.UnaryOp object at 0x7da1b0c676a0> begin[:] variable[outdir] assign[=] call[name[os].getcwd, parameter[]] variable[parser] assign[=] call[name[etree].XMLParser, parameter[]] variable[tree] assign[=] call[name[etree].parse, parameter[name[pisa_multimers_xml], name[parser]]] variable[root] assign[=] call[name[tree].getroot, parameter[]] variable[pisa] assign[=] call[name[defaultdict], parameter[name[dict]]] for taget[name[pdb]] in starred[call[name[root].findall, parameter[constant[pdb_entry]]]] begin[:] variable[pdb_id] assign[=] call[name[pdb].find, parameter[constant[pdb_code]]].text variable[status] assign[=] call[name[pdb].find, parameter[constant[status]]].text variable[errors] assign[=] list[[<ast.Constant object at 0x7da1b0c66c50>, <ast.Constant object at 
0x7da1b0c66c20>, <ast.Constant object at 0x7da1b0c66bf0>]] if compare[name[status] in name[errors]] begin[:] call[call[name[pisa]][name[pdb_id]]][constant[status]] assign[=] name[status] continue variable[num_complexes] assign[=] call[name[int], parameter[call[name[pdb].find, parameter[constant[total_asm]]].text]] if compare[name[num_complexes] equal[==] constant[0]] begin[:] call[call[name[pisa]][name[pdb_id]]][constant[status]] assign[=] constant[MONOMER] continue return[name[pisa]]
keyword[def] identifier[parse_pisa_multimers_xml] ( identifier[pisa_multimers_xml] , identifier[download_structures] = keyword[False] , identifier[outdir] = keyword[None] , identifier[force_rerun] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[outdir] : identifier[outdir] = identifier[os] . identifier[getcwd] () identifier[parser] = identifier[etree] . identifier[XMLParser] ( identifier[ns_clean] = keyword[True] ) identifier[tree] = identifier[etree] . identifier[parse] ( identifier[pisa_multimers_xml] , identifier[parser] ) identifier[root] = identifier[tree] . identifier[getroot] () identifier[pisa] = identifier[defaultdict] ( identifier[dict] ) keyword[for] identifier[pdb] keyword[in] identifier[root] . identifier[findall] ( literal[string] ): identifier[pdb_id] = identifier[pdb] . identifier[find] ( literal[string] ). identifier[text] identifier[status] = identifier[pdb] . identifier[find] ( literal[string] ). identifier[text] identifier[errors] =[ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[status] keyword[in] identifier[errors] : identifier[pisa] [ identifier[pdb_id] ][ literal[string] ]= identifier[status] keyword[continue] identifier[num_complexes] = identifier[int] ( identifier[pdb] . identifier[find] ( literal[string] ). identifier[text] ) keyword[if] identifier[num_complexes] == literal[int] : identifier[pisa] [ identifier[pdb_id] ][ literal[string] ]= literal[string] keyword[continue] keyword[elif] identifier[num_complexes] > literal[int] : identifier[sets] = identifier[pdb] . identifier[findall] ( literal[string] ) keyword[for] identifier[s] keyword[in] identifier[sets] : identifier[set_id] = identifier[int] ( identifier[s] . identifier[find] ( literal[string] ). identifier[text] ) identifier[assemblies] = identifier[s] . identifier[findall] ( literal[string] ) keyword[for] identifier[cplx] keyword[in] identifier[assemblies] : identifier[parts] = identifier[cplx] . 
identifier[findall] ( literal[string] ) identifier[chains] = identifier[defaultdict] ( identifier[int] ) keyword[for] identifier[part] keyword[in] identifier[parts] : identifier[part_id] = identifier[part] . identifier[find] ( literal[string] ). identifier[text] keyword[if] identifier[part_id] . identifier[startswith] ( literal[string] ): identifier[part_id] = literal[string] + identifier[part_id] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ( literal[string] ) identifier[chains] [ identifier[str] ( identifier[part_id] )]+= literal[int] identifier[ligands] ={} keyword[for] identifier[key] keyword[in] identifier[deepcopy] ( identifier[chains] ). identifier[keys] (): keyword[if] identifier[key] . identifier[startswith] ( literal[string] ): identifier[ligands] [ identifier[str] ( identifier[key] . identifier[split] ( literal[string] )[ literal[int] ])]= identifier[chains] . identifier[pop] ( identifier[key] ) identifier[adder] ={} identifier[cplx_id] = identifier[int] ( identifier[cplx] . identifier[find] ( literal[string] ). identifier[text] ) identifier[cplx_composition] = identifier[str] ( identifier[cplx] . identifier[find] ( literal[string] ). identifier[text] ) identifier[d_g_diss] = identifier[float] ( identifier[cplx] . identifier[find] ( literal[string] ). identifier[text] ) identifier[d_g_int] = identifier[float] ( identifier[cplx] . identifier[find] ( literal[string] ). identifier[text] ) identifier[pdb_biomol] = identifier[int] ( identifier[cplx] . identifier[find] ( literal[string] ). identifier[text] ) keyword[if] identifier[d_g_diss] >= literal[int] : identifier[stable] = keyword[True] keyword[else] : identifier[stable] = keyword[False] identifier[adder] [ literal[string] ]= identifier[cplx_composition] . 
identifier[strip] () identifier[adder] [ literal[string] ]= identifier[chains] identifier[adder] [ literal[string] ]= identifier[ligands] identifier[adder] [ literal[string] ]= identifier[stable] identifier[adder] [ literal[string] ]= identifier[d_g_diss] identifier[adder] [ literal[string] ]= identifier[d_g_int] identifier[adder] [ literal[string] ]= identifier[pdb_biomol] identifier[pisa] [ identifier[pdb_id] ][( identifier[set_id] , identifier[cplx_id] )]= identifier[adder] keyword[if] identifier[download_structures] : identifier[ident] = literal[string] . identifier[format] ( identifier[pdb_id] , identifier[set_id] , identifier[cplx_id] ) identifier[filename] = identifier[op] . identifier[join] ( identifier[outdir] , identifier[ident] + literal[string] ) keyword[if] identifier[ssbio] . identifier[utils] . identifier[force_rerun] ( identifier[flag] = identifier[force_rerun] , identifier[outfile] = identifier[filename] ): identifier[download_structure_link] = literal[string] . identifier[format] ( identifier[ident] ) identifier[r] = identifier[requests] . identifier[get] ( identifier[download_structure_link] ) keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[r] . identifier[text] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[ident] )) keyword[else] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[ident] )) identifier[pisa] [ identifier[pdb_id] ][( identifier[set_id] , identifier[cplx_id] )][ literal[string] ]= identifier[filename] keyword[return] identifier[pisa]
def parse_pisa_multimers_xml(pisa_multimers_xml, download_structures=False, outdir=None, force_rerun=False): """Retrieve PISA information from an XML results file See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info XML description of macromolecular assemblies: http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will silently die in the server queue. Args: pisa_multimers_xml (str): Path to PISA XML output file download_structures (bool): If assembly files should be downloaded outdir (str): Directory to output assembly files force_rerun (bool): Redownload files if they already exist Returns: dict: of parsed PISA information """ if not outdir: outdir = os.getcwd() # depends on [control=['if'], data=[]] parser = etree.XMLParser(ns_clean=True) tree = etree.parse(pisa_multimers_xml, parser) root = tree.getroot() pisa = defaultdict(dict) for pdb in root.findall('pdb_entry'): # Get the PDB ID pdb_id = pdb.find('pdb_code').text # Check the assembly status status = pdb.find('status').text errors = ['Entry not found', 'Overlapping structures', 'No symmetry operations'] if status in errors: pisa[pdb_id]['status'] = status continue # depends on [control=['if'], data=['status']] # Check monomer status num_complexes = int(pdb.find('total_asm').text) if num_complexes == 0: pisa[pdb_id]['status'] = 'MONOMER' continue # depends on [control=['if'], data=[]] elif num_complexes > 0: # All "assembly sets" (see PISA sets for 
more info) sets = pdb.findall('asm_set') for s in sets: set_id = int(s.find('ser_no').text) # All assemblies assemblies = s.findall('assembly') for cplx in assemblies: ############################################################################################ # This part tells you the actual composition of the predicted complex (chains and ligands) parts = cplx.findall('molecule') chains = defaultdict(int) for part in parts: part_id = part.find('chain_id').text if part_id.startswith('['): part_id = 'LIG_' + part_id.split(']')[0].strip('[') # depends on [control=['if'], data=[]] chains[str(part_id)] += 1 # depends on [control=['for'], data=['part']] ligands = {} for key in deepcopy(chains).keys(): if key.startswith('LIG_'): ligands[str(key.split('_')[1])] = chains.pop(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] ############################################################################################ adder = {} cplx_id = int(cplx.find('id').text) cplx_composition = str(cplx.find('composition').text) d_g_diss = float(cplx.find('diss_energy').text) d_g_int = float(cplx.find('int_energy').text) pdb_biomol = int(cplx.find('R350').text) if d_g_diss >= 0: stable = True # depends on [control=['if'], data=[]] else: stable = False adder['cplx_composition'] = cplx_composition.strip() adder['cplx_chains'] = chains adder['cplx_ligands'] = ligands adder['stable'] = stable adder['d_g_diss'] = d_g_diss adder['d_g_int'] = d_g_int adder['pdb_biomol'] = pdb_biomol pisa[pdb_id][set_id, cplx_id] = adder if download_structures: ident = '{}:{},{}'.format(pdb_id, set_id, cplx_id) filename = op.join(outdir, ident + '.pdb') if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename): download_structure_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimer.pdb?{}'.format(ident) r = requests.get(download_structure_link) with open(filename, 'w') as f: f.write(r.text) # depends on [control=['with'], data=['f']] log.debug('{}: downloaded 
structure file'.format(ident)) # depends on [control=['if'], data=[]] else: log.debug('{}: structure file already downloaded'.format(ident)) pisa[pdb_id][set_id, cplx_id]['structure_file'] = filename # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cplx']] # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pdb']] return pisa
def parse(self, data, charset=None): """ Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns: """ charset = charset or self.charset return self._parse_data(data, charset)
def function[parse, parameter[self, data, charset]]: constant[ Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns: ] variable[charset] assign[=] <ast.BoolOp object at 0x7da18fe90670> return[call[name[self]._parse_data, parameter[name[data], name[charset]]]]
keyword[def] identifier[parse] ( identifier[self] , identifier[data] , identifier[charset] = keyword[None] ): literal[string] identifier[charset] = identifier[charset] keyword[or] identifier[self] . identifier[charset] keyword[return] identifier[self] . identifier[_parse_data] ( identifier[data] , identifier[charset] )
def parse(self, data, charset=None): """ Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns: """ charset = charset or self.charset return self._parse_data(data, charset)
def condition_from_text(text) -> Condition: """ Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return: """ try: condition = pypeg2.parse(text, output.Condition) except SyntaxError: # Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156 # In such a case, they are store as empty PEG grammar object and considered unlockable condition = Condition(text) return condition
def function[condition_from_text, parameter[text]]: constant[ Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return: ] <ast.Try object at 0x7da2044c3460> return[name[condition]]
keyword[def] identifier[condition_from_text] ( identifier[text] )-> identifier[Condition] : literal[string] keyword[try] : identifier[condition] = identifier[pypeg2] . identifier[parse] ( identifier[text] , identifier[output] . identifier[Condition] ) keyword[except] identifier[SyntaxError] : identifier[condition] = identifier[Condition] ( identifier[text] ) keyword[return] identifier[condition]
def condition_from_text(text) -> Condition: """ Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return: """ try: condition = pypeg2.parse(text, output.Condition) # depends on [control=['try'], data=[]] except SyntaxError: # Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156 # In such a case, they are store as empty PEG grammar object and considered unlockable condition = Condition(text) # depends on [control=['except'], data=[]] return condition
def volume(self, lmax=None): """ If the function is the real shape of an object, calculate the volume of the body. Usage ----- volume = x.volume([lmax]) Returns ------- volume : float The volume of the object. Parameters ---------- lmax : int, optional, default = x.lmax The maximum spherical harmonic degree to use when calculating the volume. Description ----------- If the function is the real shape of an object, this method will calculate the volume of the body exactly by integration. This routine raises the function to the nth power, with n from 1 to 3, and calculates the spherical harmonic degree and order 0 term. To avoid aliases, the function is first expand on a grid that can resolve spherical harmonic degrees up to 3*lmax. """ if self.coeffs[0, 0, 0] == 0: raise ValueError('The volume of the object can not be calculated ' 'when the degree and order 0 term is equal to ' 'zero.') if self.kind == 'complex': raise ValueError('The volume of the object can not be calculated ' 'for complex functions.') if lmax is None: lmax = self.lmax r0 = self.coeffs[0, 0, 0] grid = self.expand(lmax=3*lmax) - r0 h200 = (grid**2).expand(lmax_calc=0).coeffs[0, 0, 0] h300 = (grid**3).expand(lmax_calc=0).coeffs[0, 0, 0] volume = 4 * _np.pi / 3 * (h300 + 3 * r0 * h200 + r0**3) return volume
def function[volume, parameter[self, lmax]]: constant[ If the function is the real shape of an object, calculate the volume of the body. Usage ----- volume = x.volume([lmax]) Returns ------- volume : float The volume of the object. Parameters ---------- lmax : int, optional, default = x.lmax The maximum spherical harmonic degree to use when calculating the volume. Description ----------- If the function is the real shape of an object, this method will calculate the volume of the body exactly by integration. This routine raises the function to the nth power, with n from 1 to 3, and calculates the spherical harmonic degree and order 0 term. To avoid aliases, the function is first expand on a grid that can resolve spherical harmonic degrees up to 3*lmax. ] if compare[call[name[self].coeffs][tuple[[<ast.Constant object at 0x7da18dc06980>, <ast.Constant object at 0x7da18dc07250>, <ast.Constant object at 0x7da18dc07e80>]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18dc059f0> if compare[name[self].kind equal[==] constant[complex]] begin[:] <ast.Raise object at 0x7da18dc05d80> if compare[name[lmax] is constant[None]] begin[:] variable[lmax] assign[=] name[self].lmax variable[r0] assign[=] call[name[self].coeffs][tuple[[<ast.Constant object at 0x7da18dc05120>, <ast.Constant object at 0x7da18dc05e40>, <ast.Constant object at 0x7da18dc04580>]]] variable[grid] assign[=] binary_operation[call[name[self].expand, parameter[]] - name[r0]] variable[h200] assign[=] call[call[binary_operation[name[grid] ** constant[2]].expand, parameter[]].coeffs][tuple[[<ast.Constant object at 0x7da18dc05150>, <ast.Constant object at 0x7da18dc05930>, <ast.Constant object at 0x7da18dc04d90>]]] variable[h300] assign[=] call[call[binary_operation[name[grid] ** constant[3]].expand, parameter[]].coeffs][tuple[[<ast.Constant object at 0x7da18dc05e10>, <ast.Constant object at 0x7da18dc067d0>, <ast.Constant object at 0x7da18dc07c40>]]] variable[volume] assign[=] 
binary_operation[binary_operation[binary_operation[constant[4] * name[_np].pi] / constant[3]] * binary_operation[binary_operation[name[h300] + binary_operation[binary_operation[constant[3] * name[r0]] * name[h200]]] + binary_operation[name[r0] ** constant[3]]]] return[name[volume]]
keyword[def] identifier[volume] ( identifier[self] , identifier[lmax] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[coeffs] [ literal[int] , literal[int] , literal[int] ]== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] ) keyword[if] identifier[self] . identifier[kind] == literal[string] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[lmax] keyword[is] keyword[None] : identifier[lmax] = identifier[self] . identifier[lmax] identifier[r0] = identifier[self] . identifier[coeffs] [ literal[int] , literal[int] , literal[int] ] identifier[grid] = identifier[self] . identifier[expand] ( identifier[lmax] = literal[int] * identifier[lmax] )- identifier[r0] identifier[h200] =( identifier[grid] ** literal[int] ). identifier[expand] ( identifier[lmax_calc] = literal[int] ). identifier[coeffs] [ literal[int] , literal[int] , literal[int] ] identifier[h300] =( identifier[grid] ** literal[int] ). identifier[expand] ( identifier[lmax_calc] = literal[int] ). identifier[coeffs] [ literal[int] , literal[int] , literal[int] ] identifier[volume] = literal[int] * identifier[_np] . identifier[pi] / literal[int] *( identifier[h300] + literal[int] * identifier[r0] * identifier[h200] + identifier[r0] ** literal[int] ) keyword[return] identifier[volume]
def volume(self, lmax=None): """ If the function is the real shape of an object, calculate the volume of the body. Usage ----- volume = x.volume([lmax]) Returns ------- volume : float The volume of the object. Parameters ---------- lmax : int, optional, default = x.lmax The maximum spherical harmonic degree to use when calculating the volume. Description ----------- If the function is the real shape of an object, this method will calculate the volume of the body exactly by integration. This routine raises the function to the nth power, with n from 1 to 3, and calculates the spherical harmonic degree and order 0 term. To avoid aliases, the function is first expand on a grid that can resolve spherical harmonic degrees up to 3*lmax. """ if self.coeffs[0, 0, 0] == 0: raise ValueError('The volume of the object can not be calculated when the degree and order 0 term is equal to zero.') # depends on [control=['if'], data=[]] if self.kind == 'complex': raise ValueError('The volume of the object can not be calculated for complex functions.') # depends on [control=['if'], data=[]] if lmax is None: lmax = self.lmax # depends on [control=['if'], data=['lmax']] r0 = self.coeffs[0, 0, 0] grid = self.expand(lmax=3 * lmax) - r0 h200 = (grid ** 2).expand(lmax_calc=0).coeffs[0, 0, 0] h300 = (grid ** 3).expand(lmax_calc=0).coeffs[0, 0, 0] volume = 4 * _np.pi / 3 * (h300 + 3 * r0 * h200 + r0 ** 3) return volume
def _append_hdu_info(self, ext): """ internal routine append info for indiciated extension """ # raised IOError if not found hdu_type = self._FITS.movabs_hdu(ext+1) if hdu_type == IMAGE_HDU: hdu = ImageHDU(self._FITS, ext, **self.keys) elif hdu_type == BINARY_TBL: hdu = TableHDU(self._FITS, ext, **self.keys) elif hdu_type == ASCII_TBL: hdu = AsciiTableHDU(self._FITS, ext, **self.keys) else: mess = ("extension %s is of unknown type %s " "this is probably a bug") mess = mess % (ext, hdu_type) raise IOError(mess) self.hdu_list.append(hdu) self.hdu_map[ext] = hdu extname = hdu.get_extname() if not self.case_sensitive: extname = extname.lower() if extname != '': # this will guarantee we default to *first* version, # if version is not requested, using __getitem__ if extname not in self.hdu_map: self.hdu_map[extname] = hdu ver = hdu.get_extver() if ver > 0: key = '%s-%s' % (extname, ver) self.hdu_map[key] = hdu
def function[_append_hdu_info, parameter[self, ext]]: constant[ internal routine append info for indiciated extension ] variable[hdu_type] assign[=] call[name[self]._FITS.movabs_hdu, parameter[binary_operation[name[ext] + constant[1]]]] if compare[name[hdu_type] equal[==] name[IMAGE_HDU]] begin[:] variable[hdu] assign[=] call[name[ImageHDU], parameter[name[self]._FITS, name[ext]]] call[name[self].hdu_list.append, parameter[name[hdu]]] call[name[self].hdu_map][name[ext]] assign[=] name[hdu] variable[extname] assign[=] call[name[hdu].get_extname, parameter[]] if <ast.UnaryOp object at 0x7da20c7949d0> begin[:] variable[extname] assign[=] call[name[extname].lower, parameter[]] if compare[name[extname] not_equal[!=] constant[]] begin[:] if compare[name[extname] <ast.NotIn object at 0x7da2590d7190> name[self].hdu_map] begin[:] call[name[self].hdu_map][name[extname]] assign[=] name[hdu] variable[ver] assign[=] call[name[hdu].get_extver, parameter[]] if compare[name[ver] greater[>] constant[0]] begin[:] variable[key] assign[=] binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2043456f0>, <ast.Name object at 0x7da204345c90>]]] call[name[self].hdu_map][name[key]] assign[=] name[hdu]
keyword[def] identifier[_append_hdu_info] ( identifier[self] , identifier[ext] ): literal[string] identifier[hdu_type] = identifier[self] . identifier[_FITS] . identifier[movabs_hdu] ( identifier[ext] + literal[int] ) keyword[if] identifier[hdu_type] == identifier[IMAGE_HDU] : identifier[hdu] = identifier[ImageHDU] ( identifier[self] . identifier[_FITS] , identifier[ext] ,** identifier[self] . identifier[keys] ) keyword[elif] identifier[hdu_type] == identifier[BINARY_TBL] : identifier[hdu] = identifier[TableHDU] ( identifier[self] . identifier[_FITS] , identifier[ext] ,** identifier[self] . identifier[keys] ) keyword[elif] identifier[hdu_type] == identifier[ASCII_TBL] : identifier[hdu] = identifier[AsciiTableHDU] ( identifier[self] . identifier[_FITS] , identifier[ext] ,** identifier[self] . identifier[keys] ) keyword[else] : identifier[mess] =( literal[string] literal[string] ) identifier[mess] = identifier[mess] %( identifier[ext] , identifier[hdu_type] ) keyword[raise] identifier[IOError] ( identifier[mess] ) identifier[self] . identifier[hdu_list] . identifier[append] ( identifier[hdu] ) identifier[self] . identifier[hdu_map] [ identifier[ext] ]= identifier[hdu] identifier[extname] = identifier[hdu] . identifier[get_extname] () keyword[if] keyword[not] identifier[self] . identifier[case_sensitive] : identifier[extname] = identifier[extname] . identifier[lower] () keyword[if] identifier[extname] != literal[string] : keyword[if] identifier[extname] keyword[not] keyword[in] identifier[self] . identifier[hdu_map] : identifier[self] . identifier[hdu_map] [ identifier[extname] ]= identifier[hdu] identifier[ver] = identifier[hdu] . identifier[get_extver] () keyword[if] identifier[ver] > literal[int] : identifier[key] = literal[string] %( identifier[extname] , identifier[ver] ) identifier[self] . identifier[hdu_map] [ identifier[key] ]= identifier[hdu]
def _append_hdu_info(self, ext): """ internal routine append info for indiciated extension """ # raised IOError if not found hdu_type = self._FITS.movabs_hdu(ext + 1) if hdu_type == IMAGE_HDU: hdu = ImageHDU(self._FITS, ext, **self.keys) # depends on [control=['if'], data=[]] elif hdu_type == BINARY_TBL: hdu = TableHDU(self._FITS, ext, **self.keys) # depends on [control=['if'], data=[]] elif hdu_type == ASCII_TBL: hdu = AsciiTableHDU(self._FITS, ext, **self.keys) # depends on [control=['if'], data=[]] else: mess = 'extension %s is of unknown type %s this is probably a bug' mess = mess % (ext, hdu_type) raise IOError(mess) self.hdu_list.append(hdu) self.hdu_map[ext] = hdu extname = hdu.get_extname() if not self.case_sensitive: extname = extname.lower() # depends on [control=['if'], data=[]] if extname != '': # this will guarantee we default to *first* version, # if version is not requested, using __getitem__ if extname not in self.hdu_map: self.hdu_map[extname] = hdu # depends on [control=['if'], data=['extname']] ver = hdu.get_extver() if ver > 0: key = '%s-%s' % (extname, ver) self.hdu_map[key] = hdu # depends on [control=['if'], data=['ver']] # depends on [control=['if'], data=['extname']]
def str_to_etree(xml_str, encoding='utf-8'): """Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc. """ parser = xml.etree.ElementTree.XMLParser(encoding=encoding) return xml.etree.ElementTree.fromstring(xml_str, parser=parser)
def function[str_to_etree, parameter[xml_str, encoding]]: constant[Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc. ] variable[parser] assign[=] call[name[xml].etree.ElementTree.XMLParser, parameter[]] return[call[name[xml].etree.ElementTree.fromstring, parameter[name[xml_str]]]]
keyword[def] identifier[str_to_etree] ( identifier[xml_str] , identifier[encoding] = literal[string] ): literal[string] identifier[parser] = identifier[xml] . identifier[etree] . identifier[ElementTree] . identifier[XMLParser] ( identifier[encoding] = identifier[encoding] ) keyword[return] identifier[xml] . identifier[etree] . identifier[ElementTree] . identifier[fromstring] ( identifier[xml_str] , identifier[parser] = identifier[parser] )
def str_to_etree(xml_str, encoding='utf-8'): """Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc. """ parser = xml.etree.ElementTree.XMLParser(encoding=encoding) return xml.etree.ElementTree.fromstring(xml_str, parser=parser)
def branch_rate(self, filename=None): """ Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file. """ if filename is None: el = self.xml else: el = self._get_class_element_by_filename(filename) return float(el.attrib['branch-rate'])
def function[branch_rate, parameter[self, filename]]: constant[ Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file. ] if compare[name[filename] is constant[None]] begin[:] variable[el] assign[=] name[self].xml return[call[name[float], parameter[call[name[el].attrib][constant[branch-rate]]]]]
keyword[def] identifier[branch_rate] ( identifier[self] , identifier[filename] = keyword[None] ): literal[string] keyword[if] identifier[filename] keyword[is] keyword[None] : identifier[el] = identifier[self] . identifier[xml] keyword[else] : identifier[el] = identifier[self] . identifier[_get_class_element_by_filename] ( identifier[filename] ) keyword[return] identifier[float] ( identifier[el] . identifier[attrib] [ literal[string] ])
def branch_rate(self, filename=None): """ Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file. """ if filename is None: el = self.xml # depends on [control=['if'], data=[]] else: el = self._get_class_element_by_filename(filename) return float(el.attrib['branch-rate'])
def Call(func_name, args=None, prefix=None): """A function call""" node = Node(syms.power, [func_name, ArgList(args)]) if prefix is not None: node.prefix = prefix return node
def function[Call, parameter[func_name, args, prefix]]: constant[A function call] variable[node] assign[=] call[name[Node], parameter[name[syms].power, list[[<ast.Name object at 0x7da1b08c9a80>, <ast.Call object at 0x7da1b08ca410>]]]] if compare[name[prefix] is_not constant[None]] begin[:] name[node].prefix assign[=] name[prefix] return[name[node]]
keyword[def] identifier[Call] ( identifier[func_name] , identifier[args] = keyword[None] , identifier[prefix] = keyword[None] ): literal[string] identifier[node] = identifier[Node] ( identifier[syms] . identifier[power] ,[ identifier[func_name] , identifier[ArgList] ( identifier[args] )]) keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] : identifier[node] . identifier[prefix] = identifier[prefix] keyword[return] identifier[node]
def Call(func_name, args=None, prefix=None): """A function call""" node = Node(syms.power, [func_name, ArgList(args)]) if prefix is not None: node.prefix = prefix # depends on [control=['if'], data=['prefix']] return node
def _get_par_summary(sim, n, probs): """Summarize chains merged and individually Parameters ---------- sim : dict from stanfit object n : int parameter index probs : iterable of int quantiles Returns ------- summary : dict Dictionary containing summaries """ # _get_samples gets chains for nth parameter ss = _get_samples(n, sim, inc_warmup=False) msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1)) qfun = lambda chain: mquantiles(chain, probs) c_msd = np.array([msdfun(s) for s in ss]).flatten() c_quan = np.array([qfun(s) for s in ss]).flatten() ass = np.asarray(ss).flatten() msd = np.asarray(msdfun(ass)) quan = qfun(np.asarray(ass)) return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan)
def function[_get_par_summary, parameter[sim, n, probs]]: constant[Summarize chains merged and individually Parameters ---------- sim : dict from stanfit object n : int parameter index probs : iterable of int quantiles Returns ------- summary : dict Dictionary containing summaries ] variable[ss] assign[=] call[name[_get_samples], parameter[name[n], name[sim]]] variable[msdfun] assign[=] <ast.Lambda object at 0x7da1b2261f00> variable[qfun] assign[=] <ast.Lambda object at 0x7da1b22625c0> variable[c_msd] assign[=] call[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b22622c0>]].flatten, parameter[]] variable[c_quan] assign[=] call[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b22eb2b0>]].flatten, parameter[]] variable[ass] assign[=] call[call[name[np].asarray, parameter[name[ss]]].flatten, parameter[]] variable[msd] assign[=] call[name[np].asarray, parameter[call[name[msdfun], parameter[name[ass]]]]] variable[quan] assign[=] call[name[qfun], parameter[call[name[np].asarray, parameter[name[ass]]]]] return[call[name[dict], parameter[]]]
keyword[def] identifier[_get_par_summary] ( identifier[sim] , identifier[n] , identifier[probs] ): literal[string] identifier[ss] = identifier[_get_samples] ( identifier[n] , identifier[sim] , identifier[inc_warmup] = keyword[False] ) identifier[msdfun] = keyword[lambda] identifier[chain] :( identifier[np] . identifier[mean] ( identifier[chain] ), identifier[np] . identifier[std] ( identifier[chain] , identifier[ddof] = literal[int] )) identifier[qfun] = keyword[lambda] identifier[chain] : identifier[mquantiles] ( identifier[chain] , identifier[probs] ) identifier[c_msd] = identifier[np] . identifier[array] ([ identifier[msdfun] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[ss] ]). identifier[flatten] () identifier[c_quan] = identifier[np] . identifier[array] ([ identifier[qfun] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[ss] ]). identifier[flatten] () identifier[ass] = identifier[np] . identifier[asarray] ( identifier[ss] ). identifier[flatten] () identifier[msd] = identifier[np] . identifier[asarray] ( identifier[msdfun] ( identifier[ass] )) identifier[quan] = identifier[qfun] ( identifier[np] . identifier[asarray] ( identifier[ass] )) keyword[return] identifier[dict] ( identifier[msd] = identifier[msd] , identifier[quan] = identifier[quan] , identifier[c_msd] = identifier[c_msd] , identifier[c_quan] = identifier[c_quan] )
def _get_par_summary(sim, n, probs): """Summarize chains merged and individually Parameters ---------- sim : dict from stanfit object n : int parameter index probs : iterable of int quantiles Returns ------- summary : dict Dictionary containing summaries """ # _get_samples gets chains for nth parameter ss = _get_samples(n, sim, inc_warmup=False) msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1)) qfun = lambda chain: mquantiles(chain, probs) c_msd = np.array([msdfun(s) for s in ss]).flatten() c_quan = np.array([qfun(s) for s in ss]).flatten() ass = np.asarray(ss).flatten() msd = np.asarray(msdfun(ass)) quan = qfun(np.asarray(ass)) return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan)
def _skip_spaces(string, idx): # type: (str, int) -> int """ Retrieves the next non-space character after idx index in the given string :param string: The string to look into :param idx: The base search index :return: The next non-space character index, -1 if not found """ i = idx for char in string[idx:]: if not char.isspace(): return i i += 1 return -1
def function[_skip_spaces, parameter[string, idx]]: constant[ Retrieves the next non-space character after idx index in the given string :param string: The string to look into :param idx: The base search index :return: The next non-space character index, -1 if not found ] variable[i] assign[=] name[idx] for taget[name[char]] in starred[call[name[string]][<ast.Slice object at 0x7da20c6e6c50>]] begin[:] if <ast.UnaryOp object at 0x7da20c6e4c10> begin[:] return[name[i]] <ast.AugAssign object at 0x7da20c6e6bf0> return[<ast.UnaryOp object at 0x7da20c6e51e0>]
keyword[def] identifier[_skip_spaces] ( identifier[string] , identifier[idx] ): literal[string] identifier[i] = identifier[idx] keyword[for] identifier[char] keyword[in] identifier[string] [ identifier[idx] :]: keyword[if] keyword[not] identifier[char] . identifier[isspace] (): keyword[return] identifier[i] identifier[i] += literal[int] keyword[return] - literal[int]
def _skip_spaces(string, idx): # type: (str, int) -> int '\n Retrieves the next non-space character after idx index in the given string\n\n :param string: The string to look into\n :param idx: The base search index\n :return: The next non-space character index, -1 if not found\n ' i = idx for char in string[idx:]: if not char.isspace(): return i # depends on [control=['if'], data=[]] i += 1 # depends on [control=['for'], data=['char']] return -1
def parse_args_and_run(): """ :return: The parsed arguments """ parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(help='Docker-tag-naming sub-commands', dest='subparser_name') # # Forge # parser_forge = subparsers.add_parser('forge', help='Create a new version tag') parser_forge.add_argument('--version', type=int, default=1, help='Version number') parser_forge.add_argument('--commit-id', required=True, help='Git commit id') parser_forge.add_argument('--branch', required=True, help='The branch name (ie. master)') # # Latest # parser_latest = subparsers.add_parser('latest', help='Query the latest tag in the' ' registry') parser_latest.add_argument('image', help='The image to query (ie. username/image)') parser_latest.add_argument('branch', help='The branch name (ie. master)') # # Bump # parser_bump = subparsers.add_parser('bump', help='Query the latest tag in the' ' registry and return a +1') parser_bump.add_argument('image', help='The image to bump (ie. username/image)') parser_bump.add_argument('branch', help='The branch name (ie. master)') parser_bump.add_argument('--commit-id', required=True, help='Git commit id for the newly created tag') # # Refresh # parser_latest = subparsers.add_parser('refresh', help='Loop until the latest tag in' ' the registry changes') parser_latest.add_argument('image', help='The image to query (ie. username/image)') parser_latest.add_argument('branch', help='The branch name (ie. master)') args = parser.parse_args() {'bump': run_bump, 'latest': run_latest, 'forge': run_forge, 'refresh': run_refresh}.get(args.subparser_name)(args)
def function[parse_args_and_run, parameter[]]: constant[ :return: The parsed arguments ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]] variable[parser_forge] assign[=] call[name[subparsers].add_parser, parameter[constant[forge]]] call[name[parser_forge].add_argument, parameter[constant[--version]]] call[name[parser_forge].add_argument, parameter[constant[--commit-id]]] call[name[parser_forge].add_argument, parameter[constant[--branch]]] variable[parser_latest] assign[=] call[name[subparsers].add_parser, parameter[constant[latest]]] call[name[parser_latest].add_argument, parameter[constant[image]]] call[name[parser_latest].add_argument, parameter[constant[branch]]] variable[parser_bump] assign[=] call[name[subparsers].add_parser, parameter[constant[bump]]] call[name[parser_bump].add_argument, parameter[constant[image]]] call[name[parser_bump].add_argument, parameter[constant[branch]]] call[name[parser_bump].add_argument, parameter[constant[--commit-id]]] variable[parser_latest] assign[=] call[name[subparsers].add_parser, parameter[constant[refresh]]] call[name[parser_latest].add_argument, parameter[constant[image]]] call[name[parser_latest].add_argument, parameter[constant[branch]]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] call[call[dictionary[[<ast.Constant object at 0x7da1b26e2b00>, <ast.Constant object at 0x7da1b26e2aa0>, <ast.Constant object at 0x7da1b26e1510>, <ast.Constant object at 0x7da1b26e2cb0>], [<ast.Name object at 0x7da1b26e2b60>, <ast.Name object at 0x7da1b26e2140>, <ast.Name object at 0x7da1b26e1480>, <ast.Name object at 0x7da1b26e1e10>]].get, parameter[name[args].subparser_name]], parameter[name[args]]]
keyword[def] identifier[parse_args_and_run] (): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] () identifier[subparsers] = identifier[parser] . identifier[add_subparsers] ( identifier[help] = literal[string] , identifier[dest] = literal[string] ) identifier[parser_forge] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_forge] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[parser_forge] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[parser_forge] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[parser_latest] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] literal[string] ) identifier[parser_latest] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_latest] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_bump] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] literal[string] ) identifier[parser_bump] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_bump] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_bump] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[parser_latest] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] literal[string] ) identifier[parser_latest] . 
identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_latest] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[args] = identifier[parser] . identifier[parse_args] () { literal[string] : identifier[run_bump] , literal[string] : identifier[run_latest] , literal[string] : identifier[run_forge] , literal[string] : identifier[run_refresh] }. identifier[get] ( identifier[args] . identifier[subparser_name] )( identifier[args] )
def parse_args_and_run(): """ :return: The parsed arguments """ parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(help='Docker-tag-naming sub-commands', dest='subparser_name') # # Forge # parser_forge = subparsers.add_parser('forge', help='Create a new version tag') parser_forge.add_argument('--version', type=int, default=1, help='Version number') parser_forge.add_argument('--commit-id', required=True, help='Git commit id') parser_forge.add_argument('--branch', required=True, help='The branch name (ie. master)') # # Latest # parser_latest = subparsers.add_parser('latest', help='Query the latest tag in the registry') parser_latest.add_argument('image', help='The image to query (ie. username/image)') parser_latest.add_argument('branch', help='The branch name (ie. master)') # # Bump # parser_bump = subparsers.add_parser('bump', help='Query the latest tag in the registry and return a +1') parser_bump.add_argument('image', help='The image to bump (ie. username/image)') parser_bump.add_argument('branch', help='The branch name (ie. master)') parser_bump.add_argument('--commit-id', required=True, help='Git commit id for the newly created tag') # # Refresh # parser_latest = subparsers.add_parser('refresh', help='Loop until the latest tag in the registry changes') parser_latest.add_argument('image', help='The image to query (ie. username/image)') parser_latest.add_argument('branch', help='The branch name (ie. master)') args = parser.parse_args() {'bump': run_bump, 'latest': run_latest, 'forge': run_forge, 'refresh': run_refresh}.get(args.subparser_name)(args)
def penn_treebank_dataset( directory='data/penn-treebank', train=False, dev=False, test=False, train_filename='ptb.train.txt', dev_filename='ptb.valid.txt', test_filename='ptb.test.txt', check_files=['ptb.train.txt'], urls=[ 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt', 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt', 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt' ], unknown_token=DEFAULT_UNKNOWN_TOKEN, eos_token=DEFAULT_EOS_TOKEN): """ Load the Penn Treebank dataset. This is the Penn Treebank Project: Release 2 CDROM, featuring a million words of 1989 Wall Street Journal material. **Reference:** https://catalog.ldc.upenn.edu/ldc99t42 **Citation:** Marcus, Mitchell P., Marcinkiewicz, Mary Ann & Santorini, Beatrice (1993). Building a Large Annotated Corpus of English: The Penn Treebank Args: directory (str, optional): Directory to cache the dataset. train (bool, optional): If to load the training split of the dataset. dev (bool, optional): If to load the development split of the dataset. test (bool, optional): If to load the test split of the dataset. train_filename (str, optional): The filename of the training split. dev_filename (str, optional): The filename of the development split. test_filename (str, optional): The filename of the test split. name (str, optional): Name of the dataset directory. check_files (str, optional): Check if these files exist, then this download was successful. urls (str, optional): URLs to download. unknown_token (str, optional): Token to use for unknown words. eos_token (str, optional): Token to use at the end of sentences. Returns: :class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`: Returns between one and all dataset splits (train, dev and test) depending on if their respective boolean argument is ``True``. 
Example: >>> from torchnlp.datasets import penn_treebank_dataset # doctest: +SKIP >>> train = penn_treebank_dataset(train=True) # doctest: +SKIP >>> train[:10] # doctest: +SKIP ['aer', 'banknote', 'berlitz', 'calloway', 'centrust', 'cluett', 'fromstein', 'gitano', 'guterman', 'hydro-quebec'] """ download_files_maybe_extract(urls=urls, directory=directory, check_files=check_files) ret = [] splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)] splits = [f for (requested, f) in splits if requested] for filename in splits: full_path = os.path.join(directory, filename) text = [] with io.open(full_path, encoding='utf-8') as f: for line in f: text.extend(line.replace('<unk>', unknown_token).split()) text.append(eos_token) ret.append(text) if len(ret) == 1: return ret[0] else: return tuple(ret)
def function[penn_treebank_dataset, parameter[directory, train, dev, test, train_filename, dev_filename, test_filename, check_files, urls, unknown_token, eos_token]]: constant[ Load the Penn Treebank dataset. This is the Penn Treebank Project: Release 2 CDROM, featuring a million words of 1989 Wall Street Journal material. **Reference:** https://catalog.ldc.upenn.edu/ldc99t42 **Citation:** Marcus, Mitchell P., Marcinkiewicz, Mary Ann & Santorini, Beatrice (1993). Building a Large Annotated Corpus of English: The Penn Treebank Args: directory (str, optional): Directory to cache the dataset. train (bool, optional): If to load the training split of the dataset. dev (bool, optional): If to load the development split of the dataset. test (bool, optional): If to load the test split of the dataset. train_filename (str, optional): The filename of the training split. dev_filename (str, optional): The filename of the development split. test_filename (str, optional): The filename of the test split. name (str, optional): Name of the dataset directory. check_files (str, optional): Check if these files exist, then this download was successful. urls (str, optional): URLs to download. unknown_token (str, optional): Token to use for unknown words. eos_token (str, optional): Token to use at the end of sentences. Returns: :class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`: Returns between one and all dataset splits (train, dev and test) depending on if their respective boolean argument is ``True``. 
Example: >>> from torchnlp.datasets import penn_treebank_dataset # doctest: +SKIP >>> train = penn_treebank_dataset(train=True) # doctest: +SKIP >>> train[:10] # doctest: +SKIP ['aer', 'banknote', 'berlitz', 'calloway', 'centrust', 'cluett', 'fromstein', 'gitano', 'guterman', 'hydro-quebec'] ] call[name[download_files_maybe_extract], parameter[]] variable[ret] assign[=] list[[]] variable[splits] assign[=] list[[<ast.Tuple object at 0x7da18fe92650>, <ast.Tuple object at 0x7da18fe92b60>, <ast.Tuple object at 0x7da18fe92530>]] variable[splits] assign[=] <ast.ListComp object at 0x7da18fe90880> for taget[name[filename]] in starred[name[splits]] begin[:] variable[full_path] assign[=] call[name[os].path.join, parameter[name[directory], name[filename]]] variable[text] assign[=] list[[]] with call[name[io].open, parameter[name[full_path]]] begin[:] for taget[name[line]] in starred[name[f]] begin[:] call[name[text].extend, parameter[call[call[name[line].replace, parameter[constant[<unk>], name[unknown_token]]].split, parameter[]]]] call[name[text].append, parameter[name[eos_token]]] call[name[ret].append, parameter[name[text]]] if compare[call[name[len], parameter[name[ret]]] equal[==] constant[1]] begin[:] return[call[name[ret]][constant[0]]]
keyword[def] identifier[penn_treebank_dataset] ( identifier[directory] = literal[string] , identifier[train] = keyword[False] , identifier[dev] = keyword[False] , identifier[test] = keyword[False] , identifier[train_filename] = literal[string] , identifier[dev_filename] = literal[string] , identifier[test_filename] = literal[string] , identifier[check_files] =[ literal[string] ], identifier[urls] =[ literal[string] , literal[string] , literal[string] ], identifier[unknown_token] = identifier[DEFAULT_UNKNOWN_TOKEN] , identifier[eos_token] = identifier[DEFAULT_EOS_TOKEN] ): literal[string] identifier[download_files_maybe_extract] ( identifier[urls] = identifier[urls] , identifier[directory] = identifier[directory] , identifier[check_files] = identifier[check_files] ) identifier[ret] =[] identifier[splits] =[( identifier[train] , identifier[train_filename] ),( identifier[dev] , identifier[dev_filename] ),( identifier[test] , identifier[test_filename] )] identifier[splits] =[ identifier[f] keyword[for] ( identifier[requested] , identifier[f] ) keyword[in] identifier[splits] keyword[if] identifier[requested] ] keyword[for] identifier[filename] keyword[in] identifier[splits] : identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] ) identifier[text] =[] keyword[with] identifier[io] . identifier[open] ( identifier[full_path] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[line] keyword[in] identifier[f] : identifier[text] . identifier[extend] ( identifier[line] . identifier[replace] ( literal[string] , identifier[unknown_token] ). identifier[split] ()) identifier[text] . identifier[append] ( identifier[eos_token] ) identifier[ret] . identifier[append] ( identifier[text] ) keyword[if] identifier[len] ( identifier[ret] )== literal[int] : keyword[return] identifier[ret] [ literal[int] ] keyword[else] : keyword[return] identifier[tuple] ( identifier[ret] )
def penn_treebank_dataset(directory='data/penn-treebank', train=False, dev=False, test=False, train_filename='ptb.train.txt', dev_filename='ptb.valid.txt', test_filename='ptb.test.txt', check_files=['ptb.train.txt'], urls=['https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt', 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt', 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'], unknown_token=DEFAULT_UNKNOWN_TOKEN, eos_token=DEFAULT_EOS_TOKEN): """ Load the Penn Treebank dataset. This is the Penn Treebank Project: Release 2 CDROM, featuring a million words of 1989 Wall Street Journal material. **Reference:** https://catalog.ldc.upenn.edu/ldc99t42 **Citation:** Marcus, Mitchell P., Marcinkiewicz, Mary Ann & Santorini, Beatrice (1993). Building a Large Annotated Corpus of English: The Penn Treebank Args: directory (str, optional): Directory to cache the dataset. train (bool, optional): If to load the training split of the dataset. dev (bool, optional): If to load the development split of the dataset. test (bool, optional): If to load the test split of the dataset. train_filename (str, optional): The filename of the training split. dev_filename (str, optional): The filename of the development split. test_filename (str, optional): The filename of the test split. name (str, optional): Name of the dataset directory. check_files (str, optional): Check if these files exist, then this download was successful. urls (str, optional): URLs to download. unknown_token (str, optional): Token to use for unknown words. eos_token (str, optional): Token to use at the end of sentences. Returns: :class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`: Returns between one and all dataset splits (train, dev and test) depending on if their respective boolean argument is ``True``. 
Example: >>> from torchnlp.datasets import penn_treebank_dataset # doctest: +SKIP >>> train = penn_treebank_dataset(train=True) # doctest: +SKIP >>> train[:10] # doctest: +SKIP ['aer', 'banknote', 'berlitz', 'calloway', 'centrust', 'cluett', 'fromstein', 'gitano', 'guterman', 'hydro-quebec'] """ download_files_maybe_extract(urls=urls, directory=directory, check_files=check_files) ret = [] splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)] splits = [f for (requested, f) in splits if requested] for filename in splits: full_path = os.path.join(directory, filename) text = [] with io.open(full_path, encoding='utf-8') as f: for line in f: text.extend(line.replace('<unk>', unknown_token).split()) text.append(eos_token) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] ret.append(text) # depends on [control=['for'], data=['filename']] if len(ret) == 1: return ret[0] # depends on [control=['if'], data=[]] else: return tuple(ret)
def remove_handler(args): """usage: {program} remove <anchor-id> [<path>] Remove an existing anchor. """ repo = _open_repo(args) anchor_id, anchor = _get_anchor(repo, args['<anchor-id>']) del repo[anchor_id] return ExitCode.OK
def function[remove_handler, parameter[args]]: constant[usage: {program} remove <anchor-id> [<path>] Remove an existing anchor. ] variable[repo] assign[=] call[name[_open_repo], parameter[name[args]]] <ast.Tuple object at 0x7da1b0a22860> assign[=] call[name[_get_anchor], parameter[name[repo], call[name[args]][constant[<anchor-id>]]]] <ast.Delete object at 0x7da1b0a23d60> return[name[ExitCode].OK]
keyword[def] identifier[remove_handler] ( identifier[args] ): literal[string] identifier[repo] = identifier[_open_repo] ( identifier[args] ) identifier[anchor_id] , identifier[anchor] = identifier[_get_anchor] ( identifier[repo] , identifier[args] [ literal[string] ]) keyword[del] identifier[repo] [ identifier[anchor_id] ] keyword[return] identifier[ExitCode] . identifier[OK]
def remove_handler(args): """usage: {program} remove <anchor-id> [<path>] Remove an existing anchor. """ repo = _open_repo(args) (anchor_id, anchor) = _get_anchor(repo, args['<anchor-id>']) del repo[anchor_id] return ExitCode.OK
def play_media(self, url, content_type, title=None, thumb=None, current_time=0, autoplay=True, stream_type=STREAM_TYPE_BUFFERED, metadata=None, subtitles=None, subtitles_lang='en-US', subtitles_mime='text/vtt', subtitle_id=1): """ Plays media on the Chromecast. Start default media receiver if not already started. Parameters: url: str - url of the media. content_type: str - mime type. Example: 'video/mp4'. title: str - title of the media. thumb: str - thumbnail image url. current_time: float - seconds from the beginning of the media to start playback. autoplay: bool - whether the media will automatically play. stream_type: str - describes the type of media artifact as one of the following: "NONE", "BUFFERED", "LIVE". subtitles: str - url of subtitle file to be shown on chromecast. subtitles_lang: str - language for subtitles. subtitles_mime: str - mimetype of subtitles. subtitle_id: int - id of subtitle to be loaded. metadata: dict - media metadata object, one of the following: GenericMediaMetadata, MovieMediaMetadata, TvShowMediaMetadata, MusicTrackMediaMetadata, PhotoMediaMetadata. Docs: https://developers.google.com/cast/docs/reference/messages#MediaData """ # pylint: disable=too-many-locals def app_launched_callback(): """Plays media after chromecast has switched to requested app.""" self._send_start_play_media( url, content_type, title, thumb, current_time, autoplay, stream_type, metadata, subtitles, subtitles_lang, subtitles_mime, subtitle_id) receiver_ctrl = self._socket_client.receiver_controller receiver_ctrl.launch_app(self.app_id, callback_function=app_launched_callback)
def function[play_media, parameter[self, url, content_type, title, thumb, current_time, autoplay, stream_type, metadata, subtitles, subtitles_lang, subtitles_mime, subtitle_id]]: constant[ Plays media on the Chromecast. Start default media receiver if not already started. Parameters: url: str - url of the media. content_type: str - mime type. Example: 'video/mp4'. title: str - title of the media. thumb: str - thumbnail image url. current_time: float - seconds from the beginning of the media to start playback. autoplay: bool - whether the media will automatically play. stream_type: str - describes the type of media artifact as one of the following: "NONE", "BUFFERED", "LIVE". subtitles: str - url of subtitle file to be shown on chromecast. subtitles_lang: str - language for subtitles. subtitles_mime: str - mimetype of subtitles. subtitle_id: int - id of subtitle to be loaded. metadata: dict - media metadata object, one of the following: GenericMediaMetadata, MovieMediaMetadata, TvShowMediaMetadata, MusicTrackMediaMetadata, PhotoMediaMetadata. Docs: https://developers.google.com/cast/docs/reference/messages#MediaData ] def function[app_launched_callback, parameter[]]: constant[Plays media after chromecast has switched to requested app.] call[name[self]._send_start_play_media, parameter[name[url], name[content_type], name[title], name[thumb], name[current_time], name[autoplay], name[stream_type], name[metadata], name[subtitles], name[subtitles_lang], name[subtitles_mime], name[subtitle_id]]] variable[receiver_ctrl] assign[=] name[self]._socket_client.receiver_controller call[name[receiver_ctrl].launch_app, parameter[name[self].app_id]]
keyword[def] identifier[play_media] ( identifier[self] , identifier[url] , identifier[content_type] , identifier[title] = keyword[None] , identifier[thumb] = keyword[None] , identifier[current_time] = literal[int] , identifier[autoplay] = keyword[True] , identifier[stream_type] = identifier[STREAM_TYPE_BUFFERED] , identifier[metadata] = keyword[None] , identifier[subtitles] = keyword[None] , identifier[subtitles_lang] = literal[string] , identifier[subtitles_mime] = literal[string] , identifier[subtitle_id] = literal[int] ): literal[string] keyword[def] identifier[app_launched_callback] (): literal[string] identifier[self] . identifier[_send_start_play_media] ( identifier[url] , identifier[content_type] , identifier[title] , identifier[thumb] , identifier[current_time] , identifier[autoplay] , identifier[stream_type] , identifier[metadata] , identifier[subtitles] , identifier[subtitles_lang] , identifier[subtitles_mime] , identifier[subtitle_id] ) identifier[receiver_ctrl] = identifier[self] . identifier[_socket_client] . identifier[receiver_controller] identifier[receiver_ctrl] . identifier[launch_app] ( identifier[self] . identifier[app_id] , identifier[callback_function] = identifier[app_launched_callback] )
def play_media(self, url, content_type, title=None, thumb=None, current_time=0, autoplay=True, stream_type=STREAM_TYPE_BUFFERED, metadata=None, subtitles=None, subtitles_lang='en-US', subtitles_mime='text/vtt', subtitle_id=1): """ Plays media on the Chromecast. Start default media receiver if not already started. Parameters: url: str - url of the media. content_type: str - mime type. Example: 'video/mp4'. title: str - title of the media. thumb: str - thumbnail image url. current_time: float - seconds from the beginning of the media to start playback. autoplay: bool - whether the media will automatically play. stream_type: str - describes the type of media artifact as one of the following: "NONE", "BUFFERED", "LIVE". subtitles: str - url of subtitle file to be shown on chromecast. subtitles_lang: str - language for subtitles. subtitles_mime: str - mimetype of subtitles. subtitle_id: int - id of subtitle to be loaded. metadata: dict - media metadata object, one of the following: GenericMediaMetadata, MovieMediaMetadata, TvShowMediaMetadata, MusicTrackMediaMetadata, PhotoMediaMetadata. Docs: https://developers.google.com/cast/docs/reference/messages#MediaData """ # pylint: disable=too-many-locals def app_launched_callback(): """Plays media after chromecast has switched to requested app.""" self._send_start_play_media(url, content_type, title, thumb, current_time, autoplay, stream_type, metadata, subtitles, subtitles_lang, subtitles_mime, subtitle_id) receiver_ctrl = self._socket_client.receiver_controller receiver_ctrl.launch_app(self.app_id, callback_function=app_launched_callback)