code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def acquire_hosting_device_slots(self, context, hosting_device, resource,
resource_type, resource_service, num,
exclusive=False):
"""Assign <num> slots in <hosting_device> to logical <resource>.
If exclusive is True the hosting device is bound to the resource's
tenant. Otherwise it is not bound to any tenant.
Returns True if allocation was granted, False otherwise.
"""
# Tenant the hosting device is currently bound to (None = unbound).
bound = hosting_device['tenant_bound']
# Reject up front when the device is bound to a different tenant, or
# exclusivity is requested but the device is not exclusively used by
# the resource's tenant.
if ((bound is not None and bound != resource['tenant_id']) or
(exclusive and not self._exclusively_used(context, hosting_device,
resource['tenant_id']))):
LOG.debug(
'Rejecting allocation of %(num)d slots in tenant %(bound)s '
'hosting device %(device)s to logical resource %(r_id)s due '
'to exclusive use conflict.',
{'num': num,
'bound': 'unbound' if bound is None else bound + ' bound',
'device': hosting_device['id'], 'r_id': resource['id']})
return False
# All allocation bookkeeping happens inside one DB (sub)transaction.
with context.session.begin(subtransactions=True):
res_info = {'resource': resource, 'type': resource_type,
'service': resource_service}
slot_info, query = self._get_or_create_slot_allocation(
context, hosting_device, res_info)
# No allocation record could be fetched or created -> reject.
if slot_info is None:
LOG.debug('Rejecting allocation of %(num)d slots in hosting '
'device %(device)s to logical resource %(r_id)s',
{'num': num, 'device': hosting_device['id'],
'r_id': resource['id']})
return False
new_allocation = num + slot_info.num_allocated
# Reject when the request would exceed the device template's
# slot capacity; a pool maintenance job is dispatched regardless.
if hosting_device['template']['slot_capacity'] < new_allocation:
LOG.debug('Rejecting allocation of %(num)d slots in '
'hosting device %(device)s to logical resource '
'%(r_id)s due to insufficent slot availability.',
{'num': num, 'device': hosting_device['id'],
'r_id': resource['id']})
self._dispatch_pool_maintenance_job(hosting_device['template'])
return False
# handle any changes to exclusive usage by tenant
if exclusive and bound is None:
# Bind the device to the requesting resource's tenant.
self._update_hosting_device_exclusivity(
context, hosting_device, resource['tenant_id'])
bound = resource['tenant_id']
elif not exclusive and bound is not None:
# Release a previous tenant binding.
self._update_hosting_device_exclusivity(context,
hosting_device, None)
bound = None
# Persist the updated allocation count.
slot_info.num_allocated = new_allocation
context.session.add(slot_info)
self._dispatch_pool_maintenance_job(hosting_device['template'])
# report success
LOG.info('Allocated %(num)d additional slots in tenant %(bound)s'
'bound hosting device %(hd_id)s. In total %(total)d '
'slots are now allocated in that hosting device for '
'logical resource %(r_id)s.',
{'num': num, 'bound': 'un-' if bound is None else bound + ' ',
'total': new_allocation, 'hd_id': hosting_device['id'],
'r_id': resource['id']})
return True | def function[acquire_hosting_device_slots, parameter[self, context, hosting_device, resource, resource_type, resource_service, num, exclusive]]:
constant[Assign <num> slots in <hosting_device> to logical <resource>.
If exclusive is True the hosting device is bound to the resource's
tenant. Otherwise it is not bound to any tenant.
Returns True if allocation was granted, False otherwise.
]
variable[bound] assign[=] call[name[hosting_device]][constant[tenant_bound]]
if <ast.BoolOp object at 0x7da1b1b14eb0> begin[:]
call[name[LOG].debug, parameter[constant[Rejecting allocation of %(num)d slots in tenant %(bound)s hosting device %(device)s to logical resource %(r_id)s due to exclusive use conflict.], dictionary[[<ast.Constant object at 0x7da1b1b16a40>, <ast.Constant object at 0x7da1b1b17520>, <ast.Constant object at 0x7da1b1b16dd0>, <ast.Constant object at 0x7da1b1b16d40>], [<ast.Name object at 0x7da1b1b16f20>, <ast.IfExp object at 0x7da1b1b17970>, <ast.Subscript object at 0x7da1b1b16020>, <ast.Subscript object at 0x7da1b1b16890>]]]]
return[constant[False]]
with call[name[context].session.begin, parameter[]] begin[:]
variable[res_info] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b16bf0>, <ast.Constant object at 0x7da1b1b17c10>, <ast.Constant object at 0x7da1b1b160e0>], [<ast.Name object at 0x7da1b1b17b80>, <ast.Name object at 0x7da1b1b16440>, <ast.Name object at 0x7da1b1b17670>]]
<ast.Tuple object at 0x7da18bc710c0> assign[=] call[name[self]._get_or_create_slot_allocation, parameter[name[context], name[hosting_device], name[res_info]]]
if compare[name[slot_info] is constant[None]] begin[:]
call[name[LOG].debug, parameter[constant[Rejecting allocation of %(num)d slots in hosting device %(device)s to logical resource %(r_id)s], dictionary[[<ast.Constant object at 0x7da1b1b86980>, <ast.Constant object at 0x7da1b1b849a0>, <ast.Constant object at 0x7da1b1b875b0>], [<ast.Name object at 0x7da1b1b1ae30>, <ast.Subscript object at 0x7da1b1b1b700>, <ast.Subscript object at 0x7da1b1b1b970>]]]]
return[constant[False]]
variable[new_allocation] assign[=] binary_operation[name[num] + name[slot_info].num_allocated]
if compare[call[call[name[hosting_device]][constant[template]]][constant[slot_capacity]] less[<] name[new_allocation]] begin[:]
call[name[LOG].debug, parameter[constant[Rejecting allocation of %(num)d slots in hosting device %(device)s to logical resource %(r_id)s due to insufficent slot availability.], dictionary[[<ast.Constant object at 0x7da1b1b1b340>, <ast.Constant object at 0x7da1b1b1b940>, <ast.Constant object at 0x7da1b1b1b100>], [<ast.Name object at 0x7da1b1b1af50>, <ast.Subscript object at 0x7da1b1b1b610>, <ast.Subscript object at 0x7da1b1b1a7d0>]]]]
call[name[self]._dispatch_pool_maintenance_job, parameter[call[name[hosting_device]][constant[template]]]]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b1b1aa40> begin[:]
call[name[self]._update_hosting_device_exclusivity, parameter[name[context], name[hosting_device], call[name[resource]][constant[tenant_id]]]]
variable[bound] assign[=] call[name[resource]][constant[tenant_id]]
name[slot_info].num_allocated assign[=] name[new_allocation]
call[name[context].session.add, parameter[name[slot_info]]]
call[name[self]._dispatch_pool_maintenance_job, parameter[call[name[hosting_device]][constant[template]]]]
call[name[LOG].info, parameter[constant[Allocated %(num)d additional slots in tenant %(bound)sbound hosting device %(hd_id)s. In total %(total)d slots are now allocated in that hosting device for logical resource %(r_id)s.], dictionary[[<ast.Constant object at 0x7da1b1b176a0>, <ast.Constant object at 0x7da1b1b17730>, <ast.Constant object at 0x7da1b1b16da0>, <ast.Constant object at 0x7da1b1b16830>, <ast.Constant object at 0x7da1b1b16d70>], [<ast.Name object at 0x7da1b1b17be0>, <ast.IfExp object at 0x7da1b1b16e00>, <ast.Name object at 0x7da1b1b14910>, <ast.Subscript object at 0x7da1b1b17ac0>, <ast.Subscript object at 0x7da1b1b16b90>]]]]
return[constant[True]] | keyword[def] identifier[acquire_hosting_device_slots] ( identifier[self] , identifier[context] , identifier[hosting_device] , identifier[resource] ,
identifier[resource_type] , identifier[resource_service] , identifier[num] ,
identifier[exclusive] = keyword[False] ):
literal[string]
identifier[bound] = identifier[hosting_device] [ literal[string] ]
keyword[if] (( identifier[bound] keyword[is] keyword[not] keyword[None] keyword[and] identifier[bound] != identifier[resource] [ literal[string] ]) keyword[or]
( identifier[exclusive] keyword[and] keyword[not] identifier[self] . identifier[_exclusively_used] ( identifier[context] , identifier[hosting_device] ,
identifier[resource] [ literal[string] ]))):
identifier[LOG] . identifier[debug] (
literal[string]
literal[string]
literal[string] ,
{ literal[string] : identifier[num] ,
literal[string] : literal[string] keyword[if] identifier[bound] keyword[is] keyword[None] keyword[else] identifier[bound] + literal[string] ,
literal[string] : identifier[hosting_device] [ literal[string] ], literal[string] : identifier[resource] [ literal[string] ]})
keyword[return] keyword[False]
keyword[with] identifier[context] . identifier[session] . identifier[begin] ( identifier[subtransactions] = keyword[True] ):
identifier[res_info] ={ literal[string] : identifier[resource] , literal[string] : identifier[resource_type] ,
literal[string] : identifier[resource_service] }
identifier[slot_info] , identifier[query] = identifier[self] . identifier[_get_or_create_slot_allocation] (
identifier[context] , identifier[hosting_device] , identifier[res_info] )
keyword[if] identifier[slot_info] keyword[is] keyword[None] :
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] ,
{ literal[string] : identifier[num] , literal[string] : identifier[hosting_device] [ literal[string] ],
literal[string] : identifier[resource] [ literal[string] ]})
keyword[return] keyword[False]
identifier[new_allocation] = identifier[num] + identifier[slot_info] . identifier[num_allocated]
keyword[if] identifier[hosting_device] [ literal[string] ][ literal[string] ]< identifier[new_allocation] :
identifier[LOG] . identifier[debug] ( literal[string]
literal[string]
literal[string] ,
{ literal[string] : identifier[num] , literal[string] : identifier[hosting_device] [ literal[string] ],
literal[string] : identifier[resource] [ literal[string] ]})
identifier[self] . identifier[_dispatch_pool_maintenance_job] ( identifier[hosting_device] [ literal[string] ])
keyword[return] keyword[False]
keyword[if] identifier[exclusive] keyword[and] identifier[bound] keyword[is] keyword[None] :
identifier[self] . identifier[_update_hosting_device_exclusivity] (
identifier[context] , identifier[hosting_device] , identifier[resource] [ literal[string] ])
identifier[bound] = identifier[resource] [ literal[string] ]
keyword[elif] keyword[not] identifier[exclusive] keyword[and] identifier[bound] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_update_hosting_device_exclusivity] ( identifier[context] ,
identifier[hosting_device] , keyword[None] )
identifier[bound] = keyword[None]
identifier[slot_info] . identifier[num_allocated] = identifier[new_allocation]
identifier[context] . identifier[session] . identifier[add] ( identifier[slot_info] )
identifier[self] . identifier[_dispatch_pool_maintenance_job] ( identifier[hosting_device] [ literal[string] ])
identifier[LOG] . identifier[info] ( literal[string]
literal[string]
literal[string]
literal[string] ,
{ literal[string] : identifier[num] , literal[string] : literal[string] keyword[if] identifier[bound] keyword[is] keyword[None] keyword[else] identifier[bound] + literal[string] ,
literal[string] : identifier[new_allocation] , literal[string] : identifier[hosting_device] [ literal[string] ],
literal[string] : identifier[resource] [ literal[string] ]})
keyword[return] keyword[True] | def acquire_hosting_device_slots(self, context, hosting_device, resource, resource_type, resource_service, num, exclusive=False):
"""Assign <num> slots in <hosting_device> to logical <resource>.
If exclusive is True the hosting device is bound to the resource's
tenant. Otherwise it is not bound to any tenant.
Returns True if allocation was granted, False otherwise.
"""
bound = hosting_device['tenant_bound']
if bound is not None and bound != resource['tenant_id'] or (exclusive and (not self._exclusively_used(context, hosting_device, resource['tenant_id']))):
LOG.debug('Rejecting allocation of %(num)d slots in tenant %(bound)s hosting device %(device)s to logical resource %(r_id)s due to exclusive use conflict.', {'num': num, 'bound': 'unbound' if bound is None else bound + ' bound', 'device': hosting_device['id'], 'r_id': resource['id']})
return False # depends on [control=['if'], data=[]]
with context.session.begin(subtransactions=True):
res_info = {'resource': resource, 'type': resource_type, 'service': resource_service}
(slot_info, query) = self._get_or_create_slot_allocation(context, hosting_device, res_info)
if slot_info is None:
LOG.debug('Rejecting allocation of %(num)d slots in hosting device %(device)s to logical resource %(r_id)s', {'num': num, 'device': hosting_device['id'], 'r_id': resource['id']})
return False # depends on [control=['if'], data=[]]
new_allocation = num + slot_info.num_allocated
if hosting_device['template']['slot_capacity'] < new_allocation:
LOG.debug('Rejecting allocation of %(num)d slots in hosting device %(device)s to logical resource %(r_id)s due to insufficent slot availability.', {'num': num, 'device': hosting_device['id'], 'r_id': resource['id']})
self._dispatch_pool_maintenance_job(hosting_device['template'])
return False # depends on [control=['if'], data=[]]
# handle any changes to exclusive usage by tenant
if exclusive and bound is None:
self._update_hosting_device_exclusivity(context, hosting_device, resource['tenant_id'])
bound = resource['tenant_id'] # depends on [control=['if'], data=[]]
elif not exclusive and bound is not None:
self._update_hosting_device_exclusivity(context, hosting_device, None)
bound = None # depends on [control=['if'], data=[]]
slot_info.num_allocated = new_allocation
context.session.add(slot_info) # depends on [control=['with'], data=[]]
self._dispatch_pool_maintenance_job(hosting_device['template'])
# report success
LOG.info('Allocated %(num)d additional slots in tenant %(bound)sbound hosting device %(hd_id)s. In total %(total)d slots are now allocated in that hosting device for logical resource %(r_id)s.', {'num': num, 'bound': 'un-' if bound is None else bound + ' ', 'total': new_allocation, 'hd_id': hosting_device['id'], 'r_id': resource['id']})
return True |
def choice_type(self, tchain, p_elem):
"""Handle ``enumeration`` and ``union`` types."""
# Wrap the alternatives in a schema <choice> node under the parent.
elem = SchemaNode.choice(p_elem, occur=2)
# Recurse into the substatements of the head of the type chain.
self.handle_substmts(tchain[0], elem) | def function[choice_type, parameter[self, tchain, p_elem]]:
constant[Handle ``enumeration`` and ``union`` types.]
variable[elem] assign[=] call[name[SchemaNode].choice, parameter[name[p_elem]]]
call[name[self].handle_substmts, parameter[call[name[tchain]][constant[0]], name[elem]]] | keyword[def] identifier[choice_type] ( identifier[self] , identifier[tchain] , identifier[p_elem] ):
literal[string]
identifier[elem] = identifier[SchemaNode] . identifier[choice] ( identifier[p_elem] , identifier[occur] = literal[int] )
identifier[self] . identifier[handle_substmts] ( identifier[tchain] [ literal[int] ], identifier[elem] ) | def choice_type(self, tchain, p_elem):
"""Handle ``enumeration`` and ``union`` types."""
elem = SchemaNode.choice(p_elem, occur=2)
self.handle_substmts(tchain[0], elem) |
def _gatherLookupIndexes(gpos):
"""
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
"""
# gather the indexes of the kern features
kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == "kern"]
# find scripts and languages that have kern features
scriptKernFeatureIndexes = {}
for scriptRecord in gpos.ScriptList.ScriptRecord:
script = scriptRecord.ScriptTag
# (language tag, [feature indexes]) pairs; None = default lang sys.
thisScriptKernFeatureIndexes = []
defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
if defaultLangSysRecord is not None:
f = []
for featureIndex in defaultLangSysRecord.FeatureIndex:
# keep only the kern feature indexes gathered above
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((None, f))
if scriptRecord.Script.LangSysRecord is not None:
# same filtering for each explicit language system
for langSysRecord in scriptRecord.Script.LangSysRecord:
langSys = langSysRecord.LangSysTag
f = []
for featureIndex in langSysRecord.LangSys.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((langSys, f))
scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
# convert the feature indexes to lookup indexes
scriptLookupIndexes = {}
for script, featureDefinitions in scriptKernFeatureIndexes.items():
lookupIndexes = scriptLookupIndexes[script] = []
for language, featureIndexes in featureDefinitions:
for featureIndex in featureIndexes:
featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
# de-duplicate while preserving discovery order
for lookupIndex in featureRecord.Feature.LookupListIndex:
if lookupIndex not in lookupIndexes:
lookupIndexes.append(lookupIndex)
# done
return scriptLookupIndexes | def function[_gatherLookupIndexes, parameter[gpos]]:
constant[
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
]
variable[kernFeatureIndexes] assign[=] <ast.ListComp object at 0x7da204346350>
variable[scriptKernFeatureIndexes] assign[=] dictionary[[], []]
for taget[name[scriptRecord]] in starred[name[gpos].ScriptList.ScriptRecord] begin[:]
variable[script] assign[=] name[scriptRecord].ScriptTag
variable[thisScriptKernFeatureIndexes] assign[=] list[[]]
variable[defaultLangSysRecord] assign[=] name[scriptRecord].Script.DefaultLangSys
if compare[name[defaultLangSysRecord] is_not constant[None]] begin[:]
variable[f] assign[=] list[[]]
for taget[name[featureIndex]] in starred[name[defaultLangSysRecord].FeatureIndex] begin[:]
if compare[name[featureIndex] <ast.NotIn object at 0x7da2590d7190> name[kernFeatureIndexes]] begin[:]
continue
call[name[f].append, parameter[name[featureIndex]]]
if name[f] begin[:]
call[name[thisScriptKernFeatureIndexes].append, parameter[tuple[[<ast.Constant object at 0x7da204345b40>, <ast.Name object at 0x7da204344eb0>]]]]
if compare[name[scriptRecord].Script.LangSysRecord is_not constant[None]] begin[:]
for taget[name[langSysRecord]] in starred[name[scriptRecord].Script.LangSysRecord] begin[:]
variable[langSys] assign[=] name[langSysRecord].LangSysTag
variable[f] assign[=] list[[]]
for taget[name[featureIndex]] in starred[name[langSysRecord].LangSys.FeatureIndex] begin[:]
if compare[name[featureIndex] <ast.NotIn object at 0x7da2590d7190> name[kernFeatureIndexes]] begin[:]
continue
call[name[f].append, parameter[name[featureIndex]]]
if name[f] begin[:]
call[name[thisScriptKernFeatureIndexes].append, parameter[tuple[[<ast.Name object at 0x7da2043447c0>, <ast.Name object at 0x7da2043451e0>]]]]
call[name[scriptKernFeatureIndexes]][name[script]] assign[=] name[thisScriptKernFeatureIndexes]
variable[scriptLookupIndexes] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2043456f0>, <ast.Name object at 0x7da2043478e0>]]] in starred[call[name[scriptKernFeatureIndexes].items, parameter[]]] begin[:]
variable[lookupIndexes] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204346740>, <ast.Name object at 0x7da204346080>]]] in starred[name[featureDefinitions]] begin[:]
for taget[name[featureIndex]] in starred[name[featureIndexes]] begin[:]
variable[featureRecord] assign[=] call[name[gpos].FeatureList.FeatureRecord][name[featureIndex]]
for taget[name[lookupIndex]] in starred[name[featureRecord].Feature.LookupListIndex] begin[:]
if compare[name[lookupIndex] <ast.NotIn object at 0x7da2590d7190> name[lookupIndexes]] begin[:]
call[name[lookupIndexes].append, parameter[name[lookupIndex]]]
return[name[scriptLookupIndexes]] | keyword[def] identifier[_gatherLookupIndexes] ( identifier[gpos] ):
literal[string]
identifier[kernFeatureIndexes] =[ identifier[index] keyword[for] identifier[index] , identifier[featureRecord] keyword[in] identifier[enumerate] ( identifier[gpos] . identifier[FeatureList] . identifier[FeatureRecord] ) keyword[if] identifier[featureRecord] . identifier[FeatureTag] == literal[string] ]
identifier[scriptKernFeatureIndexes] ={}
keyword[for] identifier[scriptRecord] keyword[in] identifier[gpos] . identifier[ScriptList] . identifier[ScriptRecord] :
identifier[script] = identifier[scriptRecord] . identifier[ScriptTag]
identifier[thisScriptKernFeatureIndexes] =[]
identifier[defaultLangSysRecord] = identifier[scriptRecord] . identifier[Script] . identifier[DefaultLangSys]
keyword[if] identifier[defaultLangSysRecord] keyword[is] keyword[not] keyword[None] :
identifier[f] =[]
keyword[for] identifier[featureIndex] keyword[in] identifier[defaultLangSysRecord] . identifier[FeatureIndex] :
keyword[if] identifier[featureIndex] keyword[not] keyword[in] identifier[kernFeatureIndexes] :
keyword[continue]
identifier[f] . identifier[append] ( identifier[featureIndex] )
keyword[if] identifier[f] :
identifier[thisScriptKernFeatureIndexes] . identifier[append] (( keyword[None] , identifier[f] ))
keyword[if] identifier[scriptRecord] . identifier[Script] . identifier[LangSysRecord] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[langSysRecord] keyword[in] identifier[scriptRecord] . identifier[Script] . identifier[LangSysRecord] :
identifier[langSys] = identifier[langSysRecord] . identifier[LangSysTag]
identifier[f] =[]
keyword[for] identifier[featureIndex] keyword[in] identifier[langSysRecord] . identifier[LangSys] . identifier[FeatureIndex] :
keyword[if] identifier[featureIndex] keyword[not] keyword[in] identifier[kernFeatureIndexes] :
keyword[continue]
identifier[f] . identifier[append] ( identifier[featureIndex] )
keyword[if] identifier[f] :
identifier[thisScriptKernFeatureIndexes] . identifier[append] (( identifier[langSys] , identifier[f] ))
identifier[scriptKernFeatureIndexes] [ identifier[script] ]= identifier[thisScriptKernFeatureIndexes]
identifier[scriptLookupIndexes] ={}
keyword[for] identifier[script] , identifier[featureDefinitions] keyword[in] identifier[scriptKernFeatureIndexes] . identifier[items] ():
identifier[lookupIndexes] = identifier[scriptLookupIndexes] [ identifier[script] ]=[]
keyword[for] identifier[language] , identifier[featureIndexes] keyword[in] identifier[featureDefinitions] :
keyword[for] identifier[featureIndex] keyword[in] identifier[featureIndexes] :
identifier[featureRecord] = identifier[gpos] . identifier[FeatureList] . identifier[FeatureRecord] [ identifier[featureIndex] ]
keyword[for] identifier[lookupIndex] keyword[in] identifier[featureRecord] . identifier[Feature] . identifier[LookupListIndex] :
keyword[if] identifier[lookupIndex] keyword[not] keyword[in] identifier[lookupIndexes] :
identifier[lookupIndexes] . identifier[append] ( identifier[lookupIndex] )
keyword[return] identifier[scriptLookupIndexes] | def _gatherLookupIndexes(gpos):
"""
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
"""
# gather the indexes of the kern features
kernFeatureIndexes = [index for (index, featureRecord) in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == 'kern']
# find scripts and languages that have kern features
scriptKernFeatureIndexes = {}
for scriptRecord in gpos.ScriptList.ScriptRecord:
script = scriptRecord.ScriptTag
thisScriptKernFeatureIndexes = []
defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
if defaultLangSysRecord is not None:
f = []
for featureIndex in defaultLangSysRecord.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue # depends on [control=['if'], data=[]]
f.append(featureIndex) # depends on [control=['for'], data=['featureIndex']]
if f:
thisScriptKernFeatureIndexes.append((None, f)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['defaultLangSysRecord']]
if scriptRecord.Script.LangSysRecord is not None:
for langSysRecord in scriptRecord.Script.LangSysRecord:
langSys = langSysRecord.LangSysTag
f = []
for featureIndex in langSysRecord.LangSys.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue # depends on [control=['if'], data=[]]
f.append(featureIndex) # depends on [control=['for'], data=['featureIndex']]
if f:
thisScriptKernFeatureIndexes.append((langSys, f)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['langSysRecord']] # depends on [control=['if'], data=[]]
scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes # depends on [control=['for'], data=['scriptRecord']]
# convert the feature indexes to lookup indexes
scriptLookupIndexes = {}
for (script, featureDefinitions) in scriptKernFeatureIndexes.items():
lookupIndexes = scriptLookupIndexes[script] = []
for (language, featureIndexes) in featureDefinitions:
for featureIndex in featureIndexes:
featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
for lookupIndex in featureRecord.Feature.LookupListIndex:
if lookupIndex not in lookupIndexes:
lookupIndexes.append(lookupIndex) # depends on [control=['if'], data=['lookupIndex', 'lookupIndexes']] # depends on [control=['for'], data=['lookupIndex']] # depends on [control=['for'], data=['featureIndex']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# done
return scriptLookupIndexes |
def encloses(self,
location: FileLocation
) -> Optional[FunctionDesc]:
"""
Returns the function, if any, that encloses a given location.
"""
# Only functions declared in the same file can enclose the location.
for func in self.in_file(location.filename):
if location in func.location:
return func
# No enclosing function found.
return None | def function[encloses, parameter[self, location]]:
constant[
Returns the function, if any, that encloses a given location.
]
for taget[name[func]] in starred[call[name[self].in_file, parameter[name[location].filename]]] begin[:]
if compare[name[location] in name[func].location] begin[:]
return[name[func]]
return[constant[None]] | keyword[def] identifier[encloses] ( identifier[self] ,
identifier[location] : identifier[FileLocation]
)-> identifier[Optional] [ identifier[FunctionDesc] ]:
literal[string]
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[in_file] ( identifier[location] . identifier[filename] ):
keyword[if] identifier[location] keyword[in] identifier[func] . identifier[location] :
keyword[return] identifier[func]
keyword[return] keyword[None] | def encloses(self, location: FileLocation) -> Optional[FunctionDesc]:
"""
Returns the function, if any, that encloses a given location.
"""
for func in self.in_file(location.filename):
if location in func.location:
return func # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func']]
return None |
def start(self, *args, **kwargs):
"""Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
confusing name.
"""
# Emit a DeprecationWarning, then delegate unchanged to add().
warnings.warn(
"The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning
)
return self.add(*args, **kwargs) | def function[start, parameter[self]]:
constant[Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
confusing name.
]
call[name[warnings].warn, parameter[constant[The 'start()' method is deprecated, please use 'add()' instead], name[DeprecationWarning]]]
return[call[name[self].add, parameter[<ast.Starred object at 0x7da1b2043610>]]] | keyword[def] identifier[start] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[warnings] . identifier[warn] (
literal[string] , identifier[DeprecationWarning]
)
keyword[return] identifier[self] . identifier[add] (* identifier[args] ,** identifier[kwargs] ) | def start(self, *args, **kwargs):
"""Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
confusing name.
"""
warnings.warn("The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning)
return self.add(*args, **kwargs) |
def c_checker(code, working_directory):
"""Return checker."""
# Check C99 code using the compiler named by $CC (default: gcc).
return gcc_checker(code, '.c',
[os.getenv('CC', 'gcc'), '-std=c99'] + INCLUDE_FLAGS,
working_directory=working_directory) | def function[c_checker, parameter[code, working_directory]]:
constant[Return checker.]
return[call[name[gcc_checker], parameter[name[code], constant[.c], binary_operation[list[[<ast.Call object at 0x7da1b08fbb20>, <ast.Constant object at 0x7da1b08fb010>]] + name[INCLUDE_FLAGS]]]]] | keyword[def] identifier[c_checker] ( identifier[code] , identifier[working_directory] ):
literal[string]
keyword[return] identifier[gcc_checker] ( identifier[code] , literal[string] ,
[ identifier[os] . identifier[getenv] ( literal[string] , literal[string] ), literal[string] ]+ identifier[INCLUDE_FLAGS] ,
identifier[working_directory] = identifier[working_directory] ) | def c_checker(code, working_directory):
"""Return checker."""
return gcc_checker(code, '.c', [os.getenv('CC', 'gcc'), '-std=c99'] + INCLUDE_FLAGS, working_directory=working_directory) |
def run(self):
"""Execute the task on all the input and send the needed number of EXIT at the end"""
# input is None for a producer task, an iterable otherwise.
input = self._consume()
# Local alias for the output queue's put (used in the item loop).
put_item = self._que_out.put
try:
if input is None: # producer
res = self._callable(*self._args, **self._kwargs)
else:
res = self._callable(input, *self._args, **self._kwargs)
if res != None:
# Forward every produced item downstream.
for item in res:
put_item(item)
except Exception as e:
# we catch an error, we send on the error que, we consume the input and we exit
# consuming the input queue avoid to keep running processes before exiting with
# errors
self._que_err.put((self.name, e))
if input is not None:
# Drain the remaining input so upstream stages do not block.
for i in input:
pass
raise
finally:
# One EXIT sentinel per follower, plus one on the error queue.
for i in range(self._num_followers):
put_item(EXIT)
self._que_err.put(EXIT) | def function[run, parameter[self]]:
constant[Execute the task on all the input and send the needed number of EXIT at the end]
variable[input] assign[=] call[name[self]._consume, parameter[]]
variable[put_item] assign[=] name[self]._que_out.put
<ast.Try object at 0x7da1b20d5810> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[input] = identifier[self] . identifier[_consume] ()
identifier[put_item] = identifier[self] . identifier[_que_out] . identifier[put]
keyword[try] :
keyword[if] identifier[input] keyword[is] keyword[None] :
identifier[res] = identifier[self] . identifier[_callable] (* identifier[self] . identifier[_args] ,** identifier[self] . identifier[_kwargs] )
keyword[else] :
identifier[res] = identifier[self] . identifier[_callable] ( identifier[input] ,* identifier[self] . identifier[_args] ,** identifier[self] . identifier[_kwargs] )
keyword[if] identifier[res] != keyword[None] :
keyword[for] identifier[item] keyword[in] identifier[res] :
identifier[put_item] ( identifier[item] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[_que_err] . identifier[put] (( identifier[self] . identifier[name] , identifier[e] ))
keyword[if] identifier[input] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[i] keyword[in] identifier[input] :
keyword[pass]
keyword[raise]
keyword[finally] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_num_followers] ):
identifier[put_item] ( identifier[EXIT] )
identifier[self] . identifier[_que_err] . identifier[put] ( identifier[EXIT] ) | def run(self):
"""Execute the task on all the input and send the needed number of EXIT at the end"""
input = self._consume()
put_item = self._que_out.put
try:
if input is None: # producer
res = self._callable(*self._args, **self._kwargs) # depends on [control=['if'], data=[]]
else:
res = self._callable(input, *self._args, **self._kwargs)
if res != None:
for item in res:
put_item(item) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=['res']] # depends on [control=['try'], data=[]]
except Exception as e:
# we catch an error, we send on the error que, we consume the input and we exit
# consuming the input queue avoid to keep running processes before exiting with
# errors
self._que_err.put((self.name, e))
if input is not None:
for i in input:
pass # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['input']]
raise # depends on [control=['except'], data=['e']]
finally:
for i in range(self._num_followers):
put_item(EXIT) # depends on [control=['for'], data=[]]
self._que_err.put(EXIT) |
def cpuinfo():
''' Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
cpu_info['proc1']={...}
'''
cpuinfo=OrderedDict()
procinfo=OrderedDict()
nprocs = 0
with open('/proc/cpuinfo') as f:
for line in f:
if not line.strip():
# end of one processor
cpuinfo['proc%s' % nprocs] = procinfo
nprocs=nprocs+1
# Reset
procinfo=OrderedDict()
else:
if len(line.split(':')) == 2:
procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
else:
procinfo[line.split(':')[0].strip()] = ''
return cpuinfo | def function[cpuinfo, parameter[]]:
constant[ Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
cpu_info['proc1']={...}
]
variable[cpuinfo] assign[=] call[name[OrderedDict], parameter[]]
variable[procinfo] assign[=] call[name[OrderedDict], parameter[]]
variable[nprocs] assign[=] constant[0]
with call[name[open], parameter[constant[/proc/cpuinfo]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if <ast.UnaryOp object at 0x7da18bcc9a20> begin[:]
call[name[cpuinfo]][binary_operation[constant[proc%s] <ast.Mod object at 0x7da2590d6920> name[nprocs]]] assign[=] name[procinfo]
variable[nprocs] assign[=] binary_operation[name[nprocs] + constant[1]]
variable[procinfo] assign[=] call[name[OrderedDict], parameter[]]
return[name[cpuinfo]] | keyword[def] identifier[cpuinfo] ():
literal[string]
identifier[cpuinfo] = identifier[OrderedDict] ()
identifier[procinfo] = identifier[OrderedDict] ()
identifier[nprocs] = literal[int]
keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] keyword[not] identifier[line] . identifier[strip] ():
identifier[cpuinfo] [ literal[string] % identifier[nprocs] ]= identifier[procinfo]
identifier[nprocs] = identifier[nprocs] + literal[int]
identifier[procinfo] = identifier[OrderedDict] ()
keyword[else] :
keyword[if] identifier[len] ( identifier[line] . identifier[split] ( literal[string] ))== literal[int] :
identifier[procinfo] [ identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()]= identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[else] :
identifier[procinfo] [ identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()]= literal[string]
keyword[return] identifier[cpuinfo] | def cpuinfo():
""" Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
cpu_info['proc1']={...}
"""
cpuinfo = OrderedDict()
procinfo = OrderedDict()
nprocs = 0
with open('/proc/cpuinfo') as f:
for line in f:
if not line.strip():
# end of one processor
cpuinfo['proc%s' % nprocs] = procinfo
nprocs = nprocs + 1
# Reset
procinfo = OrderedDict() # depends on [control=['if'], data=[]]
elif len(line.split(':')) == 2:
procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip() # depends on [control=['if'], data=[]]
else:
procinfo[line.split(':')[0].strip()] = '' # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
return cpuinfo |
def print_model(self,sep='\n'):
"""prints model contained in best inds, if ml has a coefficient property.
otherwise, prints the features generated by FEW."""
model = ''
# print('ml type:',self.ml_type)
# print('ml:',self._best_estimator)
if self._best_inds:
if self.ml_type == 'GridSearchCV':
ml = self._best_estimator.named_steps['ml'].best_estimator_
else:
ml = self._best_estimator.named_steps['ml']
if self.ml_type != 'SVC' and self.ml_type != 'SVR':
# this is need because svm has a bug that throws valueerror on
# attribute check
if hasattr(ml,'coef_'):
if len(ml.coef_.shape)==1:
s = np.argsort(np.abs(ml.coef_))[::-1]
scoef = ml.coef_[s]
bi = [self._best_inds[k] for k in s]
model = (' +' + sep).join(
[str(round(c,3))+'*'+self.stack_2_eqn(f)
for i,(f,c) in enumerate(zip(bi,scoef))
if round(scoef[i],3) != 0])
else:
# more than one decision function is fit. print all.
for j,coef in enumerate(ml.coef_):
s = np.argsort(np.abs(coef))[::-1]
scoef = coef[s]
bi =[self._best_inds[k] for k in s]
model += sep + 'class'+str(j)+' :'+' + '.join(
[str(round(c,3))+'*'+self.stack_2_eqn(f)
for i,(f,c) in enumerate(zip(bi,coef))
if coef[i] != 0])
elif hasattr(ml,'feature_importances_'):
s = np.argsort(ml.feature_importances_)[::-1]
sfi = ml.feature_importances_[s]
bi = [self._best_inds[k] for k in s]
# model = 'importance:feature'+sep
model += sep.join(
[str(round(c,3))+':'+self.stack_2_eqn(f)
for i,(f,c) in enumerate(zip(bi,sfi))
if round(sfi[i],3) != 0])
else:
return sep.join(self.stacks_2_eqns(self._best_inds))
else:
return sep.join(self.stacks_2_eqns(self._best_inds))
else:
return 'original features'
return model | def function[print_model, parameter[self, sep]]:
constant[prints model contained in best inds, if ml has a coefficient property.
otherwise, prints the features generated by FEW.]
variable[model] assign[=] constant[]
if name[self]._best_inds begin[:]
if compare[name[self].ml_type equal[==] constant[GridSearchCV]] begin[:]
variable[ml] assign[=] call[name[self]._best_estimator.named_steps][constant[ml]].best_estimator_
if <ast.BoolOp object at 0x7da1b1971f30> begin[:]
if call[name[hasattr], parameter[name[ml], constant[coef_]]] begin[:]
if compare[call[name[len], parameter[name[ml].coef_.shape]] equal[==] constant[1]] begin[:]
variable[s] assign[=] call[call[name[np].argsort, parameter[call[name[np].abs, parameter[name[ml].coef_]]]]][<ast.Slice object at 0x7da1b19722c0>]
variable[scoef] assign[=] call[name[ml].coef_][name[s]]
variable[bi] assign[=] <ast.ListComp object at 0x7da1b1970c40>
variable[model] assign[=] call[binary_operation[constant[ +] + name[sep]].join, parameter[<ast.ListComp object at 0x7da1b1971fc0>]]
return[name[model]] | keyword[def] identifier[print_model] ( identifier[self] , identifier[sep] = literal[string] ):
literal[string]
identifier[model] = literal[string]
keyword[if] identifier[self] . identifier[_best_inds] :
keyword[if] identifier[self] . identifier[ml_type] == literal[string] :
identifier[ml] = identifier[self] . identifier[_best_estimator] . identifier[named_steps] [ literal[string] ]. identifier[best_estimator_]
keyword[else] :
identifier[ml] = identifier[self] . identifier[_best_estimator] . identifier[named_steps] [ literal[string] ]
keyword[if] identifier[self] . identifier[ml_type] != literal[string] keyword[and] identifier[self] . identifier[ml_type] != literal[string] :
keyword[if] identifier[hasattr] ( identifier[ml] , literal[string] ):
keyword[if] identifier[len] ( identifier[ml] . identifier[coef_] . identifier[shape] )== literal[int] :
identifier[s] = identifier[np] . identifier[argsort] ( identifier[np] . identifier[abs] ( identifier[ml] . identifier[coef_] ))[::- literal[int] ]
identifier[scoef] = identifier[ml] . identifier[coef_] [ identifier[s] ]
identifier[bi] =[ identifier[self] . identifier[_best_inds] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[s] ]
identifier[model] =( literal[string] + identifier[sep] ). identifier[join] (
[ identifier[str] ( identifier[round] ( identifier[c] , literal[int] ))+ literal[string] + identifier[self] . identifier[stack_2_eqn] ( identifier[f] )
keyword[for] identifier[i] ,( identifier[f] , identifier[c] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[bi] , identifier[scoef] ))
keyword[if] identifier[round] ( identifier[scoef] [ identifier[i] ], literal[int] )!= literal[int] ])
keyword[else] :
keyword[for] identifier[j] , identifier[coef] keyword[in] identifier[enumerate] ( identifier[ml] . identifier[coef_] ):
identifier[s] = identifier[np] . identifier[argsort] ( identifier[np] . identifier[abs] ( identifier[coef] ))[::- literal[int] ]
identifier[scoef] = identifier[coef] [ identifier[s] ]
identifier[bi] =[ identifier[self] . identifier[_best_inds] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[s] ]
identifier[model] += identifier[sep] + literal[string] + identifier[str] ( identifier[j] )+ literal[string] + literal[string] . identifier[join] (
[ identifier[str] ( identifier[round] ( identifier[c] , literal[int] ))+ literal[string] + identifier[self] . identifier[stack_2_eqn] ( identifier[f] )
keyword[for] identifier[i] ,( identifier[f] , identifier[c] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[bi] , identifier[coef] ))
keyword[if] identifier[coef] [ identifier[i] ]!= literal[int] ])
keyword[elif] identifier[hasattr] ( identifier[ml] , literal[string] ):
identifier[s] = identifier[np] . identifier[argsort] ( identifier[ml] . identifier[feature_importances_] )[::- literal[int] ]
identifier[sfi] = identifier[ml] . identifier[feature_importances_] [ identifier[s] ]
identifier[bi] =[ identifier[self] . identifier[_best_inds] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[s] ]
identifier[model] += identifier[sep] . identifier[join] (
[ identifier[str] ( identifier[round] ( identifier[c] , literal[int] ))+ literal[string] + identifier[self] . identifier[stack_2_eqn] ( identifier[f] )
keyword[for] identifier[i] ,( identifier[f] , identifier[c] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[bi] , identifier[sfi] ))
keyword[if] identifier[round] ( identifier[sfi] [ identifier[i] ], literal[int] )!= literal[int] ])
keyword[else] :
keyword[return] identifier[sep] . identifier[join] ( identifier[self] . identifier[stacks_2_eqns] ( identifier[self] . identifier[_best_inds] ))
keyword[else] :
keyword[return] identifier[sep] . identifier[join] ( identifier[self] . identifier[stacks_2_eqns] ( identifier[self] . identifier[_best_inds] ))
keyword[else] :
keyword[return] literal[string]
keyword[return] identifier[model] | def print_model(self, sep='\n'):
"""prints model contained in best inds, if ml has a coefficient property.
otherwise, prints the features generated by FEW."""
model = ''
# print('ml type:',self.ml_type)
# print('ml:',self._best_estimator)
if self._best_inds:
if self.ml_type == 'GridSearchCV':
ml = self._best_estimator.named_steps['ml'].best_estimator_ # depends on [control=['if'], data=[]]
else:
ml = self._best_estimator.named_steps['ml']
if self.ml_type != 'SVC' and self.ml_type != 'SVR':
# this is need because svm has a bug that throws valueerror on
# attribute check
if hasattr(ml, 'coef_'):
if len(ml.coef_.shape) == 1:
s = np.argsort(np.abs(ml.coef_))[::-1]
scoef = ml.coef_[s]
bi = [self._best_inds[k] for k in s]
model = (' +' + sep).join([str(round(c, 3)) + '*' + self.stack_2_eqn(f) for (i, (f, c)) in enumerate(zip(bi, scoef)) if round(scoef[i], 3) != 0]) # depends on [control=['if'], data=[]]
else:
# more than one decision function is fit. print all.
for (j, coef) in enumerate(ml.coef_):
s = np.argsort(np.abs(coef))[::-1]
scoef = coef[s]
bi = [self._best_inds[k] for k in s]
model += sep + 'class' + str(j) + ' :' + ' + '.join([str(round(c, 3)) + '*' + self.stack_2_eqn(f) for (i, (f, c)) in enumerate(zip(bi, coef)) if coef[i] != 0]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif hasattr(ml, 'feature_importances_'):
s = np.argsort(ml.feature_importances_)[::-1]
sfi = ml.feature_importances_[s]
bi = [self._best_inds[k] for k in s]
# model = 'importance:feature'+sep
model += sep.join([str(round(c, 3)) + ':' + self.stack_2_eqn(f) for (i, (f, c)) in enumerate(zip(bi, sfi)) if round(sfi[i], 3) != 0]) # depends on [control=['if'], data=[]]
else:
return sep.join(self.stacks_2_eqns(self._best_inds)) # depends on [control=['if'], data=[]]
else:
return sep.join(self.stacks_2_eqns(self._best_inds)) # depends on [control=['if'], data=[]]
else:
return 'original features'
return model |
def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu()
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label)
filter_complex_cmd = ';'.join(all_filters)
return filter_complex_cmd, output_labels | def function[_get_audio_filter_cmd, parameter[self]]:
constant[
Return filter_complex command and output labels needed
]
variable[all_filters] assign[=] list[[]]
variable[output_labels] assign[=] list[[]]
for taget[name[audio_stream]] in starred[call[call[name[self].streams][constant[audio]].values, parameter[]]] begin[:]
if compare[name[self].ffmpeg_normalize.normalization_type equal[==] constant[ebu]] begin[:]
variable[stream_filter] assign[=] call[name[audio_stream].get_second_pass_opts_ebu, parameter[]]
variable[input_label] assign[=] call[constant[[0:{}]].format, parameter[name[audio_stream].stream_id]]
variable[output_label] assign[=] call[constant[[norm{}]].format, parameter[name[audio_stream].stream_id]]
call[name[output_labels].append, parameter[name[output_label]]]
call[name[all_filters].append, parameter[binary_operation[binary_operation[name[input_label] + name[stream_filter]] + name[output_label]]]]
variable[filter_complex_cmd] assign[=] call[constant[;].join, parameter[name[all_filters]]]
return[tuple[[<ast.Name object at 0x7da2044c0dc0>, <ast.Name object at 0x7da2044c3cd0>]]] | keyword[def] identifier[_get_audio_filter_cmd] ( identifier[self] ):
literal[string]
identifier[all_filters] =[]
identifier[output_labels] =[]
keyword[for] identifier[audio_stream] keyword[in] identifier[self] . identifier[streams] [ literal[string] ]. identifier[values] ():
keyword[if] identifier[self] . identifier[ffmpeg_normalize] . identifier[normalization_type] == literal[string] :
identifier[stream_filter] = identifier[audio_stream] . identifier[get_second_pass_opts_ebu] ()
keyword[else] :
identifier[stream_filter] = identifier[audio_stream] . identifier[get_second_pass_opts_peakrms] ()
identifier[input_label] = literal[string] . identifier[format] ( identifier[audio_stream] . identifier[stream_id] )
identifier[output_label] = literal[string] . identifier[format] ( identifier[audio_stream] . identifier[stream_id] )
identifier[output_labels] . identifier[append] ( identifier[output_label] )
identifier[all_filters] . identifier[append] ( identifier[input_label] + identifier[stream_filter] + identifier[output_label] )
identifier[filter_complex_cmd] = literal[string] . identifier[join] ( identifier[all_filters] )
keyword[return] identifier[filter_complex_cmd] , identifier[output_labels] | def _get_audio_filter_cmd(self):
"""
Return filter_complex command and output labels needed
"""
all_filters = []
output_labels = []
for audio_stream in self.streams['audio'].values():
if self.ffmpeg_normalize.normalization_type == 'ebu':
stream_filter = audio_stream.get_second_pass_opts_ebu() # depends on [control=['if'], data=[]]
else:
stream_filter = audio_stream.get_second_pass_opts_peakrms()
input_label = '[0:{}]'.format(audio_stream.stream_id)
output_label = '[norm{}]'.format(audio_stream.stream_id)
output_labels.append(output_label)
all_filters.append(input_label + stream_filter + output_label) # depends on [control=['for'], data=['audio_stream']]
filter_complex_cmd = ';'.join(all_filters)
return (filter_complex_cmd, output_labels) |
def create_menu(self, name, parent=None, **kwargs):
""" Creates a maya menu or menu item
:param name: Used to access a menu via its parent. Unless the nolabel flag is set to True, the name will also become the label of the menu.
:type name: str
:param parent: Optional - The parent menu. If None, this will create a toplevel menu. If parent menu is a Menu instance, this will create a menu item. Default is None.
:type parent: Menu|None
:param nolabel: Optional - If nolabel=True, the label flag for the maya command will not be overwritten by name
:type nolabel: bool
:param kwargs: all keyword arguments used for the cmds.menu/cmds.menuitem command
:type kwargs: named arguments
:returns: None
:rtype: None
:raises: errors.MenuExistsError
"""
m = Menu(name, parent, **kwargs)
if parent is None:
self.menus[name] = m
return m | def function[create_menu, parameter[self, name, parent]]:
constant[ Creates a maya menu or menu item
:param name: Used to access a menu via its parent. Unless the nolabel flag is set to True, the name will also become the label of the menu.
:type name: str
:param parent: Optional - The parent menu. If None, this will create a toplevel menu. If parent menu is a Menu instance, this will create a menu item. Default is None.
:type parent: Menu|None
:param nolabel: Optional - If nolabel=True, the label flag for the maya command will not be overwritten by name
:type nolabel: bool
:param kwargs: all keyword arguments used for the cmds.menu/cmds.menuitem command
:type kwargs: named arguments
:returns: None
:rtype: None
:raises: errors.MenuExistsError
]
variable[m] assign[=] call[name[Menu], parameter[name[name], name[parent]]]
if compare[name[parent] is constant[None]] begin[:]
call[name[self].menus][name[name]] assign[=] name[m]
return[name[m]] | keyword[def] identifier[create_menu] ( identifier[self] , identifier[name] , identifier[parent] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[m] = identifier[Menu] ( identifier[name] , identifier[parent] ,** identifier[kwargs] )
keyword[if] identifier[parent] keyword[is] keyword[None] :
identifier[self] . identifier[menus] [ identifier[name] ]= identifier[m]
keyword[return] identifier[m] | def create_menu(self, name, parent=None, **kwargs):
""" Creates a maya menu or menu item
:param name: Used to access a menu via its parent. Unless the nolabel flag is set to True, the name will also become the label of the menu.
:type name: str
:param parent: Optional - The parent menu. If None, this will create a toplevel menu. If parent menu is a Menu instance, this will create a menu item. Default is None.
:type parent: Menu|None
:param nolabel: Optional - If nolabel=True, the label flag for the maya command will not be overwritten by name
:type nolabel: bool
:param kwargs: all keyword arguments used for the cmds.menu/cmds.menuitem command
:type kwargs: named arguments
:returns: None
:rtype: None
:raises: errors.MenuExistsError
"""
m = Menu(name, parent, **kwargs)
if parent is None:
self.menus[name] = m # depends on [control=['if'], data=[]]
return m |
def _copy_sources(self, target, results_dir):
"""Copy sources from a target to a results directory.
:param NodePackage target: A subclass of NodePackage
:param string results_dir: The results directory
"""
buildroot = get_buildroot()
source_relative_to = target.address.spec_path
for source in target.sources_relative_to_buildroot():
dest = os.path.join(results_dir, os.path.relpath(source, source_relative_to))
safe_mkdir(os.path.dirname(dest))
shutil.copyfile(os.path.join(buildroot, source), dest) | def function[_copy_sources, parameter[self, target, results_dir]]:
constant[Copy sources from a target to a results directory.
:param NodePackage target: A subclass of NodePackage
:param string results_dir: The results directory
]
variable[buildroot] assign[=] call[name[get_buildroot], parameter[]]
variable[source_relative_to] assign[=] name[target].address.spec_path
for taget[name[source]] in starred[call[name[target].sources_relative_to_buildroot, parameter[]]] begin[:]
variable[dest] assign[=] call[name[os].path.join, parameter[name[results_dir], call[name[os].path.relpath, parameter[name[source], name[source_relative_to]]]]]
call[name[safe_mkdir], parameter[call[name[os].path.dirname, parameter[name[dest]]]]]
call[name[shutil].copyfile, parameter[call[name[os].path.join, parameter[name[buildroot], name[source]]], name[dest]]] | keyword[def] identifier[_copy_sources] ( identifier[self] , identifier[target] , identifier[results_dir] ):
literal[string]
identifier[buildroot] = identifier[get_buildroot] ()
identifier[source_relative_to] = identifier[target] . identifier[address] . identifier[spec_path]
keyword[for] identifier[source] keyword[in] identifier[target] . identifier[sources_relative_to_buildroot] ():
identifier[dest] = identifier[os] . identifier[path] . identifier[join] ( identifier[results_dir] , identifier[os] . identifier[path] . identifier[relpath] ( identifier[source] , identifier[source_relative_to] ))
identifier[safe_mkdir] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[dest] ))
identifier[shutil] . identifier[copyfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[buildroot] , identifier[source] ), identifier[dest] ) | def _copy_sources(self, target, results_dir):
"""Copy sources from a target to a results directory.
:param NodePackage target: A subclass of NodePackage
:param string results_dir: The results directory
"""
buildroot = get_buildroot()
source_relative_to = target.address.spec_path
for source in target.sources_relative_to_buildroot():
dest = os.path.join(results_dir, os.path.relpath(source, source_relative_to))
safe_mkdir(os.path.dirname(dest))
shutil.copyfile(os.path.join(buildroot, source), dest) # depends on [control=['for'], data=['source']] |
def from_timepoints(cls, timepoints, refdate, unit='hours'):
"""Return a |Timegrid| object representing the given starting
`timepoints` in relation to the given `refdate`.
The following examples identical with the ones of
|Timegrid.to_timepoints| but reversed.
At least two given time points must be increasing and
equidistant. By default, they are assumed in hours since
the given reference date:
>>> from hydpy import Timegrid
>>> Timegrid.from_timepoints(
... [0.0, 6.0, 12.0, 18.0], '01.01.2000')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [24.0, 30.0, 36.0, 42.0], '1999-12-31')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
Other time units (`days` or `min`) must be passed explicitely
(only the first character counts):
>>> Timegrid.from_timepoints(
... [0.0, 0.25, 0.5, 0.75], '01.01.2000', unit='d')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [1.0, 1.25, 1.5, 1.75], '1999-12-31', unit='day')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
"""
refdate = Date(refdate)
unit = Period.from_cfunits(unit)
delta = timepoints[1]-timepoints[0]
firstdate = refdate+timepoints[0]*unit
lastdate = refdate+(timepoints[-1]+delta)*unit
stepsize = (lastdate-firstdate)/len(timepoints)
return cls(firstdate, lastdate, stepsize) | def function[from_timepoints, parameter[cls, timepoints, refdate, unit]]:
constant[Return a |Timegrid| object representing the given starting
`timepoints` in relation to the given `refdate`.
The following examples identical with the ones of
|Timegrid.to_timepoints| but reversed.
At least two given time points must be increasing and
equidistant. By default, they are assumed in hours since
the given reference date:
>>> from hydpy import Timegrid
>>> Timegrid.from_timepoints(
... [0.0, 6.0, 12.0, 18.0], '01.01.2000')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [24.0, 30.0, 36.0, 42.0], '1999-12-31')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
Other time units (`days` or `min`) must be passed explicitely
(only the first character counts):
>>> Timegrid.from_timepoints(
... [0.0, 0.25, 0.5, 0.75], '01.01.2000', unit='d')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [1.0, 1.25, 1.5, 1.75], '1999-12-31', unit='day')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
]
variable[refdate] assign[=] call[name[Date], parameter[name[refdate]]]
variable[unit] assign[=] call[name[Period].from_cfunits, parameter[name[unit]]]
variable[delta] assign[=] binary_operation[call[name[timepoints]][constant[1]] - call[name[timepoints]][constant[0]]]
variable[firstdate] assign[=] binary_operation[name[refdate] + binary_operation[call[name[timepoints]][constant[0]] * name[unit]]]
variable[lastdate] assign[=] binary_operation[name[refdate] + binary_operation[binary_operation[call[name[timepoints]][<ast.UnaryOp object at 0x7da18bccaf80>] + name[delta]] * name[unit]]]
variable[stepsize] assign[=] binary_operation[binary_operation[name[lastdate] - name[firstdate]] / call[name[len], parameter[name[timepoints]]]]
return[call[name[cls], parameter[name[firstdate], name[lastdate], name[stepsize]]]] | keyword[def] identifier[from_timepoints] ( identifier[cls] , identifier[timepoints] , identifier[refdate] , identifier[unit] = literal[string] ):
literal[string]
identifier[refdate] = identifier[Date] ( identifier[refdate] )
identifier[unit] = identifier[Period] . identifier[from_cfunits] ( identifier[unit] )
identifier[delta] = identifier[timepoints] [ literal[int] ]- identifier[timepoints] [ literal[int] ]
identifier[firstdate] = identifier[refdate] + identifier[timepoints] [ literal[int] ]* identifier[unit]
identifier[lastdate] = identifier[refdate] +( identifier[timepoints] [- literal[int] ]+ identifier[delta] )* identifier[unit]
identifier[stepsize] =( identifier[lastdate] - identifier[firstdate] )/ identifier[len] ( identifier[timepoints] )
keyword[return] identifier[cls] ( identifier[firstdate] , identifier[lastdate] , identifier[stepsize] ) | def from_timepoints(cls, timepoints, refdate, unit='hours'):
"""Return a |Timegrid| object representing the given starting
`timepoints` in relation to the given `refdate`.
The following examples identical with the ones of
|Timegrid.to_timepoints| but reversed.
At least two given time points must be increasing and
equidistant. By default, they are assumed in hours since
the given reference date:
>>> from hydpy import Timegrid
>>> Timegrid.from_timepoints(
... [0.0, 6.0, 12.0, 18.0], '01.01.2000')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [24.0, 30.0, 36.0, 42.0], '1999-12-31')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
Other time units (`days` or `min`) must be passed explicitely
(only the first character counts):
>>> Timegrid.from_timepoints(
... [0.0, 0.25, 0.5, 0.75], '01.01.2000', unit='d')
Timegrid('01.01.2000 00:00:00',
'02.01.2000 00:00:00',
'6h')
>>> Timegrid.from_timepoints(
... [1.0, 1.25, 1.5, 1.75], '1999-12-31', unit='day')
Timegrid('2000-01-01 00:00:00',
'2000-01-02 00:00:00',
'6h')
"""
refdate = Date(refdate)
unit = Period.from_cfunits(unit)
delta = timepoints[1] - timepoints[0]
firstdate = refdate + timepoints[0] * unit
lastdate = refdate + (timepoints[-1] + delta) * unit
stepsize = (lastdate - firstdate) / len(timepoints)
return cls(firstdate, lastdate, stepsize) |
def genSubDirCMakeListsFile(self, working_path, addToCompilerIncludeDirectories, subdirs, files):
"""
Generate CMakeLists.txt in subdirectories.
:param working_path: current working directory
:param subdirs: a list of subdirectories of current working directory.
:param files: a list of files in current working directory.
:return: the full path name of generated CMakeLists.txt.
"""
fnameOut = os.path.join(working_path, 'CMakeLists.txt')
template = self.envJinja.get_template(self.SUBDIR_CMAKELISTS_JINJA2_TEMPLATE)
fcontent = template.render({'addToCompilerIncludeDirectories':addToCompilerIncludeDirectories,
'subdirs': subdirs,
'files': files})
with open(fnameOut, 'w') as f:
f.write(fcontent)
return fnameOut | def function[genSubDirCMakeListsFile, parameter[self, working_path, addToCompilerIncludeDirectories, subdirs, files]]:
constant[
Generate CMakeLists.txt in subdirectories.
:param working_path: current working directory
:param subdirs: a list of subdirectories of current working directory.
:param files: a list of files in current working directory.
:return: the full path name of generated CMakeLists.txt.
]
variable[fnameOut] assign[=] call[name[os].path.join, parameter[name[working_path], constant[CMakeLists.txt]]]
variable[template] assign[=] call[name[self].envJinja.get_template, parameter[name[self].SUBDIR_CMAKELISTS_JINJA2_TEMPLATE]]
variable[fcontent] assign[=] call[name[template].render, parameter[dictionary[[<ast.Constant object at 0x7da1b0aeda20>, <ast.Constant object at 0x7da1b0aeffa0>, <ast.Constant object at 0x7da1b0aec0d0>], [<ast.Name object at 0x7da1b0aef910>, <ast.Name object at 0x7da1b0aefee0>, <ast.Name object at 0x7da1b0aee860>]]]]
with call[name[open], parameter[name[fnameOut], constant[w]]] begin[:]
call[name[f].write, parameter[name[fcontent]]]
return[name[fnameOut]] | keyword[def] identifier[genSubDirCMakeListsFile] ( identifier[self] , identifier[working_path] , identifier[addToCompilerIncludeDirectories] , identifier[subdirs] , identifier[files] ):
literal[string]
identifier[fnameOut] = identifier[os] . identifier[path] . identifier[join] ( identifier[working_path] , literal[string] )
identifier[template] = identifier[self] . identifier[envJinja] . identifier[get_template] ( identifier[self] . identifier[SUBDIR_CMAKELISTS_JINJA2_TEMPLATE] )
identifier[fcontent] = identifier[template] . identifier[render] ({ literal[string] : identifier[addToCompilerIncludeDirectories] ,
literal[string] : identifier[subdirs] ,
literal[string] : identifier[files] })
keyword[with] identifier[open] ( identifier[fnameOut] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[fcontent] )
keyword[return] identifier[fnameOut] | def genSubDirCMakeListsFile(self, working_path, addToCompilerIncludeDirectories, subdirs, files):
"""
Generate CMakeLists.txt in subdirectories.
:param working_path: current working directory
:param subdirs: a list of subdirectories of current working directory.
:param files: a list of files in current working directory.
:return: the full path name of generated CMakeLists.txt.
"""
fnameOut = os.path.join(working_path, 'CMakeLists.txt')
template = self.envJinja.get_template(self.SUBDIR_CMAKELISTS_JINJA2_TEMPLATE)
fcontent = template.render({'addToCompilerIncludeDirectories': addToCompilerIncludeDirectories, 'subdirs': subdirs, 'files': files})
with open(fnameOut, 'w') as f:
f.write(fcontent) # depends on [control=['with'], data=['f']]
return fnameOut |
def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(
pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data
).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8')
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket) | def function[put_job, parameter[self, data, pri, delay, ttr]]:
constant[Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
]
with call[name[self]._sock_ctx, parameter[]] begin[:]
variable[message] assign[=] call[call[constant[put {pri} {delay} {ttr} {datalen}
].format, parameter[]].encode, parameter[constant[utf-8]]]
if <ast.UnaryOp object at 0x7da1b2404910> begin[:]
variable[data] assign[=] call[name[data].encode, parameter[constant[utf-8]]]
<ast.AugAssign object at 0x7da2054a6fe0>
<ast.AugAssign object at 0x7da2054a4ac0>
call[name[self]._send_message, parameter[name[message], name[socket]]]
return[call[name[self]._receive_id, parameter[name[socket]]]] | keyword[def] identifier[put_job] ( identifier[self] , identifier[data] , identifier[pri] = literal[int] , identifier[delay] = literal[int] , identifier[ttr] = literal[int] ):
literal[string]
keyword[with] identifier[self] . identifier[_sock_ctx] () keyword[as] identifier[socket] :
identifier[message] = literal[string] . identifier[format] (
identifier[pri] = identifier[pri] , identifier[delay] = identifier[delay] , identifier[ttr] = identifier[ttr] , identifier[datalen] = identifier[len] ( identifier[data] ), identifier[data] = identifier[data]
). identifier[encode] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[bytes] ):
identifier[data] = identifier[data] . identifier[encode] ( literal[string] )
identifier[message] += identifier[data]
identifier[message] += literal[string]
identifier[self] . identifier[_send_message] ( identifier[message] , identifier[socket] )
keyword[return] identifier[self] . identifier[_receive_id] ( identifier[socket] ) | def put_job(self, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into whatever queue is currently USEd
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job_into()`
Put a job into a specific tube
:func:`using()`
Insert a job using an external guard
"""
with self._sock_ctx() as socket:
message = 'put {pri} {delay} {ttr} {datalen}\r\n'.format(pri=pri, delay=delay, ttr=ttr, datalen=len(data), data=data).encode('utf-8')
if not isinstance(data, bytes):
data = data.encode('utf-8') # depends on [control=['if'], data=[]]
message += data
message += b'\r\n'
self._send_message(message, socket)
return self._receive_id(socket) # depends on [control=['with'], data=['socket']] |
def get_spot(self, feature=None, **kwargs):
    """
    Convenience wrapper around :meth:`get_feature` that forces kind='spot'.

    Any explicitly supplied ``kind`` keyword still wins; 'spot' is only
    used when the caller did not provide one.
    """
    if 'kind' not in kwargs:
        kwargs['kind'] = 'spot'
    return self.get_feature(feature, **kwargs)
constant[
Shortcut to :meth:`get_feature` but with kind='spot'
]
call[name[kwargs].setdefault, parameter[constant[kind], constant[spot]]]
return[call[name[self].get_feature, parameter[name[feature]]]] | keyword[def] identifier[get_spot] ( identifier[self] , identifier[feature] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
keyword[return] identifier[self] . identifier[get_feature] ( identifier[feature] ,** identifier[kwargs] ) | def get_spot(self, feature=None, **kwargs):
"""
Shortcut to :meth:`get_feature` but with kind='spot'
"""
kwargs.setdefault('kind', 'spot')
return self.get_feature(feature, **kwargs) |
def _goleft_indexcov(bam_file, data, out_dir):
    """Use goleft indexcov to estimate coverage distributions using BAM index.
    Only used for whole genome runs as captures typically don't have enough data
    to be useful for index-only summaries.

    :param bam_file: indexed BAM file whose index is summarized
    :param data: sample data dictionary, read through the ``dd`` accessors
    :param out_dir: base QC directory; outputs go into an ``indexcov`` subdirectory
    :returns: list of output files that exist on disk (roc/ped plus an unpacked
              ``.tsv`` version of the BED results)
    """
    # Skip capture/exome runs: index-only coverage summaries are only
    # meaningful for whole genome data (see docstring).
    if not dd.get_coverage_interval(data) == "genome":
        return []
    out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
    out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
                 for ext in ["roc", "ped", "bed.gz"]]
    # Re-run indexcov only when the final output (bed.gz) is out of date
    # relative to the input BAM.
    if not utils.file_uptodate(out_files[-1], bam_file):
        with transaction.tx_tmpdir(data) as tmp_dir:
            tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
            # Pass sex chromosome names so indexcov can report inferred sex.
            gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
            gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
            cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
            try:
                do.run(cmd.format(**locals()), "QC: goleft indexcov")
            except subprocess.CalledProcessError as msg:
                # Tolerate known indexcov failure messages (no usable reads, or
                # missing expected sex chromosomes); re-raise anything else.
                if not ("indexcov: no usable" in str(msg) or
                        ("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
                    raise
            # Copy whichever expected outputs were produced out of the
            # transactional temporary directory.
            for out_file in out_files:
                orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
                if utils.file_exists(orig_file):
                    utils.copy_plus(orig_file, out_file)
    # MultiQC needs non-gzipped/BED inputs so unpack the file
    out_bed = out_files[-1].replace(".bed.gz", ".tsv")
    if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
        with transaction.file_transaction(data, out_bed) as tx_out_bed:
            cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
            do.run(cmd, "Unpack indexcov BED file")
    # Report the unpacked tsv in place of the gzipped BED.
    out_files[-1] = out_bed
    return [x for x in out_files if utils.file_exists(x)]
constant[Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries.
]
if <ast.UnaryOp object at 0x7da1b18d8430> begin[:]
return[list[[]]]
variable[out_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[out_dir], constant[indexcov]]]]]
variable[out_files] assign[=] <ast.ListComp object at 0x7da1b18d8250>
if <ast.UnaryOp object at 0x7da1b18d8fd0> begin[:]
with call[name[transaction].tx_tmpdir, parameter[name[data]]] begin[:]
variable[tmp_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[tmp_dir], call[name[dd].get_sample_name, parameter[name[data]]]]]]]
variable[gender_chroms] assign[=] <ast.ListComp object at 0x7da1b18d2ad0>
variable[gender_args] assign[=] <ast.IfExp object at 0x7da1b18d32b0>
variable[cmd] assign[=] constant[goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}]
<ast.Try object at 0x7da1b18d2fb0>
for taget[name[out_file]] in starred[name[out_files]] begin[:]
variable[orig_file] assign[=] call[name[os].path.join, parameter[name[tmp_dir], call[name[os].path.basename, parameter[name[out_file]]]]]
if call[name[utils].file_exists, parameter[name[orig_file]]] begin[:]
call[name[utils].copy_plus, parameter[name[orig_file], name[out_file]]]
variable[out_bed] assign[=] call[call[name[out_files]][<ast.UnaryOp object at 0x7da1b18d1840>].replace, parameter[constant[.bed.gz], constant[.tsv]]]
if <ast.BoolOp object at 0x7da1b18d19f0> begin[:]
with call[name[transaction].file_transaction, parameter[name[data], name[out_bed]]] begin[:]
variable[cmd] assign[=] binary_operation[constant[gunzip -c %s > %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1884580>, <ast.Name object at 0x7da1b1884f70>]]]
call[name[do].run, parameter[name[cmd], constant[Unpack indexcov BED file]]]
call[name[out_files]][<ast.UnaryOp object at 0x7da1b1884460>] assign[=] name[out_bed]
return[<ast.ListComp object at 0x7da1b18848b0>] | keyword[def] identifier[_goleft_indexcov] ( identifier[bam_file] , identifier[data] , identifier[out_dir] ):
literal[string]
keyword[if] keyword[not] identifier[dd] . identifier[get_coverage_interval] ( identifier[data] )== literal[string] :
keyword[return] []
identifier[out_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] ))
identifier[out_files] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] %( identifier[dd] . identifier[get_sample_name] ( identifier[data] ), identifier[ext] ))
keyword[for] identifier[ext] keyword[in] [ literal[string] , literal[string] , literal[string] ]]
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_files] [- literal[int] ], identifier[bam_file] ):
keyword[with] identifier[transaction] . identifier[tx_tmpdir] ( identifier[data] ) keyword[as] identifier[tmp_dir] :
identifier[tmp_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , identifier[dd] . identifier[get_sample_name] ( identifier[data] )))
identifier[gender_chroms] =[ identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] identifier[ref] . identifier[file_contigs] ( identifier[dd] . identifier[get_ref_file] ( identifier[data] )) keyword[if] identifier[chromhacks] . identifier[is_sex] ( identifier[x] . identifier[name] )]
identifier[gender_args] = literal[string] %( literal[string] . identifier[join] ( identifier[gender_chroms] )) keyword[if] identifier[gender_chroms] keyword[else] literal[string]
identifier[cmd] = literal[string]
keyword[try] :
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[msg] :
keyword[if] keyword[not] ( literal[string] keyword[in] identifier[str] ( identifier[msg] ) keyword[or]
( literal[string] keyword[in] identifier[str] ( identifier[msg] ) keyword[and] literal[string] keyword[in] identifier[str] ( identifier[msg] ))):
keyword[raise]
keyword[for] identifier[out_file] keyword[in] identifier[out_files] :
identifier[orig_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[out_file] ))
keyword[if] identifier[utils] . identifier[file_exists] ( identifier[orig_file] ):
identifier[utils] . identifier[copy_plus] ( identifier[orig_file] , identifier[out_file] )
identifier[out_bed] = identifier[out_files] [- literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[utils] . identifier[file_exists] ( identifier[out_files] [- literal[int] ]) keyword[and] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_bed] ):
keyword[with] identifier[transaction] . identifier[file_transaction] ( identifier[data] , identifier[out_bed] ) keyword[as] identifier[tx_out_bed] :
identifier[cmd] = literal[string] %( identifier[out_files] [- literal[int] ], identifier[tx_out_bed] )
identifier[do] . identifier[run] ( identifier[cmd] , literal[string] )
identifier[out_files] [- literal[int] ]= identifier[out_bed]
keyword[return] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[out_files] keyword[if] identifier[utils] . identifier[file_exists] ( identifier[x] )] | def _goleft_indexcov(bam_file, data, out_dir):
"""Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries.
"""
if not dd.get_coverage_interval(data) == 'genome':
return [] # depends on [control=['if'], data=[]]
out_dir = utils.safe_makedir(os.path.join(out_dir, 'indexcov'))
out_files = [os.path.join(out_dir, '%s-indexcov.%s' % (dd.get_sample_name(data), ext)) for ext in ['roc', 'ped', 'bed.gz']]
if not utils.file_uptodate(out_files[-1], bam_file):
with transaction.tx_tmpdir(data) as tmp_dir:
tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
gender_args = '--sex %s' % ','.join(gender_chroms) if gender_chroms else ''
cmd = 'goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}'
try:
do.run(cmd.format(**locals()), 'QC: goleft indexcov') # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as msg:
if not ('indexcov: no usable' in str(msg) or ('indexcov: expected' in str(msg) and 'sex chromosomes, found:' in str(msg))):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['msg']]
for out_file in out_files:
orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
if utils.file_exists(orig_file):
utils.copy_plus(orig_file, out_file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['out_file']] # depends on [control=['with'], data=['tmp_dir']] # depends on [control=['if'], data=[]]
# MultiQC needs non-gzipped/BED inputs so unpack the file
out_bed = out_files[-1].replace('.bed.gz', '.tsv')
if utils.file_exists(out_files[-1]) and (not utils.file_exists(out_bed)):
with transaction.file_transaction(data, out_bed) as tx_out_bed:
cmd = 'gunzip -c %s > %s' % (out_files[-1], tx_out_bed)
do.run(cmd, 'Unpack indexcov BED file') # depends on [control=['with'], data=['tx_out_bed']] # depends on [control=['if'], data=[]]
out_files[-1] = out_bed
return [x for x in out_files if utils.file_exists(x)] |
def get_assessment_part(self):
    """Gets the parent assessment part.
    return: (osid.assessment.authoring.AssessmentPart) - the parent
            assessment part
    raise: IllegalState - ``has_parent_part()`` is ``false``
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.has_parent_part():
        lookup_session = self._get_assessment_part_lookup_session()
        return lookup_session.get_assessment_part(self.get_assessment_part_id())
    # No parent available: mandated IllegalState per the OSID contract.
    raise errors.IllegalState('no parent part')
constant[Gets the parent assessment.
return: (osid.assessment.authoring.AssessmentPart) - the parent
assessment part
raise: IllegalState - ``has_parent_part()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
]
if <ast.UnaryOp object at 0x7da18f58c190> begin[:]
<ast.Raise object at 0x7da18f58d090>
variable[lookup_session] assign[=] call[name[self]._get_assessment_part_lookup_session, parameter[]]
return[call[name[lookup_session].get_assessment_part, parameter[call[name[self].get_assessment_part_id, parameter[]]]]] | keyword[def] identifier[get_assessment_part] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_parent_part] ():
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
identifier[lookup_session] = identifier[self] . identifier[_get_assessment_part_lookup_session] ()
keyword[return] identifier[lookup_session] . identifier[get_assessment_part] ( identifier[self] . identifier[get_assessment_part_id] ()) | def get_assessment_part(self):
"""Gets the parent assessment.
return: (osid.assessment.authoring.AssessmentPart) - the parent
assessment part
raise: IllegalState - ``has_parent_part()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if not self.has_parent_part():
raise errors.IllegalState('no parent part') # depends on [control=['if'], data=[]]
lookup_session = self._get_assessment_part_lookup_session()
return lookup_session.get_assessment_part(self.get_assessment_part_id()) |
def request(self, method, url, params=None, **kwargs):
    """Perform a request, or return a cached response if available.

    GET responses are memoized in ``self.get_cache`` keyed on the URL plus
    the query parameters; all other methods pass straight through.
    """
    cache_key = (url, tuple(params.items()) if params else ())
    is_get = method.upper() == "GET"
    if is_get and cache_key in self.get_cache:
        print("Returning cached response for:", method, url, params)
        return self.get_cache[cache_key]
    response = super().request(method, url, params, **kwargs)
    if is_get:
        self.get_cache[cache_key] = response
        print("Adding entry to the cache:", method, url, params)
    return response
constant[Perform a request, or return a cached response if available.]
variable[params_key] assign[=] <ast.IfExp object at 0x7da2041d9750>
if compare[call[name[method].upper, parameter[]] equal[==] constant[GET]] begin[:]
if compare[tuple[[<ast.Name object at 0x7da2041d9030>, <ast.Name object at 0x7da2041db820>]] in name[self].get_cache] begin[:]
call[name[print], parameter[constant[Returning cached response for:], name[method], name[url], name[params]]]
return[call[name[self].get_cache][tuple[[<ast.Name object at 0x7da2041da890>, <ast.Name object at 0x7da2041dbdc0>]]]]
variable[result] assign[=] call[call[name[super], parameter[]].request, parameter[name[method], name[url], name[params]]]
if compare[call[name[method].upper, parameter[]] equal[==] constant[GET]] begin[:]
call[name[self].get_cache][tuple[[<ast.Name object at 0x7da2041dbf40>, <ast.Name object at 0x7da2041d8580>]]] assign[=] name[result]
call[name[print], parameter[constant[Adding entry to the cache:], name[method], name[url], name[params]]]
return[name[result]] | keyword[def] identifier[request] ( identifier[self] , identifier[method] , identifier[url] , identifier[params] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[params_key] = identifier[tuple] ( identifier[params] . identifier[items] ()) keyword[if] identifier[params] keyword[else] ()
keyword[if] identifier[method] . identifier[upper] ()== literal[string] :
keyword[if] ( identifier[url] , identifier[params_key] ) keyword[in] identifier[self] . identifier[get_cache] :
identifier[print] ( literal[string] , identifier[method] , identifier[url] , identifier[params] )
keyword[return] identifier[self] . identifier[get_cache] [( identifier[url] , identifier[params_key] )]
identifier[result] = identifier[super] (). identifier[request] ( identifier[method] , identifier[url] , identifier[params] ,** identifier[kwargs] )
keyword[if] identifier[method] . identifier[upper] ()== literal[string] :
identifier[self] . identifier[get_cache] [( identifier[url] , identifier[params_key] )]= identifier[result]
identifier[print] ( literal[string] , identifier[method] , identifier[url] , identifier[params] )
keyword[return] identifier[result] | def request(self, method, url, params=None, **kwargs):
"""Perform a request, or return a cached response if available."""
params_key = tuple(params.items()) if params else ()
if method.upper() == 'GET':
if (url, params_key) in self.get_cache:
print('Returning cached response for:', method, url, params)
return self.get_cache[url, params_key] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
result = super().request(method, url, params, **kwargs)
if method.upper() == 'GET':
self.get_cache[url, params_key] = result
print('Adding entry to the cache:', method, url, params) # depends on [control=['if'], data=[]]
return result |
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
    """Draw the bars and separators.

    :param out_value: model output value; bars are stacked starting from here
    :param features: sequence of entries whose first element is the feature's
        numeric effect (drawn in order)
    :param feature_type: 'positive' stacks leftwards in red; anything else
        stacks rightwards in blue
    :param width_separators: horizontal indent of the arrow-shaped separators
    :param width_bar: vertical thickness of the bars
    :returns: tuple (rectangle_list, separator_list) of matplotlib Polygon
        patches ready to be added to an axes
    """
    rectangle_list = []
    separator_list = []
    pre_val = out_value
    # NOTE: the original loop rebound the ``features`` parameter as its own
    # loop variable (``for index, features in zip(range(len(features)), ...)``);
    # ``enumerate`` with a singular name avoids that shadowing.
    for index, feature in enumerate(features):
        if feature_type == 'positive':
            # Positive features stack leftwards from the previous boundary.
            left_bound = float(feature[0])
            right_bound = pre_val
            pre_val = left_bound
            separator_indent = np.abs(width_separators)
            separator_pos = left_bound
            colors = ['#FF0D57', '#FFC3D5']
        else:
            # Negative features stack rightwards from the previous boundary.
            left_bound = pre_val
            right_bound = float(feature[0])
            pre_val = right_bound
            separator_indent = - np.abs(width_separators)
            separator_pos = right_bound
            colors = ['#1E88E5', '#D1E6FA']
        # Create rectangle: the first bar has a flat outer edge; later bars
        # carry an arrow notch on both sides so adjacent bars interlock.
        if index == 0:
            if feature_type == 'positive':
                points_rectangle = [[left_bound, 0],
                                    [right_bound, 0],
                                    [right_bound, width_bar],
                                    [left_bound, width_bar],
                                    [left_bound + separator_indent, (width_bar / 2)]
                                    ]
            else:
                points_rectangle = [[right_bound, 0],
                                    [left_bound, 0],
                                    [left_bound, width_bar],
                                    [right_bound, width_bar],
                                    [right_bound + separator_indent, (width_bar / 2)]
                                    ]
        else:
            points_rectangle = [[left_bound, 0],
                                [right_bound, 0],
                                [right_bound + separator_indent * 0.90, (width_bar / 2)],
                                [right_bound, width_bar],
                                [left_bound, width_bar],
                                [left_bound + separator_indent * 0.90, (width_bar / 2)]]
        line = plt.Polygon(points_rectangle, closed=True, fill=True,
                           facecolor=colors[0], linewidth=0)
        rectangle_list += [line]
        # Create seperator: arrow-shaped outline drawn on top of the bar edge.
        points_separator = [[separator_pos, 0],
                            [separator_pos + separator_indent, (width_bar / 2)],
                            [separator_pos, width_bar]]
        line = plt.Polygon(points_separator, closed=None, fill=None,
                           edgecolor=colors[1], lw=3)
        separator_list += [line]
    return rectangle_list, separator_list
constant[Draw the bars and separators.]
variable[rectangle_list] assign[=] list[[]]
variable[separator_list] assign[=] list[[]]
variable[pre_val] assign[=] name[out_value]
for taget[tuple[[<ast.Name object at 0x7da2044c2230>, <ast.Name object at 0x7da2044c02b0>]]] in starred[call[name[zip], parameter[call[name[range], parameter[call[name[len], parameter[name[features]]]]], name[features]]]] begin[:]
if compare[name[feature_type] equal[==] constant[positive]] begin[:]
variable[left_bound] assign[=] call[name[float], parameter[call[name[features]][constant[0]]]]
variable[right_bound] assign[=] name[pre_val]
variable[pre_val] assign[=] name[left_bound]
variable[separator_indent] assign[=] call[name[np].abs, parameter[name[width_separators]]]
variable[separator_pos] assign[=] name[left_bound]
variable[colors] assign[=] list[[<ast.Constant object at 0x7da20e9627a0>, <ast.Constant object at 0x7da20e961e10>]]
if compare[name[index] equal[==] constant[0]] begin[:]
if compare[name[feature_type] equal[==] constant[positive]] begin[:]
variable[points_rectangle] assign[=] list[[<ast.List object at 0x7da2041d8a90>, <ast.List object at 0x7da2041d80a0>, <ast.List object at 0x7da2041d9870>, <ast.List object at 0x7da2041db9a0>, <ast.List object at 0x7da2041da680>]]
variable[line] assign[=] call[name[plt].Polygon, parameter[name[points_rectangle]]]
<ast.AugAssign object at 0x7da2041dad10>
variable[points_separator] assign[=] list[[<ast.List object at 0x7da2041dbbb0>, <ast.List object at 0x7da2041db610>, <ast.List object at 0x7da2041d9f60>]]
variable[line] assign[=] call[name[plt].Polygon, parameter[name[points_separator]]]
<ast.AugAssign object at 0x7da2041db880>
return[tuple[[<ast.Name object at 0x7da2041d8a30>, <ast.Name object at 0x7da2041db0d0>]]] | keyword[def] identifier[draw_bars] ( identifier[out_value] , identifier[features] , identifier[feature_type] , identifier[width_separators] , identifier[width_bar] ):
literal[string]
identifier[rectangle_list] =[]
identifier[separator_list] =[]
identifier[pre_val] = identifier[out_value]
keyword[for] identifier[index] , identifier[features] keyword[in] identifier[zip] ( identifier[range] ( identifier[len] ( identifier[features] )), identifier[features] ):
keyword[if] identifier[feature_type] == literal[string] :
identifier[left_bound] = identifier[float] ( identifier[features] [ literal[int] ])
identifier[right_bound] = identifier[pre_val]
identifier[pre_val] = identifier[left_bound]
identifier[separator_indent] = identifier[np] . identifier[abs] ( identifier[width_separators] )
identifier[separator_pos] = identifier[left_bound]
identifier[colors] =[ literal[string] , literal[string] ]
keyword[else] :
identifier[left_bound] = identifier[pre_val]
identifier[right_bound] = identifier[float] ( identifier[features] [ literal[int] ])
identifier[pre_val] = identifier[right_bound]
identifier[separator_indent] =- identifier[np] . identifier[abs] ( identifier[width_separators] )
identifier[separator_pos] = identifier[right_bound]
identifier[colors] =[ literal[string] , literal[string] ]
keyword[if] identifier[index] == literal[int] :
keyword[if] identifier[feature_type] == literal[string] :
identifier[points_rectangle] =[[ identifier[left_bound] , literal[int] ],
[ identifier[right_bound] , literal[int] ],
[ identifier[right_bound] , identifier[width_bar] ],
[ identifier[left_bound] , identifier[width_bar] ],
[ identifier[left_bound] + identifier[separator_indent] ,( identifier[width_bar] / literal[int] )]
]
keyword[else] :
identifier[points_rectangle] =[[ identifier[right_bound] , literal[int] ],
[ identifier[left_bound] , literal[int] ],
[ identifier[left_bound] , identifier[width_bar] ],
[ identifier[right_bound] , identifier[width_bar] ],
[ identifier[right_bound] + identifier[separator_indent] ,( identifier[width_bar] / literal[int] )]
]
keyword[else] :
identifier[points_rectangle] =[[ identifier[left_bound] , literal[int] ],
[ identifier[right_bound] , literal[int] ],
[ identifier[right_bound] + identifier[separator_indent] * literal[int] ,( identifier[width_bar] / literal[int] )],
[ identifier[right_bound] , identifier[width_bar] ],
[ identifier[left_bound] , identifier[width_bar] ],
[ identifier[left_bound] + identifier[separator_indent] * literal[int] ,( identifier[width_bar] / literal[int] )]]
identifier[line] = identifier[plt] . identifier[Polygon] ( identifier[points_rectangle] , identifier[closed] = keyword[True] , identifier[fill] = keyword[True] ,
identifier[facecolor] = identifier[colors] [ literal[int] ], identifier[linewidth] = literal[int] )
identifier[rectangle_list] +=[ identifier[line] ]
identifier[points_separator] =[[ identifier[separator_pos] , literal[int] ],
[ identifier[separator_pos] + identifier[separator_indent] ,( identifier[width_bar] / literal[int] )],
[ identifier[separator_pos] , identifier[width_bar] ]]
identifier[line] = identifier[plt] . identifier[Polygon] ( identifier[points_separator] , identifier[closed] = keyword[None] , identifier[fill] = keyword[None] ,
identifier[edgecolor] = identifier[colors] [ literal[int] ], identifier[lw] = literal[int] )
identifier[separator_list] +=[ identifier[line] ]
keyword[return] identifier[rectangle_list] , identifier[separator_list] | def draw_bars(out_value, features, feature_type, width_separators, width_bar):
"""Draw the bars and separators."""
rectangle_list = []
separator_list = []
pre_val = out_value
for (index, features) in zip(range(len(features)), features):
if feature_type == 'positive':
left_bound = float(features[0])
right_bound = pre_val
pre_val = left_bound
separator_indent = np.abs(width_separators)
separator_pos = left_bound
colors = ['#FF0D57', '#FFC3D5'] # depends on [control=['if'], data=[]]
else:
left_bound = pre_val
right_bound = float(features[0])
pre_val = right_bound
separator_indent = -np.abs(width_separators)
separator_pos = right_bound
colors = ['#1E88E5', '#D1E6FA']
# Create rectangle
if index == 0:
if feature_type == 'positive':
points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent, width_bar / 2]] # depends on [control=['if'], data=[]]
else:
points_rectangle = [[right_bound, 0], [left_bound, 0], [left_bound, width_bar], [right_bound, width_bar], [right_bound + separator_indent, width_bar / 2]] # depends on [control=['if'], data=[]]
else:
points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound + separator_indent * 0.9, width_bar / 2], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent * 0.9, width_bar / 2]]
line = plt.Polygon(points_rectangle, closed=True, fill=True, facecolor=colors[0], linewidth=0)
rectangle_list += [line]
# Create seperator
points_separator = [[separator_pos, 0], [separator_pos + separator_indent, width_bar / 2], [separator_pos, width_bar]]
line = plt.Polygon(points_separator, closed=None, fill=None, edgecolor=colors[1], lw=3)
separator_list += [line] # depends on [control=['for'], data=[]]
return (rectangle_list, separator_list) |
def is_bbox_not_intersecting(self, other):
    """Return True iff the bounding boxes of self and other do NOT intersect."""
    a_x_min, a_x_max, a_y_min, a_y_max = self.get_bbox()
    b_x_min, b_x_max, b_y_min, b_y_max = other.get_bbox()
    # De Morgan form of the original disjunction: the boxes intersect exactly
    # when they overlap on both axes.
    return not (a_x_min <= b_x_max and b_x_min <= a_x_max and
                a_y_min <= b_y_max and b_y_min <= a_y_max)
constant[Returns False iif bounding boxed of self and other intersect]
<ast.Tuple object at 0x7da1b16fc3d0> assign[=] call[name[self].get_bbox, parameter[]]
<ast.Tuple object at 0x7da1b16fc760> assign[=] call[name[other].get_bbox, parameter[]]
return[<ast.BoolOp object at 0x7da1b16fc070>] | keyword[def] identifier[is_bbox_not_intersecting] ( identifier[self] , identifier[other] ):
literal[string]
identifier[self_x_min] , identifier[self_x_max] , identifier[self_y_min] , identifier[self_y_max] = identifier[self] . identifier[get_bbox] ()
identifier[other_x_min] , identifier[other_x_max] , identifier[other_y_min] , identifier[other_y_max] = identifier[other] . identifier[get_bbox] ()
keyword[return] identifier[self_x_min] > identifier[other_x_max] keyword[or] identifier[other_x_min] > identifier[self_x_max] keyword[or] identifier[self_y_min] > identifier[other_y_max] keyword[or] identifier[other_y_min] > identifier[self_y_max] | def is_bbox_not_intersecting(self, other):
"""Returns False iif bounding boxed of self and other intersect"""
(self_x_min, self_x_max, self_y_min, self_y_max) = self.get_bbox()
(other_x_min, other_x_max, other_y_min, other_y_max) = other.get_bbox()
return self_x_min > other_x_max or other_x_min > self_x_max or self_y_min > other_y_max or (other_y_min > self_y_max) |
def find_best_ensemble(results, options):
    """
    Return the best performing ensemble.

    Ensembles are ranked by the enrichment factor at the user-supplied FPF.
    When no FPF was given, the smallest measurable FPF (1 / n decoys) is used;
    when the FPF is 1, ensembles are ranked by AUC instead. Ties between the
    leading ensembles are resolved by ``tie_break``, which considers
    enrichment factors at the smallest FPFs not already used.

    :param results: mapping of ensemble -> classification.EnsembleStorage
    :param options: options object; its ``fpf`` attribute holds the training FPF
    :return: the key of the best performing ensemble
    """
    # The decoy counts at which enrichment factors were measured are the keys
    # of each stored ensemble's ``ef`` dict; the largest is the total count.
    total_decoys = max(next(iter(results.values())).ef.keys())
    # Translate the requested FPF into a decoy count (1 decoy when unset).
    ndecoys = int(round(total_decoys * options.fpf)) if options.fpf else 1
    if ndecoys == total_decoys:
        # An FPF of 1 covers the whole curve, so rank by AUC.
        prop_key = 'auc'
        rank = lambda entry: entry[1].get_prop(prop_key)
    else:
        # Otherwise rank by enrichment factor at the requested FPF.
        prop_key = 'ef'
        rank = lambda entry: entry[1].get_prop(ndecoys, prop_key)
    ordered = sorted(results.items(), key=rank, reverse=True)
    # Tie-breaking only matters when there is more than one candidate.
    if len(ordered) > 1:
        ordered = tie_break(ordered, results, prop_key, ndecoys)
    return ordered[0][0]
constant[
Return the best performing ensemble. If the user hasn't specified a FPF, the default behavior sorts ensembles by
largest enrichment factor at the smallest FPF (1 / n, where n is the total number of decoys). If the user supplied
a FPF, ensembles are sorted by the largest enrichment factor at the input FPF. If the user supplied a FPF = 1,
ensembles are sorted by AUC. Ties are broken by considering enrichment factors at the smallest FPF not already
considered.
:param results: {ensemble_storage_object1, ensemble_storage_object2, ..., ensemble_storage_objectn}
:param options: options object, contains user-specified arguments as attributes.
:return: ensemble_storage_object (classification.EnsembleStorage) that contains the best performing ensemble
]
variable[n] assign[=] call[call[name[sorted], parameter[call[name[list], parameter[call[call[call[call[name[list], parameter[call[name[results].items, parameter[]]]]][constant[0]]][constant[1]].ef.keys, parameter[]]]]]]][constant[0]]
if <ast.UnaryOp object at 0x7da20c990cd0> begin[:]
variable[ndecoys] assign[=] constant[1]
if compare[name[ndecoys] equal[==] name[n]] begin[:]
variable[prop_key] assign[=] constant[auc]
variable[sorted_list] assign[=] call[name[sorted], parameter[call[name[results].items, parameter[]]]]
if compare[call[name[len], parameter[name[sorted_list]]] greater[>] constant[1]] begin[:]
variable[sorted_list] assign[=] call[name[tie_break], parameter[name[sorted_list], name[results], name[prop_key], name[ndecoys]]]
return[call[call[name[sorted_list]][constant[0]]][constant[0]]] | keyword[def] identifier[find_best_ensemble] ( identifier[results] , identifier[options] ):
literal[string]
identifier[n] = identifier[sorted] ( identifier[list] ( identifier[list] ( identifier[results] . identifier[items] ())[ literal[int] ][ literal[int] ]. identifier[ef] . identifier[keys] ()), identifier[reverse] = keyword[True] )[ literal[int] ]
keyword[if] keyword[not] identifier[options] . identifier[fpf] :
identifier[ndecoys] = literal[int]
keyword[else] :
identifier[ndecoys] = identifier[int] ( identifier[round] ( identifier[n] * identifier[options] . identifier[fpf] ))
keyword[if] identifier[ndecoys] == identifier[n] :
identifier[prop_key] = literal[string]
identifier[sorted_list] = identifier[sorted] ( identifier[results] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]. identifier[get_prop] ( identifier[prop_key] ), identifier[reverse] = keyword[True] )
keyword[else] :
identifier[prop_key] = literal[string]
identifier[sorted_list] = identifier[sorted] ( identifier[results] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]. identifier[get_prop] ( identifier[ndecoys] , identifier[prop_key] ), identifier[reverse] = keyword[True] )
keyword[if] identifier[len] ( identifier[sorted_list] )> literal[int] :
identifier[sorted_list] = identifier[tie_break] ( identifier[sorted_list] , identifier[results] , identifier[prop_key] , identifier[ndecoys] )
keyword[return] identifier[sorted_list] [ literal[int] ][ literal[int] ] | def find_best_ensemble(results, options):
"""
Return the best performing ensemble. If the user hasn't specified a FPF, the default behavior sorts ensembles by
largest enrichment factor at the smallest FPF (1 / n, where n is the total number of decoys). If the user supplied
a FPF, ensembles are sorted by the largest enrichment factor at the input FPF. If the user supplied a FPF = 1,
ensembles are sorted by AUC. Ties are broken by considering enrichment factors at the smallest FPF not already
considered.
:param results: {ensemble_storage_object1, ensemble_storage_object2, ..., ensemble_storage_objectn}
:param options: options object, contains user-specified arguments as attributes.
:return: ensemble_storage_object (classification.EnsembleStorage) that contains the best performing ensemble
"""
# We need the total number of decoys in the set to determine the number of decoys that correspond to the FPF values
# at which enrichment factors were measured. The number of decoys are keys in the ef dictionary of each
# ensemble object stored in the results dictionary.
n = sorted(list(list(results.items())[0][1].ef.keys()), reverse=True)[0]
# determine the number of decoys that correspond to the FPF used for training
if not options.fpf:
ndecoys = 1 # depends on [control=['if'], data=[]]
else:
ndecoys = int(round(n * options.fpf))
# sort the results according to the user-specified training method
if ndecoys == n:
# the user specified an fpf of 1, so wants the ensemble the maximizes the AUC, so sort on auc
prop_key = 'auc'
sorted_list = sorted(results.items(), key=lambda x: x[1].get_prop(prop_key), reverse=True) # depends on [control=['if'], data=[]]
else:
# the user is interested in an ensemble that maximizes an enrichment factor at some FPF
prop_key = 'ef'
sorted_list = sorted(results.items(), key=lambda x: x[1].get_prop(ndecoys, prop_key), reverse=True)
# we only need to consider breaking a tie if there is more than one ensemble to consider
if len(sorted_list) > 1:
sorted_list = tie_break(sorted_list, results, prop_key, ndecoys) # depends on [control=['if'], data=[]]
return sorted_list[0][0] |
def dist_hamming(src, tar, diff_lens=True):
"""Return the normalized Hamming distance between two strings.
This is a wrapper for :py:meth:`Hamming.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
float
The normalized Hamming distance
Examples
--------
>>> round(dist_hamming('cat', 'hat'), 12)
0.333333333333
>>> dist_hamming('Niall', 'Neil')
0.6
>>> dist_hamming('aluminum', 'Catalan')
1.0
>>> dist_hamming('ATCG', 'TAGC')
1.0
"""
return Hamming().dist(src, tar, diff_lens) | def function[dist_hamming, parameter[src, tar, diff_lens]]:
constant[Return the normalized Hamming distance between two strings.
This is a wrapper for :py:meth:`Hamming.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
float
The normalized Hamming distance
Examples
--------
>>> round(dist_hamming('cat', 'hat'), 12)
0.333333333333
>>> dist_hamming('Niall', 'Neil')
0.6
>>> dist_hamming('aluminum', 'Catalan')
1.0
>>> dist_hamming('ATCG', 'TAGC')
1.0
]
return[call[call[name[Hamming], parameter[]].dist, parameter[name[src], name[tar], name[diff_lens]]]] | keyword[def] identifier[dist_hamming] ( identifier[src] , identifier[tar] , identifier[diff_lens] = keyword[True] ):
literal[string]
keyword[return] identifier[Hamming] (). identifier[dist] ( identifier[src] , identifier[tar] , identifier[diff_lens] ) | def dist_hamming(src, tar, diff_lens=True):
"""Return the normalized Hamming distance between two strings.
This is a wrapper for :py:meth:`Hamming.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
float
The normalized Hamming distance
Examples
--------
>>> round(dist_hamming('cat', 'hat'), 12)
0.333333333333
>>> dist_hamming('Niall', 'Neil')
0.6
>>> dist_hamming('aluminum', 'Catalan')
1.0
>>> dist_hamming('ATCG', 'TAGC')
1.0
"""
return Hamming().dist(src, tar, diff_lens) |
def encrypt(self, data, pad=True):
"""
DES encrypts the data based on the key it was initialised with.
:param data: The bytes string to encrypt
:param pad: Whether to right pad data with \x00 to a multiple of 8
:return: The encrypted bytes string
"""
encrypted_data = b""
for i in range(0, len(data), 8):
block = data[i:i + 8]
block_length = len(block)
if block_length != 8 and pad:
block += b"\x00" * (8 - block_length)
elif block_length != 8:
raise ValueError("DES encryption must be a multiple of 8 "
"bytes")
encrypted_data += self._encode_block(block)
return encrypted_data | def function[encrypt, parameter[self, data, pad]]:
constant[
DES encrypts the data based on the key it was initialised with.
:param data: The bytes string to encrypt
:param pad: Whether to right pad data with to a multiple of 8
:return: The encrypted bytes string
]
variable[encrypted_data] assign[=] constant[b'']
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[data]]], constant[8]]]] begin[:]
variable[block] assign[=] call[name[data]][<ast.Slice object at 0x7da1b0d0fb80>]
variable[block_length] assign[=] call[name[len], parameter[name[block]]]
if <ast.BoolOp object at 0x7da1b0d0c6a0> begin[:]
<ast.AugAssign object at 0x7da1b0d0d7e0>
<ast.AugAssign object at 0x7da1b0d0f880>
return[name[encrypted_data]] | keyword[def] identifier[encrypt] ( identifier[self] , identifier[data] , identifier[pad] = keyword[True] ):
literal[string]
identifier[encrypted_data] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[data] ), literal[int] ):
identifier[block] = identifier[data] [ identifier[i] : identifier[i] + literal[int] ]
identifier[block_length] = identifier[len] ( identifier[block] )
keyword[if] identifier[block_length] != literal[int] keyword[and] identifier[pad] :
identifier[block] += literal[string] *( literal[int] - identifier[block_length] )
keyword[elif] identifier[block_length] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[encrypted_data] += identifier[self] . identifier[_encode_block] ( identifier[block] )
keyword[return] identifier[encrypted_data] | def encrypt(self, data, pad=True):
"""
DES encrypts the data based on the key it was initialised with.
:param data: The bytes string to encrypt
:param pad: Whether to right pad data with \x00 to a multiple of 8
:return: The encrypted bytes string
"""
encrypted_data = b''
for i in range(0, len(data), 8):
block = data[i:i + 8]
block_length = len(block)
if block_length != 8 and pad:
block += b'\x00' * (8 - block_length) # depends on [control=['if'], data=[]]
elif block_length != 8:
raise ValueError('DES encryption must be a multiple of 8 bytes') # depends on [control=['if'], data=[]]
encrypted_data += self._encode_block(block) # depends on [control=['for'], data=['i']]
return encrypted_data |
def get_current_user(self):
"""
Returns the access key id used in this request as the current user id
"""
if 'Authorization' in self.headers:
match = self.access_key_regex.search(self.headers['Authorization'])
if match:
return match.group(1)
if self.querystring.get('AWSAccessKeyId'):
return self.querystring.get('AWSAccessKeyId')
else:
# Should we raise an unauthorized exception instead?
return '111122223333' | def function[get_current_user, parameter[self]]:
constant[
Returns the access key id used in this request as the current user id
]
if compare[constant[Authorization] in name[self].headers] begin[:]
variable[match] assign[=] call[name[self].access_key_regex.search, parameter[call[name[self].headers][constant[Authorization]]]]
if name[match] begin[:]
return[call[name[match].group, parameter[constant[1]]]]
if call[name[self].querystring.get, parameter[constant[AWSAccessKeyId]]] begin[:]
return[call[name[self].querystring.get, parameter[constant[AWSAccessKeyId]]]] | keyword[def] identifier[get_current_user] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[headers] :
identifier[match] = identifier[self] . identifier[access_key_regex] . identifier[search] ( identifier[self] . identifier[headers] [ literal[string] ])
keyword[if] identifier[match] :
keyword[return] identifier[match] . identifier[group] ( literal[int] )
keyword[if] identifier[self] . identifier[querystring] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[querystring] . identifier[get] ( literal[string] )
keyword[else] :
keyword[return] literal[string] | def get_current_user(self):
"""
Returns the access key id used in this request as the current user id
"""
if 'Authorization' in self.headers:
match = self.access_key_regex.search(self.headers['Authorization'])
if match:
return match.group(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.querystring.get('AWSAccessKeyId'):
return self.querystring.get('AWSAccessKeyId') # depends on [control=['if'], data=[]]
else:
# Should we raise an unauthorized exception instead?
return '111122223333' |
def dirs(self):
"""Get an iter of VenvDirs within the directory."""
contents = self.paths
contents = (BinDir(path.path) for path in contents if path.is_dir)
return contents | def function[dirs, parameter[self]]:
constant[Get an iter of VenvDirs within the directory.]
variable[contents] assign[=] name[self].paths
variable[contents] assign[=] <ast.GeneratorExp object at 0x7da2054a5cc0>
return[name[contents]] | keyword[def] identifier[dirs] ( identifier[self] ):
literal[string]
identifier[contents] = identifier[self] . identifier[paths]
identifier[contents] =( identifier[BinDir] ( identifier[path] . identifier[path] ) keyword[for] identifier[path] keyword[in] identifier[contents] keyword[if] identifier[path] . identifier[is_dir] )
keyword[return] identifier[contents] | def dirs(self):
"""Get an iter of VenvDirs within the directory."""
contents = self.paths
contents = (BinDir(path.path) for path in contents if path.is_dir)
return contents |
def file_abspath(self, resource):
"""Deprecated alias for *resource_path*."""
warnings.warn(
"file_abspath is deprecated; use resource_path instead",
DeprecationWarning, stacklevel=2)
return self.resource_path(resource) | def function[file_abspath, parameter[self, resource]]:
constant[Deprecated alias for *resource_path*.]
call[name[warnings].warn, parameter[constant[file_abspath is deprecated; use resource_path instead], name[DeprecationWarning]]]
return[call[name[self].resource_path, parameter[name[resource]]]] | keyword[def] identifier[file_abspath] ( identifier[self] , identifier[resource] ):
literal[string]
identifier[warnings] . identifier[warn] (
literal[string] ,
identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[self] . identifier[resource_path] ( identifier[resource] ) | def file_abspath(self, resource):
"""Deprecated alias for *resource_path*."""
warnings.warn('file_abspath is deprecated; use resource_path instead', DeprecationWarning, stacklevel=2)
return self.resource_path(resource) |
def run_command(self, command, timeout=-1):
"""Send a command to the REPL, wait for and return output.
:param str command: The command to send. Trailing newlines are not needed.
This should be a complete block of input that will trigger execution;
if a continuation prompt is found after sending input, :exc:`ValueError`
will be raised.
:param int timeout: How long to wait for the next prompt. -1 means the
default from the :class:`pexpect.spawn` object (default 30 seconds).
None means to wait indefinitely.
"""
# Split up multiline commands and feed them in bit-by-bit
cmdlines = command.splitlines()
# splitlines ignores trailing newlines - add it back in manually
if command.endswith('\n'):
cmdlines.append('')
if not cmdlines:
raise ValueError("No command was given")
res = []
self.child.sendline(cmdlines[0])
for line in cmdlines[1:]:
self._expect_prompt(timeout=timeout)
res.append(self.child.before)
self.child.sendline(line)
# Command was fully submitted, now wait for the next prompt
if self._expect_prompt(timeout=timeout) == 1:
# We got the continuation prompt - command was incomplete
self.child.kill(signal.SIGINT)
self._expect_prompt(timeout=1)
raise ValueError("Continuation prompt found - input was incomplete:\n"
+ command)
return u''.join(res + [self.child.before]) | def function[run_command, parameter[self, command, timeout]]:
constant[Send a command to the REPL, wait for and return output.
:param str command: The command to send. Trailing newlines are not needed.
This should be a complete block of input that will trigger execution;
if a continuation prompt is found after sending input, :exc:`ValueError`
will be raised.
:param int timeout: How long to wait for the next prompt. -1 means the
default from the :class:`pexpect.spawn` object (default 30 seconds).
None means to wait indefinitely.
]
variable[cmdlines] assign[=] call[name[command].splitlines, parameter[]]
if call[name[command].endswith, parameter[constant[
]]] begin[:]
call[name[cmdlines].append, parameter[constant[]]]
if <ast.UnaryOp object at 0x7da18bc72ec0> begin[:]
<ast.Raise object at 0x7da18bc72b60>
variable[res] assign[=] list[[]]
call[name[self].child.sendline, parameter[call[name[cmdlines]][constant[0]]]]
for taget[name[line]] in starred[call[name[cmdlines]][<ast.Slice object at 0x7da1b2347a00>]] begin[:]
call[name[self]._expect_prompt, parameter[]]
call[name[res].append, parameter[name[self].child.before]]
call[name[self].child.sendline, parameter[name[line]]]
if compare[call[name[self]._expect_prompt, parameter[]] equal[==] constant[1]] begin[:]
call[name[self].child.kill, parameter[name[signal].SIGINT]]
call[name[self]._expect_prompt, parameter[]]
<ast.Raise object at 0x7da1b23445e0>
return[call[constant[].join, parameter[binary_operation[name[res] + list[[<ast.Attribute object at 0x7da1b2347c10>]]]]]] | keyword[def] identifier[run_command] ( identifier[self] , identifier[command] , identifier[timeout] =- literal[int] ):
literal[string]
identifier[cmdlines] = identifier[command] . identifier[splitlines] ()
keyword[if] identifier[command] . identifier[endswith] ( literal[string] ):
identifier[cmdlines] . identifier[append] ( literal[string] )
keyword[if] keyword[not] identifier[cmdlines] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[res] =[]
identifier[self] . identifier[child] . identifier[sendline] ( identifier[cmdlines] [ literal[int] ])
keyword[for] identifier[line] keyword[in] identifier[cmdlines] [ literal[int] :]:
identifier[self] . identifier[_expect_prompt] ( identifier[timeout] = identifier[timeout] )
identifier[res] . identifier[append] ( identifier[self] . identifier[child] . identifier[before] )
identifier[self] . identifier[child] . identifier[sendline] ( identifier[line] )
keyword[if] identifier[self] . identifier[_expect_prompt] ( identifier[timeout] = identifier[timeout] )== literal[int] :
identifier[self] . identifier[child] . identifier[kill] ( identifier[signal] . identifier[SIGINT] )
identifier[self] . identifier[_expect_prompt] ( identifier[timeout] = literal[int] )
keyword[raise] identifier[ValueError] ( literal[string]
+ identifier[command] )
keyword[return] literal[string] . identifier[join] ( identifier[res] +[ identifier[self] . identifier[child] . identifier[before] ]) | def run_command(self, command, timeout=-1):
"""Send a command to the REPL, wait for and return output.
:param str command: The command to send. Trailing newlines are not needed.
This should be a complete block of input that will trigger execution;
if a continuation prompt is found after sending input, :exc:`ValueError`
will be raised.
:param int timeout: How long to wait for the next prompt. -1 means the
default from the :class:`pexpect.spawn` object (default 30 seconds).
None means to wait indefinitely.
"""
# Split up multiline commands and feed them in bit-by-bit
cmdlines = command.splitlines()
# splitlines ignores trailing newlines - add it back in manually
if command.endswith('\n'):
cmdlines.append('') # depends on [control=['if'], data=[]]
if not cmdlines:
raise ValueError('No command was given') # depends on [control=['if'], data=[]]
res = []
self.child.sendline(cmdlines[0])
for line in cmdlines[1:]:
self._expect_prompt(timeout=timeout)
res.append(self.child.before)
self.child.sendline(line) # depends on [control=['for'], data=['line']]
# Command was fully submitted, now wait for the next prompt
if self._expect_prompt(timeout=timeout) == 1:
# We got the continuation prompt - command was incomplete
self.child.kill(signal.SIGINT)
self._expect_prompt(timeout=1)
raise ValueError('Continuation prompt found - input was incomplete:\n' + command) # depends on [control=['if'], data=[]]
return u''.join(res + [self.child.before]) |
def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda: default) | def function[unwrap_or, parameter[self, default]]:
constant[
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
]
return[call[name[self].unwrap_or_else, parameter[<ast.Lambda object at 0x7da18bc70670>]]] | keyword[def] identifier[unwrap_or] ( identifier[self] , identifier[default] : identifier[U] )-> identifier[Union] [ identifier[T] , identifier[U] ]:
literal[string]
keyword[return] identifier[self] . identifier[unwrap_or_else] ( keyword[lambda] : identifier[default] ) | def unwrap_or(self, default: U) -> Union[T, U]:
"""
Returns the contained value or ``default``.
Args:
default: The default value.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``default``.
Notes:
If you wish to use a result of a function call as the default,
it is recommnded to use :py:meth:`unwrap_or_else` instead.
Examples:
>>> Some(0).unwrap_or(3)
0
>>> NONE.unwrap_or(0)
0
"""
return self.unwrap_or_else(lambda : default) |
def get_elastic_page_numbers(current_page, num_pages):
"""Alternative callable for page listing.
Produce an adaptive pagination, useful for big numbers of pages, by
splitting the num_pages ranges in two parts at current_page. Each part
will have its own S-curve.
"""
if num_pages <= 10:
return list(range(1, num_pages + 1))
if current_page == 1:
pages = [1]
else:
pages = ['first', 'previous']
pages.extend(_make_elastic_range(1, current_page))
if current_page != num_pages:
pages.extend(_make_elastic_range(current_page, num_pages)[1:])
pages.extend(['next', 'last'])
return pages | def function[get_elastic_page_numbers, parameter[current_page, num_pages]]:
constant[Alternative callable for page listing.
Produce an adaptive pagination, useful for big numbers of pages, by
splitting the num_pages ranges in two parts at current_page. Each part
will have its own S-curve.
]
if compare[name[num_pages] less_or_equal[<=] constant[10]] begin[:]
return[call[name[list], parameter[call[name[range], parameter[constant[1], binary_operation[name[num_pages] + constant[1]]]]]]]
if compare[name[current_page] equal[==] constant[1]] begin[:]
variable[pages] assign[=] list[[<ast.Constant object at 0x7da1b12f2aa0>]]
if compare[name[current_page] not_equal[!=] name[num_pages]] begin[:]
call[name[pages].extend, parameter[call[call[name[_make_elastic_range], parameter[name[current_page], name[num_pages]]]][<ast.Slice object at 0x7da1b12f2b60>]]]
call[name[pages].extend, parameter[list[[<ast.Constant object at 0x7da1b12f21d0>, <ast.Constant object at 0x7da1b12f0c70>]]]]
return[name[pages]] | keyword[def] identifier[get_elastic_page_numbers] ( identifier[current_page] , identifier[num_pages] ):
literal[string]
keyword[if] identifier[num_pages] <= literal[int] :
keyword[return] identifier[list] ( identifier[range] ( literal[int] , identifier[num_pages] + literal[int] ))
keyword[if] identifier[current_page] == literal[int] :
identifier[pages] =[ literal[int] ]
keyword[else] :
identifier[pages] =[ literal[string] , literal[string] ]
identifier[pages] . identifier[extend] ( identifier[_make_elastic_range] ( literal[int] , identifier[current_page] ))
keyword[if] identifier[current_page] != identifier[num_pages] :
identifier[pages] . identifier[extend] ( identifier[_make_elastic_range] ( identifier[current_page] , identifier[num_pages] )[ literal[int] :])
identifier[pages] . identifier[extend] ([ literal[string] , literal[string] ])
keyword[return] identifier[pages] | def get_elastic_page_numbers(current_page, num_pages):
"""Alternative callable for page listing.
Produce an adaptive pagination, useful for big numbers of pages, by
splitting the num_pages ranges in two parts at current_page. Each part
will have its own S-curve.
"""
if num_pages <= 10:
return list(range(1, num_pages + 1)) # depends on [control=['if'], data=['num_pages']]
if current_page == 1:
pages = [1] # depends on [control=['if'], data=[]]
else:
pages = ['first', 'previous']
pages.extend(_make_elastic_range(1, current_page))
if current_page != num_pages:
pages.extend(_make_elastic_range(current_page, num_pages)[1:])
pages.extend(['next', 'last']) # depends on [control=['if'], data=['current_page', 'num_pages']]
return pages |
def extractData(fileName, populations):
"""Extract the C1 and C2 columns for plotting.
:param fileName: the name of the MDS file.
:param populations: the population of each sample in the MDS file.
:type fileName: str
:type fileName: dict
:returns: the MDS data with information about the population of each
sample. The first element of the returned tuple is a tuple. The
last element of the returned tuple is the list of the populations
(the order is the same as in the first element). The first
element of the first tuple is the C1 data, and the last element
is the C2 data.
.. note::
If a sample in the MDS file is not in the population file, it is skip.
"""
# The different population labels
possibleLabels = list(set(populations.values()))
nbPossibleLabels = len(possibleLabels)
c1 = [[] for i in xrange(nbPossibleLabels)]
c2 = [[] for i in xrange(nbPossibleLabels)]
with open(fileName, 'r') as inputFile:
headerIndex = None
for i, line in enumerate(inputFile):
row = createRowFromPlinkSpacedOutput(line)
if i == 0:
# This is the header
headerIndex = dict([(row[j], j) for j in xrange(len(row))])
for columnName in ["FID", "IID", "C1", "C2"]:
if columnName not in headerIndex:
msg = "%(fileName)s: no column named " \
"%(columnName)s" % locals()
raise ProgramError(msg)
else:
# Getting the component 1 and 2
currC1 = row[headerIndex["C1"]]
currC2 = row[headerIndex["C2"]]
# Getting the individual informations
famID = row[headerIndex["FID"]]
indID = row[headerIndex["IID"]]
curLabel = ""
if (famID, indID) in populations:
curLabel = populations[(famID, indID)]
else:
continue
c1[possibleLabels.index(curLabel)].append(currC1)
c2[possibleLabels.index(curLabel)].append(currC2)
return (np.array(c1), np.array(c2)), possibleLabels | def function[extractData, parameter[fileName, populations]]:
constant[Extract the C1 and C2 columns for plotting.
:param fileName: the name of the MDS file.
:param populations: the population of each sample in the MDS file.
:type fileName: str
:type fileName: dict
:returns: the MDS data with information about the population of each
sample. The first element of the returned tuple is a tuple. The
last element of the returned tuple is the list of the populations
(the order is the same as in the first element). The first
element of the first tuple is the C1 data, and the last element
is the C2 data.
.. note::
If a sample in the MDS file is not in the population file, it is skip.
]
variable[possibleLabels] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[populations].values, parameter[]]]]]]
variable[nbPossibleLabels] assign[=] call[name[len], parameter[name[possibleLabels]]]
variable[c1] assign[=] <ast.ListComp object at 0x7da1b0a1ea10>
variable[c2] assign[=] <ast.ListComp object at 0x7da1b0a1c070>
with call[name[open], parameter[name[fileName], constant[r]]] begin[:]
variable[headerIndex] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b0a1d240>, <ast.Name object at 0x7da1b0a1c100>]]] in starred[call[name[enumerate], parameter[name[inputFile]]]] begin[:]
variable[row] assign[=] call[name[createRowFromPlinkSpacedOutput], parameter[name[line]]]
if compare[name[i] equal[==] constant[0]] begin[:]
variable[headerIndex] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b0926500>]]
for taget[name[columnName]] in starred[list[[<ast.Constant object at 0x7da1b09253c0>, <ast.Constant object at 0x7da1b0926b60>, <ast.Constant object at 0x7da1b0926350>, <ast.Constant object at 0x7da1b0924610>]]] begin[:]
if compare[name[columnName] <ast.NotIn object at 0x7da2590d7190> name[headerIndex]] begin[:]
variable[msg] assign[=] binary_operation[constant[%(fileName)s: no column named %(columnName)s] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
<ast.Raise object at 0x7da1b0926170>
return[tuple[[<ast.Tuple object at 0x7da1b0ad9ff0>, <ast.Name object at 0x7da1b0ad94b0>]]] | keyword[def] identifier[extractData] ( identifier[fileName] , identifier[populations] ):
literal[string]
identifier[possibleLabels] = identifier[list] ( identifier[set] ( identifier[populations] . identifier[values] ()))
identifier[nbPossibleLabels] = identifier[len] ( identifier[possibleLabels] )
identifier[c1] =[[] keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[nbPossibleLabels] )]
identifier[c2] =[[] keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[nbPossibleLabels] )]
keyword[with] identifier[open] ( identifier[fileName] , literal[string] ) keyword[as] identifier[inputFile] :
identifier[headerIndex] = keyword[None]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[inputFile] ):
identifier[row] = identifier[createRowFromPlinkSpacedOutput] ( identifier[line] )
keyword[if] identifier[i] == literal[int] :
identifier[headerIndex] = identifier[dict] ([( identifier[row] [ identifier[j] ], identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[len] ( identifier[row] ))])
keyword[for] identifier[columnName] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[columnName] keyword[not] keyword[in] identifier[headerIndex] :
identifier[msg] = literal[string] literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[else] :
identifier[currC1] = identifier[row] [ identifier[headerIndex] [ literal[string] ]]
identifier[currC2] = identifier[row] [ identifier[headerIndex] [ literal[string] ]]
identifier[famID] = identifier[row] [ identifier[headerIndex] [ literal[string] ]]
identifier[indID] = identifier[row] [ identifier[headerIndex] [ literal[string] ]]
identifier[curLabel] = literal[string]
keyword[if] ( identifier[famID] , identifier[indID] ) keyword[in] identifier[populations] :
identifier[curLabel] = identifier[populations] [( identifier[famID] , identifier[indID] )]
keyword[else] :
keyword[continue]
identifier[c1] [ identifier[possibleLabels] . identifier[index] ( identifier[curLabel] )]. identifier[append] ( identifier[currC1] )
identifier[c2] [ identifier[possibleLabels] . identifier[index] ( identifier[curLabel] )]. identifier[append] ( identifier[currC2] )
keyword[return] ( identifier[np] . identifier[array] ( identifier[c1] ), identifier[np] . identifier[array] ( identifier[c2] )), identifier[possibleLabels] | def extractData(fileName, populations):
"""Extract the C1 and C2 columns for plotting.
:param fileName: the name of the MDS file.
:param populations: the population of each sample in the MDS file.
:type fileName: str
:type fileName: dict
:returns: the MDS data with information about the population of each
sample. The first element of the returned tuple is a tuple. The
last element of the returned tuple is the list of the populations
(the order is the same as in the first element). The first
element of the first tuple is the C1 data, and the last element
is the C2 data.
.. note::
If a sample in the MDS file is not in the population file, it is skip.
"""
# The different population labels
possibleLabels = list(set(populations.values()))
nbPossibleLabels = len(possibleLabels)
c1 = [[] for i in xrange(nbPossibleLabels)]
c2 = [[] for i in xrange(nbPossibleLabels)]
with open(fileName, 'r') as inputFile:
headerIndex = None
for (i, line) in enumerate(inputFile):
row = createRowFromPlinkSpacedOutput(line)
if i == 0:
# This is the header
headerIndex = dict([(row[j], j) for j in xrange(len(row))])
for columnName in ['FID', 'IID', 'C1', 'C2']:
if columnName not in headerIndex:
msg = '%(fileName)s: no column named %(columnName)s' % locals()
raise ProgramError(msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['columnName']] # depends on [control=['if'], data=[]]
else:
# Getting the component 1 and 2
currC1 = row[headerIndex['C1']]
currC2 = row[headerIndex['C2']]
# Getting the individual informations
famID = row[headerIndex['FID']]
indID = row[headerIndex['IID']]
curLabel = ''
if (famID, indID) in populations:
curLabel = populations[famID, indID] # depends on [control=['if'], data=['populations']]
else:
continue
c1[possibleLabels.index(curLabel)].append(currC1)
c2[possibleLabels.index(curLabel)].append(currC2) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['inputFile']]
return ((np.array(c1), np.array(c2)), possibleLabels) |
def slice_save(self, astr_outputFile):
'''
Saves a single slice.
ARGS
o astr_output
The output filename to save the slice to.
'''
self._log('Outputfile = %s\n' % astr_outputFile)
fformat = astr_outputFile.split('.')[-1]
if fformat == 'dcm':
if self._dcm:
self._dcm.pixel_array.flat = self._Mnp_2Dslice.flat
self._dcm.PixelData = self._dcm.pixel_array.tostring()
self._dcm.save_as(astr_outputFile)
else:
raise ValueError('dcm output format only available for DICOM files')
else:
pylab.imsave(astr_outputFile, self._Mnp_2Dslice, format=fformat, cmap = cm.Greys_r) | def function[slice_save, parameter[self, astr_outputFile]]:
constant[
Saves a single slice.
ARGS
o astr_output
The output filename to save the slice to.
]
call[name[self]._log, parameter[binary_operation[constant[Outputfile = %s
] <ast.Mod object at 0x7da2590d6920> name[astr_outputFile]]]]
variable[fformat] assign[=] call[call[name[astr_outputFile].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da1b0859330>]
if compare[name[fformat] equal[==] constant[dcm]] begin[:]
if name[self]._dcm begin[:]
name[self]._dcm.pixel_array.flat assign[=] name[self]._Mnp_2Dslice.flat
name[self]._dcm.PixelData assign[=] call[name[self]._dcm.pixel_array.tostring, parameter[]]
call[name[self]._dcm.save_as, parameter[name[astr_outputFile]]] | keyword[def] identifier[slice_save] ( identifier[self] , identifier[astr_outputFile] ):
literal[string]
identifier[self] . identifier[_log] ( literal[string] % identifier[astr_outputFile] )
identifier[fformat] = identifier[astr_outputFile] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] identifier[fformat] == literal[string] :
keyword[if] identifier[self] . identifier[_dcm] :
identifier[self] . identifier[_dcm] . identifier[pixel_array] . identifier[flat] = identifier[self] . identifier[_Mnp_2Dslice] . identifier[flat]
identifier[self] . identifier[_dcm] . identifier[PixelData] = identifier[self] . identifier[_dcm] . identifier[pixel_array] . identifier[tostring] ()
identifier[self] . identifier[_dcm] . identifier[save_as] ( identifier[astr_outputFile] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[pylab] . identifier[imsave] ( identifier[astr_outputFile] , identifier[self] . identifier[_Mnp_2Dslice] , identifier[format] = identifier[fformat] , identifier[cmap] = identifier[cm] . identifier[Greys_r] ) | def slice_save(self, astr_outputFile):
"""
Saves a single slice.
ARGS
o astr_output
The output filename to save the slice to.
"""
self._log('Outputfile = %s\n' % astr_outputFile)
fformat = astr_outputFile.split('.')[-1]
if fformat == 'dcm':
if self._dcm:
self._dcm.pixel_array.flat = self._Mnp_2Dslice.flat
self._dcm.PixelData = self._dcm.pixel_array.tostring()
self._dcm.save_as(astr_outputFile) # depends on [control=['if'], data=[]]
else:
raise ValueError('dcm output format only available for DICOM files') # depends on [control=['if'], data=[]]
else:
pylab.imsave(astr_outputFile, self._Mnp_2Dslice, format=fformat, cmap=cm.Greys_r) |
def remove_from_category(self, category, name):
"""
Removes given action from given category.
:param category: Category to remove the action from.
:type category: unicode
:param name: Action name.
:type name: unicode
:return: Method success.
:rtype: bool
"""
category = self.get_category(category)
if not isinstance(category, dict):
return False
del (category[name])
LOGGER.debug("> Removed '{0}' action from '{1}' category!".format(category, name))
return True | def function[remove_from_category, parameter[self, category, name]]:
constant[
Removes given action from given category.
:param category: Category to remove the action from.
:type category: unicode
:param name: Action name.
:type name: unicode
:return: Method success.
:rtype: bool
]
variable[category] assign[=] call[name[self].get_category, parameter[name[category]]]
if <ast.UnaryOp object at 0x7da1b09bca00> begin[:]
return[constant[False]]
<ast.Delete object at 0x7da1b09bfe80>
call[name[LOGGER].debug, parameter[call[constant[> Removed '{0}' action from '{1}' category!].format, parameter[name[category], name[name]]]]]
return[constant[True]] | keyword[def] identifier[remove_from_category] ( identifier[self] , identifier[category] , identifier[name] ):
literal[string]
identifier[category] = identifier[self] . identifier[get_category] ( identifier[category] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[category] , identifier[dict] ):
keyword[return] keyword[False]
keyword[del] ( identifier[category] [ identifier[name] ])
identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[category] , identifier[name] ))
keyword[return] keyword[True] | def remove_from_category(self, category, name):
"""
Removes given action from given category.
:param category: Category to remove the action from.
:type category: unicode
:param name: Action name.
:type name: unicode
:return: Method success.
:rtype: bool
"""
category = self.get_category(category)
if not isinstance(category, dict):
return False # depends on [control=['if'], data=[]]
del category[name]
LOGGER.debug("> Removed '{0}' action from '{1}' category!".format(category, name))
return True |
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs) | def function[generate_GitHub_token, parameter[]]:
constant[
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
]
if compare[name[scopes] is constant[None]] begin[:]
variable[scopes] assign[=] list[[<ast.Constant object at 0x7da1b1040bb0>]]
variable[AUTH_URL] assign[=] constant[https://api.github.com/authorizations]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1043760>, <ast.Constant object at 0x7da1b1042f50>, <ast.Constant object at 0x7da1b1043790>, <ast.Constant object at 0x7da1b1042110>], [<ast.Name object at 0x7da1b1040c40>, <ast.Name object at 0x7da1b10412a0>, <ast.Constant object at 0x7da1b1042b90>, <ast.Call object at 0x7da1b1040700>]]
return[call[name[GitHub_post], parameter[name[data], name[AUTH_URL]]]] | keyword[def] identifier[generate_GitHub_token] (*, identifier[note] = literal[string] , identifier[scopes] = keyword[None] ,** identifier[login_kwargs] ):
literal[string]
keyword[if] identifier[scopes] keyword[is] keyword[None] :
identifier[scopes] =[ literal[string] ]
identifier[AUTH_URL] = literal[string]
identifier[data] ={
literal[string] : identifier[scopes] ,
literal[string] : identifier[note] ,
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[uuid] . identifier[uuid4] ()),
}
keyword[return] identifier[GitHub_post] ( identifier[data] , identifier[AUTH_URL] ,** identifier[login_kwargs] ) | def generate_GitHub_token(*, note='Doctr token for pushing to gh-pages from Travis', scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo'] # depends on [control=['if'], data=['scopes']]
AUTH_URL = 'https://api.github.com/authorizations'
data = {'scopes': scopes, 'note': note, 'note_url': 'https://github.com/drdoctr/doctr', 'fingerprint': str(uuid.uuid4())}
return GitHub_post(data, AUTH_URL, **login_kwargs) |
def present(name, DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion="1.5"):
'''
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
'DedicatedMasterEnabled': False,
'InstanceCount': 1,
'InstanceType': 'm3.medium.elasticsearch',
'ZoneAwarenessEnabled': False
}
if EBSOptions is None:
EBSOptions = {
'EBSEnabled': False,
}
if SnapshotOptions is None:
SnapshotOptions = {
'AutomatedSnapshotStartHour': 0
}
if AdvancedOptions is None:
AdvancedOptions = {
'rest.action.multi.allow_explicit_index': 'true'
}
if Tags is None:
Tags = {}
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies)
except ValueError as e:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
return ret
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion), # future lint: disable=blacklisted-function
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False
ret['comment'] = (
'Failed to update domain: version cannot be modified '
'from {0} to {1}.'.format(
_status.get('ElasticsearchVersion'),
str(ElasticsearchVersion) # future lint: disable=blacklisted-function
)
)
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
_describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None)
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
'EBSOptions': EBSOptions,
'AccessPolicies': AccessPolicies,
'SnapshotOptions': SnapshotOptions,
'AdvancedOptions': AdvancedOptions}
for k, v in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
region=region, key=key,
keyid=keyid, profile=profile,
**comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret
return ret | def function[present, parameter[name, DomainName, ElasticsearchClusterConfig, EBSOptions, AccessPolicies, SnapshotOptions, AdvancedOptions, Tags, region, key, keyid, profile, ElasticsearchVersion]]:
constant[
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e8e20>, <ast.Constant object at 0x7da1b21e99c0>, <ast.Constant object at 0x7da1b21eab60>, <ast.Constant object at 0x7da1b21e85b0>], [<ast.Name object at 0x7da1b21e9750>, <ast.Constant object at 0x7da1b21ea620>, <ast.Constant object at 0x7da1b21e9930>, <ast.Dict object at 0x7da1b21e8f70>]]
if compare[name[ElasticsearchClusterConfig] is constant[None]] begin[:]
variable[ElasticsearchClusterConfig] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e8940>, <ast.Constant object at 0x7da1b21e9690>, <ast.Constant object at 0x7da1b21ea110>, <ast.Constant object at 0x7da1b21ea3e0>], [<ast.Constant object at 0x7da1b21eb2e0>, <ast.Constant object at 0x7da1b21eb520>, <ast.Constant object at 0x7da1b21ea290>, <ast.Constant object at 0x7da1b21ebee0>]]
if compare[name[EBSOptions] is constant[None]] begin[:]
variable[EBSOptions] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e83a0>], [<ast.Constant object at 0x7da1b21ea860>]]
if compare[name[SnapshotOptions] is constant[None]] begin[:]
variable[SnapshotOptions] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ea800>], [<ast.Constant object at 0x7da1b21e94b0>]]
if compare[name[AdvancedOptions] is constant[None]] begin[:]
variable[AdvancedOptions] assign[=] dictionary[[<ast.Constant object at 0x7da1b21eb190>], [<ast.Constant object at 0x7da1b21e9e40>]]
if compare[name[Tags] is constant[None]] begin[:]
variable[Tags] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b21e8d90> begin[:]
<ast.Try object at 0x7da1b21e9210>
variable[r] assign[=] call[call[name[__salt__]][constant[boto_elasticsearch_domain.exists]], parameter[]]
if compare[constant[error] in name[r]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to create domain: {0}.].format, parameter[call[call[name[r]][constant[error]]][constant[message]]]]
return[name[ret]]
if <ast.UnaryOp object at 0x7da1b21e9090> begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Domain {0} is set to be created.].format, parameter[name[DomainName]]]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
variable[r] assign[=] call[call[name[__salt__]][constant[boto_elasticsearch_domain.create]], parameter[]]
if <ast.UnaryOp object at 0x7da1b21eb070> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to create domain: {0}.].format, parameter[call[call[name[r]][constant[error]]][constant[message]]]]
return[name[ret]]
variable[_describe] assign[=] call[call[name[__salt__]][constant[boto_elasticsearch_domain.describe]], parameter[name[DomainName]]]
call[call[name[ret]][constant[changes]]][constant[old]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21aeb00>], [<ast.Constant object at 0x7da1b21ac1c0>]]
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] name[_describe]
call[name[ret]][constant[comment]] assign[=] call[constant[Domain {0} created.].format, parameter[name[DomainName]]]
return[name[ret]]
call[name[ret]][constant[comment]] assign[=] call[name[os].linesep.join, parameter[list[[<ast.Subscript object at 0x7da1b21ac7f0>, <ast.Call object at 0x7da1b21ac490>]]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[], []]
variable[_status] assign[=] call[call[call[name[__salt__]][constant[boto_elasticsearch_domain.status]], parameter[]]][constant[domain]]
if compare[call[name[_status].get, parameter[constant[ElasticsearchVersion]]] not_equal[!=] call[name[str], parameter[name[ElasticsearchVersion]]]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to update domain: version cannot be modified from {0} to {1}.].format, parameter[call[name[_status].get, parameter[constant[ElasticsearchVersion]]], call[name[str], parameter[name[ElasticsearchVersion]]]]]
return[name[ret]]
variable[_describe] assign[=] call[call[call[name[__salt__]][constant[boto_elasticsearch_domain.describe]], parameter[]]][constant[domain]]
call[name[_describe]][constant[AccessPolicies]] assign[=] call[name[salt].utils.json.loads, parameter[call[name[_describe]][constant[AccessPolicies]]]]
if <ast.UnaryOp object at 0x7da1b21af6d0> begin[:]
variable[opts] assign[=] call[name[_describe].get, parameter[constant[EBSOptions], dictionary[[], []]]]
call[name[opts].pop, parameter[constant[VolumeSize], constant[None]]]
call[name[opts].pop, parameter[constant[VolumeType], constant[None]]]
variable[comm_args] assign[=] dictionary[[], []]
variable[need_update] assign[=] constant[False]
variable[es_opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ad390>, <ast.Constant object at 0x7da1b21ae2c0>, <ast.Constant object at 0x7da1b21aef20>, <ast.Constant object at 0x7da1b21adfc0>, <ast.Constant object at 0x7da1b21ae6b0>], [<ast.Name object at 0x7da1b21ac3d0>, <ast.Name object at 0x7da1b21ad4e0>, <ast.Name object at 0x7da1b21adae0>, <ast.Name object at 0x7da1b21ad330>, <ast.Name object at 0x7da1b21af850>]]
for taget[tuple[[<ast.Name object at 0x7da1b21ac970>, <ast.Name object at 0x7da1b21ad030>]]] in starred[call[name[six].iteritems, parameter[name[es_opts]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b21ae8f0> begin[:]
variable[need_update] assign[=] constant[True]
call[name[comm_args]][name[k]] assign[=] name[v]
call[call[call[name[ret]][constant[changes]].setdefault, parameter[constant[new], dictionary[[], []]]]][name[k]] assign[=] name[v]
call[call[call[name[ret]][constant[changes]].setdefault, parameter[constant[old], dictionary[[], []]]]][name[k]] assign[=] call[name[_describe]][name[k]]
if name[need_update] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
variable[msg] assign[=] call[constant[Domain {0} set to be modified.].format, parameter[name[DomainName]]]
call[name[ret]][constant[comment]] assign[=] name[msg]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
call[name[ret]][constant[comment]] assign[=] call[name[os].linesep.join, parameter[list[[<ast.Subscript object at 0x7da1b1c5cb50>, <ast.Constant object at 0x7da1b1c5edd0>]]]]
variable[r] assign[=] call[call[name[__salt__]][constant[boto_elasticsearch_domain.update]], parameter[]]
if <ast.UnaryOp object at 0x7da1b1c5ec80> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to update domain: {0}.].format, parameter[call[name[r]][constant[error]]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[], []]
return[name[ret]]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] , identifier[DomainName] ,
identifier[ElasticsearchClusterConfig] = keyword[None] ,
identifier[EBSOptions] = keyword[None] ,
identifier[AccessPolicies] = keyword[None] ,
identifier[SnapshotOptions] = keyword[None] ,
identifier[AdvancedOptions] = keyword[None] ,
identifier[Tags] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ,
identifier[ElasticsearchVersion] = literal[string] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[DomainName] ,
literal[string] : keyword[True] ,
literal[string] : literal[string] ,
literal[string] :{}
}
keyword[if] identifier[ElasticsearchClusterConfig] keyword[is] keyword[None] :
identifier[ElasticsearchClusterConfig] ={
literal[string] : keyword[False] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : keyword[False]
}
keyword[if] identifier[EBSOptions] keyword[is] keyword[None] :
identifier[EBSOptions] ={
literal[string] : keyword[False] ,
}
keyword[if] identifier[SnapshotOptions] keyword[is] keyword[None] :
identifier[SnapshotOptions] ={
literal[string] : literal[int]
}
keyword[if] identifier[AdvancedOptions] keyword[is] keyword[None] :
identifier[AdvancedOptions] ={
literal[string] : literal[string]
}
keyword[if] identifier[Tags] keyword[is] keyword[None] :
identifier[Tags] ={}
keyword[if] identifier[AccessPolicies] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[AccessPolicies] , identifier[six] . identifier[string_types] ):
keyword[try] :
identifier[AccessPolicies] = identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[AccessPolicies] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[e] . identifier[message] )
keyword[return] identifier[ret]
identifier[r] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] = identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] literal[string] keyword[in] identifier[r] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[r] [ literal[string] ][ literal[string] ])
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[r] . identifier[get] ( literal[string] ):
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[DomainName] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[r] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] = identifier[DomainName] ,
identifier[ElasticsearchClusterConfig] = identifier[ElasticsearchClusterConfig] ,
identifier[EBSOptions] = identifier[EBSOptions] ,
identifier[AccessPolicies] = identifier[AccessPolicies] ,
identifier[SnapshotOptions] = identifier[SnapshotOptions] ,
identifier[AdvancedOptions] = identifier[AdvancedOptions] ,
identifier[ElasticsearchVersion] = identifier[str] ( identifier[ElasticsearchVersion] ),
identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[r] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[r] [ literal[string] ][ literal[string] ])
keyword[return] identifier[ret]
identifier[_describe] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : keyword[None] }
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[_describe]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[DomainName] )
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= identifier[os] . identifier[linesep] . identifier[join] ([ identifier[ret] [ literal[string] ], literal[string] . identifier[format] ( identifier[DomainName] )])
identifier[ret] [ literal[string] ]={}
identifier[_status] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] = identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )[ literal[string] ]
keyword[if] identifier[_status] . identifier[get] ( literal[string] )!= identifier[str] ( identifier[ElasticsearchVersion] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]=(
literal[string]
literal[string] . identifier[format] (
identifier[_status] . identifier[get] ( literal[string] ),
identifier[str] ( identifier[ElasticsearchVersion] )
)
)
keyword[return] identifier[ret]
identifier[_describe] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] = identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )[ literal[string] ]
identifier[_describe] [ literal[string] ]= identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[_describe] [ literal[string] ])
keyword[if] keyword[not] identifier[_describe] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ):
identifier[opts] = identifier[_describe] . identifier[get] ( literal[string] ,{})
identifier[opts] . identifier[pop] ( literal[string] , keyword[None] )
identifier[opts] . identifier[pop] ( literal[string] , keyword[None] )
identifier[comm_args] ={}
identifier[need_update] = keyword[False]
identifier[es_opts] ={ literal[string] : identifier[ElasticsearchClusterConfig] ,
literal[string] : identifier[EBSOptions] ,
literal[string] : identifier[AccessPolicies] ,
literal[string] : identifier[SnapshotOptions] ,
literal[string] : identifier[AdvancedOptions] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[es_opts] ):
keyword[if] keyword[not] identifier[_compare_json] ( identifier[v] , identifier[_describe] [ identifier[k] ]):
identifier[need_update] = keyword[True]
identifier[comm_args] [ identifier[k] ]= identifier[v]
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ identifier[k] ]= identifier[v]
identifier[ret] [ literal[string] ]. identifier[setdefault] ( literal[string] ,{})[ identifier[k] ]= identifier[_describe] [ identifier[k] ]
keyword[if] identifier[need_update] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string] . identifier[format] ( identifier[DomainName] )
identifier[ret] [ literal[string] ]= identifier[msg]
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= identifier[os] . identifier[linesep] . identifier[join] ([ identifier[ret] [ literal[string] ], literal[string] ])
identifier[r] = identifier[__salt__] [ literal[string] ]( identifier[DomainName] = identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ,
** identifier[comm_args] )
keyword[if] keyword[not] identifier[r] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[r] [ literal[string] ])
identifier[ret] [ literal[string] ]={}
keyword[return] identifier[ret]
keyword[return] identifier[ret] | def present(name, DomainName, ElasticsearchClusterConfig=None, EBSOptions=None, AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None, Tags=None, region=None, key=None, keyid=None, profile=None, ElasticsearchVersion='1.5'):
"""
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
"""
ret = {'name': DomainName, 'result': True, 'comment': '', 'changes': {}}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {'DedicatedMasterEnabled': False, 'InstanceCount': 1, 'InstanceType': 'm3.medium.elasticsearch', 'ZoneAwarenessEnabled': False} # depends on [control=['if'], data=['ElasticsearchClusterConfig']]
if EBSOptions is None:
EBSOptions = {'EBSEnabled': False} # depends on [control=['if'], data=['EBSOptions']]
if SnapshotOptions is None:
SnapshotOptions = {'AutomatedSnapshotStartHour': 0} # depends on [control=['if'], data=['SnapshotOptions']]
if AdvancedOptions is None:
AdvancedOptions = {'rest.action.multi.allow_explicit_index': 'true'} # depends on [control=['if'], data=['AdvancedOptions']]
if Tags is None:
Tags = {} # depends on [control=['if'], data=['Tags']]
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies) # depends on [control=['try'], data=[]]
except ValueError as e:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
return ret # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret # depends on [control=['if'], data=['r']]
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret # depends on [control=['if'], data=[]] # future lint: disable=blacklisted-function
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName, ElasticsearchClusterConfig=ElasticsearchClusterConfig, EBSOptions=EBSOptions, AccessPolicies=AccessPolicies, SnapshotOptions=SnapshotOptions, AdvancedOptions=AdvancedOptions, ElasticsearchVersion=str(ElasticsearchVersion), region=region, key=key, keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret # depends on [control=['if'], data=[]]
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName, region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret # depends on [control=['if'], data=[]]
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False # future lint: disable=blacklisted-function
ret['comment'] = 'Failed to update domain: version cannot be modified from {0} to {1}.'.format(_status.get('ElasticsearchVersion'), str(ElasticsearchVersion))
return ret # depends on [control=['if'], data=[]]
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)['domain']
_describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None) # depends on [control=['if'], data=[]]
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig, 'EBSOptions': EBSOptions, 'AccessPolicies': AccessPolicies, 'SnapshotOptions': SnapshotOptions, 'AdvancedOptions': AdvancedOptions}
for (k, v) in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile, **comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ret |
def add_workflow(self, workflow, commit=False):
        """
        Add a new workflow and optionally commit it to the database.

        :param workflow: The workflow to register; its ``workflow_id`` must
            be unique within this manager.
        :param commit: Whether to immediately persist the workflow to the
            database; if False it is tracked as uncommitted instead.
        :type workflow: Workflow
        :type commit: bool
        :return: None
        :raises KeyError: If a workflow with the same id is already registered.
        """
        # Reject duplicates instead of silently overwriting an existing entry.
        if workflow.workflow_id in self.workflows:
            raise KeyError("Workflow with id {} already exists".format(workflow.workflow_id))
        self.workflows[workflow.workflow_id] = workflow
        logging.info("Added workflow {} to workflow manager".format(workflow.workflow_id))
        # Optionally also save the workflow to database; otherwise remember it
        # as uncommitted so it can be persisted later.
        if commit:
            self.commit_workflow(workflow.workflow_id)
        else:
            self.uncommitted_workflows.add(workflow.workflow_id) | def function[add_workflow, parameter[self, workflow, commit]]:
constant[
Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None
]
if compare[name[workflow].workflow_id in name[self].workflows] begin[:]
<ast.Raise object at 0x7da207f9ac20>
call[name[self].workflows][name[workflow].workflow_id] assign[=] name[workflow]
call[name[logging].info, parameter[call[constant[Added workflow {} to workflow manager].format, parameter[name[workflow].workflow_id]]]]
if name[commit] begin[:]
call[name[self].commit_workflow, parameter[name[workflow].workflow_id]] | keyword[def] identifier[add_workflow] ( identifier[self] , identifier[workflow] , identifier[commit] = keyword[False] ):
literal[string]
keyword[if] identifier[workflow] . identifier[workflow_id] keyword[in] identifier[self] . identifier[workflows] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[workflow] . identifier[workflow_id] ))
identifier[self] . identifier[workflows] [ identifier[workflow] . identifier[workflow_id] ]= identifier[workflow]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[workflow] . identifier[workflow_id] ))
keyword[if] identifier[commit] :
identifier[self] . identifier[commit_workflow] ( identifier[workflow] . identifier[workflow_id] )
keyword[else] :
identifier[self] . identifier[uncommitted_workflows] . identifier[add] ( identifier[workflow] . identifier[workflow_id] ) | def add_workflow(self, workflow, commit=False):
"""
Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None
"""
if workflow.workflow_id in self.workflows:
raise KeyError('Workflow with id {} already exists'.format(workflow.workflow_id)) # depends on [control=['if'], data=[]]
self.workflows[workflow.workflow_id] = workflow
logging.info('Added workflow {} to workflow manager'.format(workflow.workflow_id))
# Optionally also save the workflow to database
if commit:
self.commit_workflow(workflow.workflow_id) # depends on [control=['if'], data=[]]
else:
self.uncommitted_workflows.add(workflow.workflow_id) |
def disconnectNetToMs(Facility_presence=0, ProgressIndicator_presence=0,
                      UserUser_presence=0, AllowedActions_presence=0):
    """DISCONNECT Section 9.3.7.1

    Build a network-to-MS DISCONNECT message. The mandatory part is the
    protocol discriminator, message type (0x25) and Cause IE; each
    ``*_presence`` flag set to 1 appends the corresponding optional
    information element to the packet.
    """
    # NOTE(review): the `is 1` comparisons rely on CPython small-int
    # caching; `== 1` would be the safe/idiomatic test.
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x25)  # 00100101
    c = Cause()  # mandatory Cause IE
    packet = a / b / c
    # Optional IEs, appended in order: Facility, Progress Indicator,
    # User-User, Allowed Actions.
    if Facility_presence is 1:
        d = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
        packet = packet / d
    if ProgressIndicator_presence is 1:
        e = ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0)
        packet = packet / e
    if UserUser_presence is 1:
        f = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
        packet = packet / f
    if AllowedActions_presence is 1:
        g = AllowedActionsHdr(ieiAA=0x7B, eightBitAA=0x0)
        packet = packet / g
    return packet | def function[disconnectNetToMs, parameter[Facility_presence, ProgressIndicator_presence, UserUser_presence, AllowedActions_presence]]:
constant[DISCONNECT Section 9.3.7.1]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[c] assign[=] call[name[Cause], parameter[]]
variable[packet] assign[=] binary_operation[binary_operation[name[a] / name[b]] / name[c]]
if compare[name[Facility_presence] is constant[1]] begin[:]
variable[d] assign[=] call[name[FacilityHdr], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[d]]
if compare[name[ProgressIndicator_presence] is constant[1]] begin[:]
variable[e] assign[=] call[name[ProgressIndicatorHdr], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[e]]
if compare[name[UserUser_presence] is constant[1]] begin[:]
variable[f] assign[=] call[name[UserUserHdr], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[f]]
if compare[name[AllowedActions_presence] is constant[1]] begin[:]
variable[g] assign[=] call[name[AllowedActionsHdr], parameter[]]
variable[packet] assign[=] binary_operation[name[packet] / name[g]]
return[name[packet]] | keyword[def] identifier[disconnectNetToMs] ( identifier[Facility_presence] = literal[int] , identifier[ProgressIndicator_presence] = literal[int] ,
identifier[UserUser_presence] = literal[int] , identifier[AllowedActions_presence] = literal[int] ):
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[c] = identifier[Cause] ()
identifier[packet] = identifier[a] / identifier[b] / identifier[c]
keyword[if] identifier[Facility_presence] keyword[is] literal[int] :
identifier[d] = identifier[FacilityHdr] ( identifier[ieiF] = literal[int] , identifier[eightBitF] = literal[int] )
identifier[packet] = identifier[packet] / identifier[d]
keyword[if] identifier[ProgressIndicator_presence] keyword[is] literal[int] :
identifier[e] = identifier[ProgressIndicatorHdr] ( identifier[ieiPI] = literal[int] , identifier[eightBitPI] = literal[int] )
identifier[packet] = identifier[packet] / identifier[e]
keyword[if] identifier[UserUser_presence] keyword[is] literal[int] :
identifier[f] = identifier[UserUserHdr] ( identifier[ieiUU] = literal[int] , identifier[eightBitUU] = literal[int] )
identifier[packet] = identifier[packet] / identifier[f]
keyword[if] identifier[AllowedActions_presence] keyword[is] literal[int] :
identifier[g] = identifier[AllowedActionsHdr] ( identifier[ieiAA] = literal[int] , identifier[eightBitAA] = literal[int] )
identifier[packet] = identifier[packet] / identifier[g]
keyword[return] identifier[packet] | def disconnectNetToMs(Facility_presence=0, ProgressIndicator_presence=0, UserUser_presence=0, AllowedActions_presence=0):
"""DISCONNECT Section 9.3.7.1"""
a = TpPd(pd=3)
b = MessageType(mesType=37) # 00100101
c = Cause()
packet = a / b / c
if Facility_presence is 1:
d = FacilityHdr(ieiF=28, eightBitF=0)
packet = packet / d # depends on [control=['if'], data=[]]
if ProgressIndicator_presence is 1:
e = ProgressIndicatorHdr(ieiPI=30, eightBitPI=0)
packet = packet / e # depends on [control=['if'], data=[]]
if UserUser_presence is 1:
f = UserUserHdr(ieiUU=126, eightBitUU=0)
packet = packet / f # depends on [control=['if'], data=[]]
if AllowedActions_presence is 1:
g = AllowedActionsHdr(ieiAA=123, eightBitAA=0)
packet = packet / g # depends on [control=['if'], data=[]]
return packet |
def tostype(self, stype):
        """Return a copy of the array with chosen storage type.

        Parameters
        ----------
        stype : str
            Target storage type passed through to ``op.cast_storage``.
            ``'csr'`` is rejected because casting from row_sparse to csr
            is not supported.

        Returns
        -------
        NDArray or RowSparseNDArray
            A copy of the array with the chosen storage stype

        Raises
        ------
        ValueError
            If ``stype`` is ``'csr'``.
        """
        # pylint: disable= no-member, protected-access
        if stype == 'csr':
            raise ValueError("cast_storage from row_sparse to csr is not supported")
        return op.cast_storage(self, stype=stype) | def function[tostype, parameter[self, stype]]:
constant[Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
]
if compare[name[stype] equal[==] constant[csr]] begin[:]
<ast.Raise object at 0x7da1b20f8760>
return[call[name[op].cast_storage, parameter[name[self]]]] | keyword[def] identifier[tostype] ( identifier[self] , identifier[stype] ):
literal[string]
keyword[if] identifier[stype] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[op] . identifier[cast_storage] ( identifier[self] , identifier[stype] = identifier[stype] ) | def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'csr':
raise ValueError('cast_storage from row_sparse to csr is not supported') # depends on [control=['if'], data=[]]
return op.cast_storage(self, stype=stype) |
def geometric_fba(model, epsilon=1E-06, max_tries=200, processes=None):
    """
    Perform geometric FBA to obtain a unique, centered flux distribution.

    Geometric FBA [1]_ formulates the problem as a polyhedron and
    then solves it by bounding the convex hull of the polyhedron.
    The bounding forms a box around the convex hull which reduces
    with every iteration and extracts a unique solution in this way.

    Parameters
    ----------
    model: cobra.Model
        The model to perform geometric FBA on.
    epsilon: float, optional
        The convergence tolerance of the model (default 1E-06).
    max_tries: int, optional
        Maximum number of iterations (default 200).
    processes : int, optional
        The number of parallel processes to run. If not explicitly passed,
        will be set from the global configuration singleton.

    Returns
    -------
    cobra.Solution
        The solution object containing all the constraints required
        for geometric FBA.

    Raises
    ------
    RuntimeError
        If the iteration count reaches ``max_tries`` before the bounding
        box shrinks below ``epsilon``.

    References
    ----------
    .. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
        Flux balance analysis: A geometric perspective.
        Journal of theoretical biology.258. 311-5.
        10.1016/j.jtbi.2009.01.027.
    """
    # All constraints/objective changes are scoped to this context so the
    # model is restored afterwards.
    with model:
        # Variables' and constraints' storage variables.
        consts = []
        obj_vars = []
        updating_vars_cons = []
        # The first iteration.
        prob = model.problem
        add_pfba(model) # Minimize the solution space to a convex hull.
        model.optimize()
        fva_sol = flux_variability_analysis(model, processes=processes)
        # Absolute midpoint of each reaction's FVA interval -- the center
        # of the current bounding box, used as a bound below.
        mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
        # Set the gFBA constraints.
        for rxn in model.reactions:
            var = prob.Variable("geometric_fba_" + rxn.id,
                                lb=0,
                                ub=mean_flux[rxn.id])
            upper_const = prob.Constraint(rxn.flux_expression - var,
                                          ub=mean_flux[rxn.id],
                                          name="geometric_fba_upper_const_" +
                                          rxn.id)
            lower_const = prob.Constraint(rxn.flux_expression + var,
                                          lb=fva_sol.at[rxn.id, "minimum"],
                                          name="geometric_fba_lower_const_" +
                                          rxn.id)
            updating_vars_cons.append((rxn.id, var, upper_const, lower_const))
            consts.extend([var, upper_const, lower_const])
            obj_vars.append(var)
        model.add_cons_vars(consts)
        # Minimize the distance between the flux distribution and center.
        model.objective = prob.Objective(Zero, sloppy=True, direction="min")
        model.objective.set_linear_coefficients({v: 1.0 for v in obj_vars})
        # Update loop variables.
        sol = model.optimize()
        fva_sol = flux_variability_analysis(model, processes=processes)
        mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
        # delta = widest remaining FVA range; convergence is delta <= epsilon.
        delta = (fva_sol["maximum"] - fva_sol["minimum"]).max()
        count = 1
        LOGGER.debug("Iteration: %d; delta: %.3g; status: %s.",
                     count, delta, sol.status)
        # Following iterations that minimize the distance below threshold.
        while delta > epsilon and count < max_tries:
            # Tighten every bound to the newly computed box before re-solving.
            for rxn_id, var, u_c, l_c in updating_vars_cons:
                var.ub = mean_flux[rxn_id]
                u_c.ub = mean_flux[rxn_id]
                l_c.lb = fva_sol.at[rxn_id, "minimum"]
            # Update loop variables.
            sol = model.optimize()
            fva_sol = flux_variability_analysis(model, processes=processes)
            mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
            delta = (fva_sol["maximum"] - fva_sol["minimum"]).max()
            count += 1
            LOGGER.debug("Iteration: %d; delta: %.3g; status: %s.",
                         count, delta, sol.status)
        if count == max_tries:
            raise RuntimeError(
                "The iterations have exceeded the maximum value of {}. "
                "This is probably due to the increased complexity of the "
                "model and can lead to inaccurate results. Please set a "
                "different convergence tolerance and/or increase the "
                "maximum iterations".format(max_tries)
            )
        return sol | def function[geometric_fba, parameter[model, epsilon, max_tries, processes]]:
constant[
Perform geometric FBA to obtain a unique, centered flux distribution.
Geometric FBA [1]_ formulates the problem as a polyhedron and
then solves it by bounding the convex hull of the polyhedron.
The bounding forms a box around the convex hull which reduces
with every iteration and extracts a unique solution in this way.
Parameters
----------
model: cobra.Model
The model to perform geometric FBA on.
epsilon: float, optional
The convergence tolerance of the model (default 1E-06).
max_tries: int, optional
Maximum number of iterations (default 200).
processes : int, optional
The number of parallel processes to run. If not explicitly passed,
will be set from the global configuration singleton.
Returns
-------
cobra.Solution
The solution object containing all the constraints required
for geometric FBA.
References
----------
.. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
Flux balance analysis: A geometric perspective.
Journal of theoretical biology.258. 311-5.
10.1016/j.jtbi.2009.01.027.
]
with name[model] begin[:]
variable[consts] assign[=] list[[]]
variable[obj_vars] assign[=] list[[]]
variable[updating_vars_cons] assign[=] list[[]]
variable[prob] assign[=] name[model].problem
call[name[add_pfba], parameter[name[model]]]
call[name[model].optimize, parameter[]]
variable[fva_sol] assign[=] call[name[flux_variability_analysis], parameter[name[model]]]
variable[mean_flux] assign[=] binary_operation[call[binary_operation[call[name[fva_sol]][constant[maximum]] + call[name[fva_sol]][constant[minimum]]].abs, parameter[]] / constant[2]]
for taget[name[rxn]] in starred[name[model].reactions] begin[:]
variable[var] assign[=] call[name[prob].Variable, parameter[binary_operation[constant[geometric_fba_] + name[rxn].id]]]
variable[upper_const] assign[=] call[name[prob].Constraint, parameter[binary_operation[name[rxn].flux_expression - name[var]]]]
variable[lower_const] assign[=] call[name[prob].Constraint, parameter[binary_operation[name[rxn].flux_expression + name[var]]]]
call[name[updating_vars_cons].append, parameter[tuple[[<ast.Attribute object at 0x7da1b01c1600>, <ast.Name object at 0x7da1b01c0e20>, <ast.Name object at 0x7da1b01c27d0>, <ast.Name object at 0x7da1b01c3910>]]]]
call[name[consts].extend, parameter[list[[<ast.Name object at 0x7da1b01c35e0>, <ast.Name object at 0x7da1b01c2290>, <ast.Name object at 0x7da1b01c19c0>]]]]
call[name[obj_vars].append, parameter[name[var]]]
call[name[model].add_cons_vars, parameter[name[consts]]]
name[model].objective assign[=] call[name[prob].Objective, parameter[name[Zero]]]
call[name[model].objective.set_linear_coefficients, parameter[<ast.DictComp object at 0x7da1b01e2410>]]
variable[sol] assign[=] call[name[model].optimize, parameter[]]
variable[fva_sol] assign[=] call[name[flux_variability_analysis], parameter[name[model]]]
variable[mean_flux] assign[=] binary_operation[call[binary_operation[call[name[fva_sol]][constant[maximum]] + call[name[fva_sol]][constant[minimum]]].abs, parameter[]] / constant[2]]
variable[delta] assign[=] call[binary_operation[call[name[fva_sol]][constant[maximum]] - call[name[fva_sol]][constant[minimum]]].max, parameter[]]
variable[count] assign[=] constant[1]
call[name[LOGGER].debug, parameter[constant[Iteration: %d; delta: %.3g; status: %s.], name[count], name[delta], name[sol].status]]
while <ast.BoolOp object at 0x7da1b0003970> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0003d60>, <ast.Name object at 0x7da1b0002aa0>, <ast.Name object at 0x7da1b0002170>, <ast.Name object at 0x7da1b0000e50>]]] in starred[name[updating_vars_cons]] begin[:]
name[var].ub assign[=] call[name[mean_flux]][name[rxn_id]]
name[u_c].ub assign[=] call[name[mean_flux]][name[rxn_id]]
name[l_c].lb assign[=] call[name[fva_sol].at][tuple[[<ast.Name object at 0x7da1b0000160>, <ast.Constant object at 0x7da1b0000070>]]]
variable[sol] assign[=] call[name[model].optimize, parameter[]]
variable[fva_sol] assign[=] call[name[flux_variability_analysis], parameter[name[model]]]
variable[mean_flux] assign[=] binary_operation[call[binary_operation[call[name[fva_sol]][constant[maximum]] + call[name[fva_sol]][constant[minimum]]].abs, parameter[]] / constant[2]]
variable[delta] assign[=] call[binary_operation[call[name[fva_sol]][constant[maximum]] - call[name[fva_sol]][constant[minimum]]].max, parameter[]]
<ast.AugAssign object at 0x7da1b0001d50>
call[name[LOGGER].debug, parameter[constant[Iteration: %d; delta: %.3g; status: %s.], name[count], name[delta], name[sol].status]]
if compare[name[count] equal[==] name[max_tries]] begin[:]
<ast.Raise object at 0x7da1b0001270>
return[name[sol]] | keyword[def] identifier[geometric_fba] ( identifier[model] , identifier[epsilon] = literal[int] , identifier[max_tries] = literal[int] , identifier[processes] = keyword[None] ):
literal[string]
keyword[with] identifier[model] :
identifier[consts] =[]
identifier[obj_vars] =[]
identifier[updating_vars_cons] =[]
identifier[prob] = identifier[model] . identifier[problem]
identifier[add_pfba] ( identifier[model] )
identifier[model] . identifier[optimize] ()
identifier[fva_sol] = identifier[flux_variability_analysis] ( identifier[model] , identifier[processes] = identifier[processes] )
identifier[mean_flux] =( identifier[fva_sol] [ literal[string] ]+ identifier[fva_sol] [ literal[string] ]). identifier[abs] ()/ literal[int]
keyword[for] identifier[rxn] keyword[in] identifier[model] . identifier[reactions] :
identifier[var] = identifier[prob] . identifier[Variable] ( literal[string] + identifier[rxn] . identifier[id] ,
identifier[lb] = literal[int] ,
identifier[ub] = identifier[mean_flux] [ identifier[rxn] . identifier[id] ])
identifier[upper_const] = identifier[prob] . identifier[Constraint] ( identifier[rxn] . identifier[flux_expression] - identifier[var] ,
identifier[ub] = identifier[mean_flux] [ identifier[rxn] . identifier[id] ],
identifier[name] = literal[string] +
identifier[rxn] . identifier[id] )
identifier[lower_const] = identifier[prob] . identifier[Constraint] ( identifier[rxn] . identifier[flux_expression] + identifier[var] ,
identifier[lb] = identifier[fva_sol] . identifier[at] [ identifier[rxn] . identifier[id] , literal[string] ],
identifier[name] = literal[string] +
identifier[rxn] . identifier[id] )
identifier[updating_vars_cons] . identifier[append] (( identifier[rxn] . identifier[id] , identifier[var] , identifier[upper_const] , identifier[lower_const] ))
identifier[consts] . identifier[extend] ([ identifier[var] , identifier[upper_const] , identifier[lower_const] ])
identifier[obj_vars] . identifier[append] ( identifier[var] )
identifier[model] . identifier[add_cons_vars] ( identifier[consts] )
identifier[model] . identifier[objective] = identifier[prob] . identifier[Objective] ( identifier[Zero] , identifier[sloppy] = keyword[True] , identifier[direction] = literal[string] )
identifier[model] . identifier[objective] . identifier[set_linear_coefficients] ({ identifier[v] : literal[int] keyword[for] identifier[v] keyword[in] identifier[obj_vars] })
identifier[sol] = identifier[model] . identifier[optimize] ()
identifier[fva_sol] = identifier[flux_variability_analysis] ( identifier[model] , identifier[processes] = identifier[processes] )
identifier[mean_flux] =( identifier[fva_sol] [ literal[string] ]+ identifier[fva_sol] [ literal[string] ]). identifier[abs] ()/ literal[int]
identifier[delta] =( identifier[fva_sol] [ literal[string] ]- identifier[fva_sol] [ literal[string] ]). identifier[max] ()
identifier[count] = literal[int]
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[count] , identifier[delta] , identifier[sol] . identifier[status] )
keyword[while] identifier[delta] > identifier[epsilon] keyword[and] identifier[count] < identifier[max_tries] :
keyword[for] identifier[rxn_id] , identifier[var] , identifier[u_c] , identifier[l_c] keyword[in] identifier[updating_vars_cons] :
identifier[var] . identifier[ub] = identifier[mean_flux] [ identifier[rxn_id] ]
identifier[u_c] . identifier[ub] = identifier[mean_flux] [ identifier[rxn_id] ]
identifier[l_c] . identifier[lb] = identifier[fva_sol] . identifier[at] [ identifier[rxn_id] , literal[string] ]
identifier[sol] = identifier[model] . identifier[optimize] ()
identifier[fva_sol] = identifier[flux_variability_analysis] ( identifier[model] , identifier[processes] = identifier[processes] )
identifier[mean_flux] =( identifier[fva_sol] [ literal[string] ]+ identifier[fva_sol] [ literal[string] ]). identifier[abs] ()/ literal[int]
identifier[delta] =( identifier[fva_sol] [ literal[string] ]- identifier[fva_sol] [ literal[string] ]). identifier[max] ()
identifier[count] += literal[int]
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[count] , identifier[delta] , identifier[sol] . identifier[status] )
keyword[if] identifier[count] == identifier[max_tries] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[max_tries] )
)
keyword[return] identifier[sol] | def geometric_fba(model, epsilon=1e-06, max_tries=200, processes=None):
"""
Perform geometric FBA to obtain a unique, centered flux distribution.
Geometric FBA [1]_ formulates the problem as a polyhedron and
then solves it by bounding the convex hull of the polyhedron.
The bounding forms a box around the convex hull which reduces
with every iteration and extracts a unique solution in this way.
Parameters
----------
model: cobra.Model
The model to perform geometric FBA on.
epsilon: float, optional
The convergence tolerance of the model (default 1E-06).
max_tries: int, optional
Maximum number of iterations (default 200).
processes : int, optional
The number of parallel processes to run. If not explicitly passed,
will be set from the global configuration singleton.
Returns
-------
cobra.Solution
The solution object containing all the constraints required
for geometric FBA.
References
----------
.. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
Flux balance analysis: A geometric perspective.
Journal of theoretical biology.258. 311-5.
10.1016/j.jtbi.2009.01.027.
"""
with model:
# Variables' and constraints' storage variables.
consts = []
obj_vars = []
updating_vars_cons = []
# The first iteration.
prob = model.problem
add_pfba(model) # Minimize the solution space to a convex hull.
model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol['maximum'] + fva_sol['minimum']).abs() / 2
# Set the gFBA constraints.
for rxn in model.reactions:
var = prob.Variable('geometric_fba_' + rxn.id, lb=0, ub=mean_flux[rxn.id])
upper_const = prob.Constraint(rxn.flux_expression - var, ub=mean_flux[rxn.id], name='geometric_fba_upper_const_' + rxn.id)
lower_const = prob.Constraint(rxn.flux_expression + var, lb=fva_sol.at[rxn.id, 'minimum'], name='geometric_fba_lower_const_' + rxn.id)
updating_vars_cons.append((rxn.id, var, upper_const, lower_const))
consts.extend([var, upper_const, lower_const])
obj_vars.append(var) # depends on [control=['for'], data=['rxn']]
model.add_cons_vars(consts)
# Minimize the distance between the flux distribution and center.
model.objective = prob.Objective(Zero, sloppy=True, direction='min')
model.objective.set_linear_coefficients({v: 1.0 for v in obj_vars})
# Update loop variables.
sol = model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol['maximum'] + fva_sol['minimum']).abs() / 2
delta = (fva_sol['maximum'] - fva_sol['minimum']).max()
count = 1
LOGGER.debug('Iteration: %d; delta: %.3g; status: %s.', count, delta, sol.status)
# Following iterations that minimize the distance below threshold.
while delta > epsilon and count < max_tries:
for (rxn_id, var, u_c, l_c) in updating_vars_cons:
var.ub = mean_flux[rxn_id]
u_c.ub = mean_flux[rxn_id]
l_c.lb = fva_sol.at[rxn_id, 'minimum'] # depends on [control=['for'], data=[]]
# Update loop variables.
sol = model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol['maximum'] + fva_sol['minimum']).abs() / 2
delta = (fva_sol['maximum'] - fva_sol['minimum']).max()
count += 1
LOGGER.debug('Iteration: %d; delta: %.3g; status: %s.', count, delta, sol.status) # depends on [control=['while'], data=[]]
if count == max_tries:
raise RuntimeError('The iterations have exceeded the maximum value of {}. This is probably due to the increased complexity of the model and can lead to inaccurate results. Please set a different convergence tolerance and/or increase the maximum iterations'.format(max_tries)) # depends on [control=['if'], data=['max_tries']] # depends on [control=['with'], data=[]]
return sol |
def etree_to_dict(tree):
    """Translate etree into dictionary.

    Maps the element's local tag name (namespace prefix stripped) to its
    recursively converted children, falling back to the element's text.

    :param tree: etree dictionary object
    :type tree: <http://lxml.de/api/lxml.etree-module.html>
    """
    # Assumes a namespaced tag of the form '{ns}name' -- splitting on '}'
    # would raise IndexError for a non-namespaced tag. TODO confirm inputs.
    # NOTE(review): on Python 3, map() returns a (always truthy) iterator,
    # so the `or tree.text` fallback only fires on Python 2 -- verify intent.
    d = {tree.tag.split('}')[1]: map(
        etree_to_dict, tree.iterchildren()
    ) or tree.text}
    return d | def function[etree_to_dict, parameter[tree]]:
constant[Translate etree into dictionary.
:param tree: etree dictionary object
:type tree: <http://lxml.de/api/lxml.etree-module.html>
]
variable[d] assign[=] dictionary[[<ast.Subscript object at 0x7da20c795e70>], [<ast.BoolOp object at 0x7da204347010>]]
return[name[d]] | keyword[def] identifier[etree_to_dict] ( identifier[tree] ):
literal[string]
identifier[d] ={ identifier[tree] . identifier[tag] . identifier[split] ( literal[string] )[ literal[int] ]: identifier[map] (
identifier[etree_to_dict] , identifier[tree] . identifier[iterchildren] ()
) keyword[or] identifier[tree] . identifier[text] }
keyword[return] identifier[d] | def etree_to_dict(tree):
"""Translate etree into dictionary.
:param tree: etree dictionary object
:type tree: <http://lxml.de/api/lxml.etree-module.html>
"""
d = {tree.tag.split('}')[1]: map(etree_to_dict, tree.iterchildren()) or tree.text}
return d |
def convert_column(self, values):
        """Normalize values.

        Divides the column by its sum so entries sum to 1; an all-zero
        column is returned unchanged to avoid division by zero.

        :param values: non-negative numeric column; assumed to support
            elementwise comparison and division (e.g. numpy array or
            pandas Series) -- TODO confirm against callers.
        """
        # NOTE(review): `assert` is stripped under `python -O`; raising
        # ValueError would enforce this precondition unconditionally.
        assert all(values >= 0), 'Cannot normalize a column with negatives'
        total = sum(values)
        if total > 0:
            return values / total
        else:
            return values | def function[convert_column, parameter[self, values]]:
constant[Normalize values.]
assert[call[name[all], parameter[compare[name[values] greater_or_equal[>=] constant[0]]]]]
variable[total] assign[=] call[name[sum], parameter[name[values]]]
if compare[name[total] greater[>] constant[0]] begin[:]
return[binary_operation[name[values] / name[total]]] | keyword[def] identifier[convert_column] ( identifier[self] , identifier[values] ):
literal[string]
keyword[assert] identifier[all] ( identifier[values] >= literal[int] ), literal[string]
identifier[total] = identifier[sum] ( identifier[values] )
keyword[if] identifier[total] > literal[int] :
keyword[return] identifier[values] / identifier[total]
keyword[else] :
keyword[return] identifier[values] | def convert_column(self, values):
"""Normalize values."""
assert all(values >= 0), 'Cannot normalize a column with negatives'
total = sum(values)
if total > 0:
return values / total # depends on [control=['if'], data=['total']]
else:
return values |
async def _read_packet(self, packet_type=MysqlPacket):
    """Read an entire "mysql packet" in its entirety from the network
    and return a MysqlPacket type that represents the results.

    Loops because a logical packet larger than 16 MB - 1 is split by the
    server into multiple wire packets; the payloads are concatenated
    into ``buff`` before being handed to ``packet_type``.
    """
    buff = b''
    while True:
        try:
            # Wire header: 3-byte little-endian payload length + 1-byte
            # sequence number.
            packet_header = await self._read_bytes(4)
        except asyncio.CancelledError:
            # Connection state is unknown after a mid-read cancel, so
            # drop the connection before propagating.
            self._close_on_cancel()
            raise
        btrl, btrh, packet_number = struct.unpack(
            '<HBB', packet_header)
        # Reassemble the 24-bit payload length from its low 16 bits and
        # high 8 bits.
        bytes_to_read = btrl + (btrh << 16)
        # Outbound and inbound packets are numbered sequentialy, so
        # we increment in both write_packet and read_packet. The count
        # is reset at new COMMAND PHASE.
        if packet_number != self._next_seq_id:
            raise InternalError(
                "Packet sequence number wrong - got %d expected %d" %
                (packet_number, self._next_seq_id))
        # Sequence numbers are a single byte, hence the wrap at 256.
        self._next_seq_id = (self._next_seq_id + 1) % 256
        try:
            recv_data = await self._read_bytes(bytes_to_read)
        except asyncio.CancelledError:
            self._close_on_cancel()
            raise
        buff += recv_data
        # https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
        if bytes_to_read == 0xffffff:
            # Max-length payload: more continuation packets follow.
            continue
        if bytes_to_read < MAX_PACKET_LEN:
            break
    packet = packet_type(buff, self._encoding)
    # Raises if the server responded with an ERR packet.
    packet.check_error()
    return packet
literal[string]
identifier[buff] = literal[string]
keyword[while] keyword[True] :
keyword[try] :
identifier[packet_header] = keyword[await] identifier[self] . identifier[_read_bytes] ( literal[int] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
identifier[self] . identifier[_close_on_cancel] ()
keyword[raise]
identifier[btrl] , identifier[btrh] , identifier[packet_number] = identifier[struct] . identifier[unpack] (
literal[string] , identifier[packet_header] )
identifier[bytes_to_read] = identifier[btrl] +( identifier[btrh] << literal[int] )
keyword[if] identifier[packet_number] != identifier[self] . identifier[_next_seq_id] :
keyword[raise] identifier[InternalError] (
literal[string] %
( identifier[packet_number] , identifier[self] . identifier[_next_seq_id] ))
identifier[self] . identifier[_next_seq_id] =( identifier[self] . identifier[_next_seq_id] + literal[int] )% literal[int]
keyword[try] :
identifier[recv_data] = keyword[await] identifier[self] . identifier[_read_bytes] ( identifier[bytes_to_read] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
identifier[self] . identifier[_close_on_cancel] ()
keyword[raise]
identifier[buff] += identifier[recv_data]
keyword[if] identifier[bytes_to_read] == literal[int] :
keyword[continue]
keyword[if] identifier[bytes_to_read] < identifier[MAX_PACKET_LEN] :
keyword[break]
identifier[packet] = identifier[packet_type] ( identifier[buff] , identifier[self] . identifier[_encoding] )
identifier[packet] . identifier[check_error] ()
keyword[return] identifier[packet] | async def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
while True:
try:
packet_header = await self._read_bytes(4) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
self._close_on_cancel()
raise # depends on [control=['except'], data=[]]
(btrl, btrh, packet_number) = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
# Outbound and inbound packets are numbered sequentialy, so
# we increment in both write_packet and read_packet. The count
# is reset at new COMMAND PHASE.
if packet_number != self._next_seq_id:
raise InternalError('Packet sequence number wrong - got %d expected %d' % (packet_number, self._next_seq_id)) # depends on [control=['if'], data=['packet_number']]
self._next_seq_id = (self._next_seq_id + 1) % 256
try:
recv_data = await self._read_bytes(bytes_to_read) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
self._close_on_cancel()
raise # depends on [control=['except'], data=[]]
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 16777215:
continue # depends on [control=['if'], data=[]]
if bytes_to_read < MAX_PACKET_LEN:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
packet = packet_type(buff, self._encoding)
packet.check_error()
return packet |
def _set_src_ip_any(self, v, load=False):
  """
  Setter method for src_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/src_ip_any (empty)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_src_ip_any is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_src_ip_any() directly.

  NOTE: auto-generated (pyangbind-style) setter; edit the YANG model,
  not this code.
  """
  # Values carrying a union-type helper coerce themselves first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Wrap the value in the YANG-typed dynamic class; raises on values
    # incompatible with the 'empty' (boolean presence) leaf type.
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="src-ip-any", rest_name="src-ip-any", parent=self, choice=(u'choice-src-ip', u'case-src-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src ip address: any', u'display-when': u'(../dst-ip-host) or (../dst-ip) or (../dst-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """src_ip_any must be of a type compatible with empty""",
      'defined-type': "empty",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="src-ip-any", rest_name="src-ip-any", parent=self, choice=(u'choice-src-ip', u'case-src-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src ip address: any', u'display-when': u'(../dst-ip-host) or (../dst-ip) or (../dst-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
    })

  # Name-mangled private storage for the leaf value.
  self.__src_ip_any = t
  if hasattr(self, '_set'):
    self._set()
constant[
Setter method for src_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/src_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_ip_any() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da2041da020>
name[self].__src_ip_any assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_src_ip_any] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGBool] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__src_ip_any] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_src_ip_any(self, v, load=False):
"""
Setter method for src_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/src_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_ip_any() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='src-ip-any', rest_name='src-ip-any', parent=self, choice=(u'choice-src-ip', u'case-src-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src ip address: any', u'display-when': u'(../dst-ip-host) or (../dst-ip) or (../dst-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'src_ip_any must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="src-ip-any", rest_name="src-ip-any", parent=self, choice=(u\'choice-src-ip\', u\'case-src-ip-any\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'src ip address: any\', u\'display-when\': u\'(../dst-ip-host) or (../dst-ip) or (../dst-ip-any)\', u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-vxlan-visibility\', defining_module=\'brocade-vxlan-visibility\', yang_type=\'empty\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__src_ip_any = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def forum_topic_delete(self, topic_id):
    """Delete a topic (Login Requires) (Moderator+) (UNTESTED).

    Parameters:
        topic_id (int): Where topic_id is the topic id.
    """
    endpoint = 'forum_topics/{0}.json'.format(topic_id)
    return self._get(endpoint, method='DELETE', auth=True)
constant[Delete a topic (Login Requires) (Moderator+) (UNTESTED).
Parameters:
topic_id (int): Where topic_id is the topic id.
]
return[call[name[self]._get, parameter[call[constant[forum_topics/{0}.json].format, parameter[name[topic_id]]]]]] | keyword[def] identifier[forum_topic_delete] ( identifier[self] , identifier[topic_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_get] ( literal[string] . identifier[format] ( identifier[topic_id] ),
identifier[method] = literal[string] , identifier[auth] = keyword[True] ) | def forum_topic_delete(self, topic_id):
"""Delete a topic (Login Requires) (Moderator+) (UNTESTED).
Parameters:
topic_id (int): Where topic_id is the topic id.
"""
return self._get('forum_topics/{0}.json'.format(topic_id), method='DELETE', auth=True) |
def get_onchain_locksroots(
        chain: 'BlockChainService',
        canonical_identifier: CanonicalIdentifier,
        participant1: Address,
        participant2: Address,
        block_identifier: BlockSpecification,
) -> Tuple[Locksroot, Locksroot]:
    """Return the locksroot for `participant1` and `participant2` at `block_identifier`."""
    payment_channel = chain.payment_channel(canonical_identifier=canonical_identifier)
    # This will not raise RaidenRecoverableError because we are providing the channel_identifier
    details = payment_channel.token_network.detail_participants(
        participant1=participant1,
        participant2=participant2,
        channel_identifier=canonical_identifier.channel_identifier,
        block_identifier=block_identifier,
    )
    return details.our_details.locksroot, details.partner_details.locksroot
constant[Return the locksroot for `participant1` and `participant2` at `block_identifier`.]
variable[payment_channel] assign[=] call[name[chain].payment_channel, parameter[]]
variable[token_network] assign[=] name[payment_channel].token_network
variable[participants_details] assign[=] call[name[token_network].detail_participants, parameter[]]
variable[our_details] assign[=] name[participants_details].our_details
variable[our_locksroot] assign[=] name[our_details].locksroot
variable[partner_details] assign[=] name[participants_details].partner_details
variable[partner_locksroot] assign[=] name[partner_details].locksroot
return[tuple[[<ast.Name object at 0x7da1b170a020>, <ast.Name object at 0x7da1b170a050>]]] | keyword[def] identifier[get_onchain_locksroots] (
identifier[chain] : literal[string] ,
identifier[canonical_identifier] : identifier[CanonicalIdentifier] ,
identifier[participant1] : identifier[Address] ,
identifier[participant2] : identifier[Address] ,
identifier[block_identifier] : identifier[BlockSpecification] ,
)-> identifier[Tuple] [ identifier[Locksroot] , identifier[Locksroot] ]:
literal[string]
identifier[payment_channel] = identifier[chain] . identifier[payment_channel] ( identifier[canonical_identifier] = identifier[canonical_identifier] )
identifier[token_network] = identifier[payment_channel] . identifier[token_network]
identifier[participants_details] = identifier[token_network] . identifier[detail_participants] (
identifier[participant1] = identifier[participant1] ,
identifier[participant2] = identifier[participant2] ,
identifier[channel_identifier] = identifier[canonical_identifier] . identifier[channel_identifier] ,
identifier[block_identifier] = identifier[block_identifier] ,
)
identifier[our_details] = identifier[participants_details] . identifier[our_details]
identifier[our_locksroot] = identifier[our_details] . identifier[locksroot]
identifier[partner_details] = identifier[participants_details] . identifier[partner_details]
identifier[partner_locksroot] = identifier[partner_details] . identifier[locksroot]
keyword[return] identifier[our_locksroot] , identifier[partner_locksroot] | def get_onchain_locksroots(chain: 'BlockChainService', canonical_identifier: CanonicalIdentifier, participant1: Address, participant2: Address, block_identifier: BlockSpecification) -> Tuple[Locksroot, Locksroot]:
"""Return the locksroot for `participant1` and `participant2` at `block_identifier`."""
payment_channel = chain.payment_channel(canonical_identifier=canonical_identifier)
token_network = payment_channel.token_network
# This will not raise RaidenRecoverableError because we are providing the channel_identifier
participants_details = token_network.detail_participants(participant1=participant1, participant2=participant2, channel_identifier=canonical_identifier.channel_identifier, block_identifier=block_identifier)
our_details = participants_details.our_details
our_locksroot = our_details.locksroot
partner_details = participants_details.partner_details
partner_locksroot = partner_details.locksroot
return (our_locksroot, partner_locksroot) |
def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of probTrue to
    a matching shaped Bernoulli.
    :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar
    """
    # Coerce the raw value into a double vertex before handing it to the JVM.
    prob_vertex = cast_to_double_vertex(prob_true)
    return Boolean(context.jvm_view().BernoulliVertex, label, prob_vertex)
constant[
One to one constructor for mapping some shape of probTrue to
a matching shaped Bernoulli.
:param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar
]
return[call[name[Boolean], parameter[call[name[context].jvm_view, parameter[]].BernoulliVertex, name[label], call[name[cast_to_double_vertex], parameter[name[prob_true]]]]]] | keyword[def] identifier[Bernoulli] ( identifier[prob_true] : identifier[vertex_constructor_param_types] , identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] :
literal[string]
keyword[return] identifier[Boolean] ( identifier[context] . identifier[jvm_view] (). identifier[BernoulliVertex] , identifier[label] , identifier[cast_to_double_vertex] ( identifier[prob_true] )) | def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of probTrue to
a matching shaped Bernoulli.
:param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar
"""
return Boolean(context.jvm_view().BernoulliVertex, label, cast_to_double_vertex(prob_true)) |
def get_queryset(self, request):
    """Limit Pages to those that belong to the request's user."""
    base_qs = super(VISADeviceAdmin, self).get_queryset(request)
    # Only devices speaking the VISA protocol are shown in this admin.
    return base_qs.filter(protocol_id=PROTOCOL_ID)
constant[Limit Pages to those that belong to the request's user.]
variable[qs] assign[=] call[call[name[super], parameter[name[VISADeviceAdmin], name[self]]].get_queryset, parameter[name[request]]]
return[call[name[qs].filter, parameter[]]] | keyword[def] identifier[get_queryset] ( identifier[self] , identifier[request] ):
literal[string]
identifier[qs] = identifier[super] ( identifier[VISADeviceAdmin] , identifier[self] ). identifier[get_queryset] ( identifier[request] )
keyword[return] identifier[qs] . identifier[filter] ( identifier[protocol_id] = identifier[PROTOCOL_ID] ) | def get_queryset(self, request):
"""Limit Pages to those that belong to the request's user."""
qs = super(VISADeviceAdmin, self).get_queryset(request)
return qs.filter(protocol_id=PROTOCOL_ID) |
def posr(string, substr, start):
    """
    Find the first occurrence in a string of a substring, starting at
    a specified location, searching backward.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/posr_c.html
    :param string: Any character string.
    :type string: str
    :param substr: Substring to locate in the character string.
    :type substr: str
    :param start: Position to begin looking for substr in string.
    :type start: int
    :return:
        The index of the last occurrence of substr
        in string at or preceding index start.
    :rtype: int
    """
    # Marshal the Python arguments into the C types CSPICE expects.
    c_string = stypes.stringToCharP(string)
    c_substr = stypes.stringToCharP(substr)
    c_start = ctypes.c_int(start)
    return libspice.posr_c(c_string, c_substr, c_start)
return libspice.posr_c(string, substr, start) | def function[posr, parameter[string, substr, start]]:
constant[
Find the first occurrence in a string of a substring, starting at
a specified location, searching backward.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/posr_c.html
:param string: Any character string.
:type string: str
:param substr: Substring to locate in the character string.
:type substr: str
:param start: Position to begin looking for substr in string.
:type start: int
:return:
The index of the last occurrence of substr
in string at or preceding index start.
:rtype: int
]
variable[string] assign[=] call[name[stypes].stringToCharP, parameter[name[string]]]
variable[substr] assign[=] call[name[stypes].stringToCharP, parameter[name[substr]]]
variable[start] assign[=] call[name[ctypes].c_int, parameter[name[start]]]
return[call[name[libspice].posr_c, parameter[name[string], name[substr], name[start]]]] | keyword[def] identifier[posr] ( identifier[string] , identifier[substr] , identifier[start] ):
literal[string]
identifier[string] = identifier[stypes] . identifier[stringToCharP] ( identifier[string] )
identifier[substr] = identifier[stypes] . identifier[stringToCharP] ( identifier[substr] )
identifier[start] = identifier[ctypes] . identifier[c_int] ( identifier[start] )
keyword[return] identifier[libspice] . identifier[posr_c] ( identifier[string] , identifier[substr] , identifier[start] ) | def posr(string, substr, start):
"""
Find the first occurrence in a string of a substring, starting at
a specified location, searching backward.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/posr_c.html
:param string: Any character string.
:type string: str
:param substr: Substring to locate in the character string.
:type substr: str
:param start: Position to begin looking for substr in string.
:type start: int
:return:
The index of the last occurrence of substr
in string at or preceding index start.
:rtype: int
"""
string = stypes.stringToCharP(string)
substr = stypes.stringToCharP(substr)
start = ctypes.c_int(start)
return libspice.posr_c(string, substr, start) |
def update_record(self, msg_id, rec):
    """Update the data in an existing record."""
    # Sort the columns so the generated SQL is deterministic.
    columns = sorted(rec.keys())
    assignments = ', '.join('%s = ?' % col for col in columns)
    values = [rec[col] for col in columns]
    values.append(msg_id)
    query = "UPDATE %s SET " % self.table + assignments + ' WHERE msg_id == ?'
    self._db.execute(query, values)
constant[Update the data in an existing record.]
variable[query] assign[=] binary_operation[constant[UPDATE %s SET ] <ast.Mod object at 0x7da2590d6920> name[self].table]
variable[sets] assign[=] list[[]]
variable[keys] assign[=] call[name[sorted], parameter[call[name[rec].keys, parameter[]]]]
variable[values] assign[=] list[[]]
for taget[name[key]] in starred[name[keys]] begin[:]
call[name[sets].append, parameter[binary_operation[constant[%s = ?] <ast.Mod object at 0x7da2590d6920> name[key]]]]
call[name[values].append, parameter[call[name[rec]][name[key]]]]
<ast.AugAssign object at 0x7da1b021d3f0>
<ast.AugAssign object at 0x7da1b021c130>
call[name[values].append, parameter[name[msg_id]]]
call[name[self]._db.execute, parameter[name[query], name[values]]] | keyword[def] identifier[update_record] ( identifier[self] , identifier[msg_id] , identifier[rec] ):
literal[string]
identifier[query] = literal[string] % identifier[self] . identifier[table]
identifier[sets] =[]
identifier[keys] = identifier[sorted] ( identifier[rec] . identifier[keys] ())
identifier[values] =[]
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[sets] . identifier[append] ( literal[string] % identifier[key] )
identifier[values] . identifier[append] ( identifier[rec] [ identifier[key] ])
identifier[query] += literal[string] . identifier[join] ( identifier[sets] )
identifier[query] += literal[string]
identifier[values] . identifier[append] ( identifier[msg_id] )
identifier[self] . identifier[_db] . identifier[execute] ( identifier[query] , identifier[values] ) | def update_record(self, msg_id, rec):
"""Update the data in an existing record."""
query = 'UPDATE %s SET ' % self.table
sets = []
keys = sorted(rec.keys())
values = []
for key in keys:
sets.append('%s = ?' % key)
values.append(rec[key]) # depends on [control=['for'], data=['key']]
query += ', '.join(sets)
query += ' WHERE msg_id == ?'
values.append(msg_id)
self._db.execute(query, values) |
def _exponential_timeout_generator(initial, maximum, multiplier, deadline):
    """A generator that yields exponential timeout values.

    Each yielded value is capped by both ``maximum`` and the time
    remaining until the overall ``deadline``.

    Args:
        initial (float): The initial timeout.
        maximum (float): The maximum timeout.
        multiplier (float): The multiplier applied to the timeout.
        deadline (float): The overall deadline across all invocations.

    Yields:
        float: A timeout value.
    """
    if deadline is not None:
        deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
            seconds=deadline
        )
    else:
        # No deadline: use the largest representable datetime so the
        # remaining-time term never constrains the timeout.
        deadline_datetime = datetime.datetime.max

    timeout = initial
    while True:
        now = datetime_helpers.utcnow()
        yield min(
            # The calculated timeout based on invocations.
            timeout,
            # The set maximum timeout.
            maximum,
            # The remaining time before the deadline is reached.
            # Use total_seconds(): timedelta.seconds is only the seconds
            # *component* (0-86399) — it drops whole days, overstating the
            # remaining time for deadlines more than a day away and giving
            # a bogus positive value once the deadline has passed.
            (deadline_datetime - now).total_seconds(),
        )
        timeout = timeout * multiplier
constant[A generator that yields exponential timeout values.
Args:
initial (float): The initial timeout.
maximum (float): The maximum timeout.
multiplier (float): The multiplier applied to the timeout.
deadline (float): The overall deadline across all invocations.
Yields:
float: A timeout value.
]
if compare[name[deadline] is_not constant[None]] begin[:]
variable[deadline_datetime] assign[=] binary_operation[call[name[datetime_helpers].utcnow, parameter[]] + call[name[datetime].timedelta, parameter[]]]
variable[timeout] assign[=] name[initial]
while constant[True] begin[:]
variable[now] assign[=] call[name[datetime_helpers].utcnow, parameter[]]
<ast.Yield object at 0x7da207f00640>
variable[timeout] assign[=] binary_operation[name[timeout] * name[multiplier]] | keyword[def] identifier[_exponential_timeout_generator] ( identifier[initial] , identifier[maximum] , identifier[multiplier] , identifier[deadline] ):
literal[string]
keyword[if] identifier[deadline] keyword[is] keyword[not] keyword[None] :
identifier[deadline_datetime] = identifier[datetime_helpers] . identifier[utcnow] ()+ identifier[datetime] . identifier[timedelta] (
identifier[seconds] = identifier[deadline]
)
keyword[else] :
identifier[deadline_datetime] = identifier[datetime] . identifier[datetime] . identifier[max]
identifier[timeout] = identifier[initial]
keyword[while] keyword[True] :
identifier[now] = identifier[datetime_helpers] . identifier[utcnow] ()
keyword[yield] identifier[min] (
identifier[timeout] ,
identifier[maximum] ,
identifier[float] (( identifier[deadline_datetime] - identifier[now] ). identifier[seconds] ),
)
identifier[timeout] = identifier[timeout] * identifier[multiplier] | def _exponential_timeout_generator(initial, maximum, multiplier, deadline):
"""A generator that yields exponential timeout values.
Args:
initial (float): The initial timeout.
maximum (float): The maximum timeout.
multiplier (float): The multiplier applied to the timeout.
deadline (float): The overall deadline across all invocations.
Yields:
float: A timeout value.
"""
if deadline is not None:
deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(seconds=deadline) # depends on [control=['if'], data=['deadline']]
else:
deadline_datetime = datetime.datetime.max
timeout = initial
while True:
now = datetime_helpers.utcnow()
# The calculated timeout based on invocations.
# The set maximum timeout.
# The remaining time before the deadline is reached.
yield min(timeout, maximum, float((deadline_datetime - now).seconds))
timeout = timeout * multiplier # depends on [control=['while'], data=[]] |
def create(cls, *operands, **kwargs):
    """Instantiate the product while applying simplification rules"""
    # Coerce any raw (non-Scalar) operand into a ScalarValue first.
    coerced = [
        op if isinstance(op, Scalar) else ScalarValue.create(op)
        for op in operands
    ]
    return super().create(*coerced, **kwargs)
constant[Instantiate the product while applying simplification rules]
variable[converted_operands] assign[=] list[[]]
for taget[name[op]] in starred[name[operands]] begin[:]
if <ast.UnaryOp object at 0x7da20c6abcd0> begin[:]
variable[op] assign[=] call[name[ScalarValue].create, parameter[name[op]]]
call[name[converted_operands].append, parameter[name[op]]]
return[call[call[name[super], parameter[]].create, parameter[<ast.Starred object at 0x7da20c7959f0>]]] | keyword[def] identifier[create] ( identifier[cls] ,* identifier[operands] ,** identifier[kwargs] ):
literal[string]
identifier[converted_operands] =[]
keyword[for] identifier[op] keyword[in] identifier[operands] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[op] , identifier[Scalar] ):
identifier[op] = identifier[ScalarValue] . identifier[create] ( identifier[op] )
identifier[converted_operands] . identifier[append] ( identifier[op] )
keyword[return] identifier[super] (). identifier[create] (* identifier[converted_operands] ,** identifier[kwargs] ) | def create(cls, *operands, **kwargs):
"""Instantiate the product while applying simplification rules"""
converted_operands = []
for op in operands:
if not isinstance(op, Scalar):
op = ScalarValue.create(op) # depends on [control=['if'], data=[]]
converted_operands.append(op) # depends on [control=['for'], data=['op']]
return super().create(*converted_operands, **kwargs) |
def _get_foreign_keys_in_altered_table(self, diff):
"""
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict
"""
foreign_keys = diff.from_table.get_foreign_keys()
column_names = self._get_column_names_in_altered_table(diff)
for key, constraint in foreign_keys.items():
changed = False
local_columns = []
for column_name in constraint.get_local_columns():
normalized_column_name = column_name.lower()
if normalized_column_name not in column_names:
del foreign_keys[key]
break
else:
local_columns.append(column_names[normalized_column_name])
if column_name != column_names[normalized_column_name]:
changed = True
if changed:
foreign_keys[key] = ForeignKeyConstraint(
local_columns,
constraint.get_foreign_table_name(),
constraint.get_foreign_columns(),
constraint.get_name(),
constraint.get_options(),
)
for constraint in diff.removed_foreign_keys:
constraint_name = constraint.get_name().lower()
if constraint_name and constraint_name in foreign_keys:
del foreign_keys[constraint_name]
foreign_keys_diff = diff.changed_foreign_keys + diff.added_foreign_keys
for constraint in foreign_keys_diff:
constraint_name = constraint.get_name().lower()
if constraint_name:
foreign_keys[constraint_name] = constraint
else:
foreign_keys[len(foreign_keys)] = constraint
return foreign_keys | def function[_get_foreign_keys_in_altered_table, parameter[self, diff]]:
constant[
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict
]
variable[foreign_keys] assign[=] call[name[diff].from_table.get_foreign_keys, parameter[]]
variable[column_names] assign[=] call[name[self]._get_column_names_in_altered_table, parameter[name[diff]]]
for taget[tuple[[<ast.Name object at 0x7da20c7c8fa0>, <ast.Name object at 0x7da20c7c9810>]]] in starred[call[name[foreign_keys].items, parameter[]]] begin[:]
variable[changed] assign[=] constant[False]
variable[local_columns] assign[=] list[[]]
for taget[name[column_name]] in starred[call[name[constraint].get_local_columns, parameter[]]] begin[:]
variable[normalized_column_name] assign[=] call[name[column_name].lower, parameter[]]
if compare[name[normalized_column_name] <ast.NotIn object at 0x7da2590d7190> name[column_names]] begin[:]
<ast.Delete object at 0x7da20c7c9de0>
break
if name[changed] begin[:]
call[name[foreign_keys]][name[key]] assign[=] call[name[ForeignKeyConstraint], parameter[name[local_columns], call[name[constraint].get_foreign_table_name, parameter[]], call[name[constraint].get_foreign_columns, parameter[]], call[name[constraint].get_name, parameter[]], call[name[constraint].get_options, parameter[]]]]
for taget[name[constraint]] in starred[name[diff].removed_foreign_keys] begin[:]
variable[constraint_name] assign[=] call[call[name[constraint].get_name, parameter[]].lower, parameter[]]
if <ast.BoolOp object at 0x7da18eb56620> begin[:]
<ast.Delete object at 0x7da18eb54700>
variable[foreign_keys_diff] assign[=] binary_operation[name[diff].changed_foreign_keys + name[diff].added_foreign_keys]
for taget[name[constraint]] in starred[name[foreign_keys_diff]] begin[:]
variable[constraint_name] assign[=] call[call[name[constraint].get_name, parameter[]].lower, parameter[]]
if name[constraint_name] begin[:]
call[name[foreign_keys]][name[constraint_name]] assign[=] name[constraint]
return[name[foreign_keys]] | keyword[def] identifier[_get_foreign_keys_in_altered_table] ( identifier[self] , identifier[diff] ):
literal[string]
identifier[foreign_keys] = identifier[diff] . identifier[from_table] . identifier[get_foreign_keys] ()
identifier[column_names] = identifier[self] . identifier[_get_column_names_in_altered_table] ( identifier[diff] )
keyword[for] identifier[key] , identifier[constraint] keyword[in] identifier[foreign_keys] . identifier[items] ():
identifier[changed] = keyword[False]
identifier[local_columns] =[]
keyword[for] identifier[column_name] keyword[in] identifier[constraint] . identifier[get_local_columns] ():
identifier[normalized_column_name] = identifier[column_name] . identifier[lower] ()
keyword[if] identifier[normalized_column_name] keyword[not] keyword[in] identifier[column_names] :
keyword[del] identifier[foreign_keys] [ identifier[key] ]
keyword[break]
keyword[else] :
identifier[local_columns] . identifier[append] ( identifier[column_names] [ identifier[normalized_column_name] ])
keyword[if] identifier[column_name] != identifier[column_names] [ identifier[normalized_column_name] ]:
identifier[changed] = keyword[True]
keyword[if] identifier[changed] :
identifier[foreign_keys] [ identifier[key] ]= identifier[ForeignKeyConstraint] (
identifier[local_columns] ,
identifier[constraint] . identifier[get_foreign_table_name] (),
identifier[constraint] . identifier[get_foreign_columns] (),
identifier[constraint] . identifier[get_name] (),
identifier[constraint] . identifier[get_options] (),
)
keyword[for] identifier[constraint] keyword[in] identifier[diff] . identifier[removed_foreign_keys] :
identifier[constraint_name] = identifier[constraint] . identifier[get_name] (). identifier[lower] ()
keyword[if] identifier[constraint_name] keyword[and] identifier[constraint_name] keyword[in] identifier[foreign_keys] :
keyword[del] identifier[foreign_keys] [ identifier[constraint_name] ]
identifier[foreign_keys_diff] = identifier[diff] . identifier[changed_foreign_keys] + identifier[diff] . identifier[added_foreign_keys]
keyword[for] identifier[constraint] keyword[in] identifier[foreign_keys_diff] :
identifier[constraint_name] = identifier[constraint] . identifier[get_name] (). identifier[lower] ()
keyword[if] identifier[constraint_name] :
identifier[foreign_keys] [ identifier[constraint_name] ]= identifier[constraint]
keyword[else] :
identifier[foreign_keys] [ identifier[len] ( identifier[foreign_keys] )]= identifier[constraint]
keyword[return] identifier[foreign_keys] | def _get_foreign_keys_in_altered_table(self, diff):
"""
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict
"""
foreign_keys = diff.from_table.get_foreign_keys()
column_names = self._get_column_names_in_altered_table(diff)
for (key, constraint) in foreign_keys.items():
changed = False
local_columns = []
for column_name in constraint.get_local_columns():
normalized_column_name = column_name.lower()
if normalized_column_name not in column_names:
del foreign_keys[key]
break # depends on [control=['if'], data=[]]
else:
local_columns.append(column_names[normalized_column_name])
if column_name != column_names[normalized_column_name]:
changed = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column_name']]
if changed:
foreign_keys[key] = ForeignKeyConstraint(local_columns, constraint.get_foreign_table_name(), constraint.get_foreign_columns(), constraint.get_name(), constraint.get_options()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for constraint in diff.removed_foreign_keys:
constraint_name = constraint.get_name().lower()
if constraint_name and constraint_name in foreign_keys:
del foreign_keys[constraint_name] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['constraint']]
foreign_keys_diff = diff.changed_foreign_keys + diff.added_foreign_keys
for constraint in foreign_keys_diff:
constraint_name = constraint.get_name().lower()
if constraint_name:
foreign_keys[constraint_name] = constraint # depends on [control=['if'], data=[]]
else:
foreign_keys[len(foreign_keys)] = constraint # depends on [control=['for'], data=['constraint']]
return foreign_keys |
def secure_randint(min_value, max_value, system_random=None):
    """Return a random integer N such that min_value <= N <= max_value.

    Draws from SystemRandom (backed by os.urandom, i.e. /dev/urandom)
    unless a specific random source is supplied by the caller.
    """
    rng = system_random or random.SystemRandom()
    return rng.randint(min_value, max_value)
constant[ Return a random integer N such that a <= N <= b.
Uses SystemRandom for generating random numbers.
(which uses os.urandom(), which pulls from /dev/urandom)
]
if <ast.UnaryOp object at 0x7da1b23b0730> begin[:]
variable[system_random] assign[=] call[name[random].SystemRandom, parameter[]]
return[call[name[system_random].randint, parameter[name[min_value], name[max_value]]]] | keyword[def] identifier[secure_randint] ( identifier[min_value] , identifier[max_value] , identifier[system_random] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[system_random] :
identifier[system_random] = identifier[random] . identifier[SystemRandom] ()
keyword[return] identifier[system_random] . identifier[randint] ( identifier[min_value] , identifier[max_value] ) | def secure_randint(min_value, max_value, system_random=None):
""" Return a random integer N such that a <= N <= b.
Uses SystemRandom for generating random numbers.
(which uses os.urandom(), which pulls from /dev/urandom)
"""
if not system_random:
system_random = random.SystemRandom() # depends on [control=['if'], data=[]]
return system_random.randint(min_value, max_value) |
def visit_call(self, node):
    """return an astroid.Call node as string"""
    # Render the callee (parenthesized if precedence requires it), then
    # the positional arguments followed by any keyword arguments.
    func_str = self._precedence_parens(node, node.func)
    pieces = [child.accept(self) for child in node.args]
    if node.keywords:
        pieces.extend(kwarg.accept(self) for kwarg in node.keywords)
    return "{}({})".format(func_str, ", ".join(pieces))
constant[return an astroid.Call node as string]
variable[expr_str] assign[=] call[name[self]._precedence_parens, parameter[name[node], name[node].func]]
variable[args] assign[=] <ast.ListComp object at 0x7da1b1e650f0>
if name[node].keywords begin[:]
variable[keywords] assign[=] <ast.ListComp object at 0x7da1b1e64520>
call[name[args].extend, parameter[name[keywords]]]
return[binary_operation[constant[%s(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1e65c30>, <ast.Call object at 0x7da1b1e66710>]]]] | keyword[def] identifier[visit_call] ( identifier[self] , identifier[node] ):
literal[string]
identifier[expr_str] = identifier[self] . identifier[_precedence_parens] ( identifier[node] , identifier[node] . identifier[func] )
identifier[args] =[ identifier[arg] . identifier[accept] ( identifier[self] ) keyword[for] identifier[arg] keyword[in] identifier[node] . identifier[args] ]
keyword[if] identifier[node] . identifier[keywords] :
identifier[keywords] =[ identifier[kwarg] . identifier[accept] ( identifier[self] ) keyword[for] identifier[kwarg] keyword[in] identifier[node] . identifier[keywords] ]
keyword[else] :
identifier[keywords] =[]
identifier[args] . identifier[extend] ( identifier[keywords] )
keyword[return] literal[string] %( identifier[expr_str] , literal[string] . identifier[join] ( identifier[args] )) | def visit_call(self, node):
"""return an astroid.Call node as string"""
expr_str = self._precedence_parens(node, node.func)
args = [arg.accept(self) for arg in node.args]
if node.keywords:
keywords = [kwarg.accept(self) for kwarg in node.keywords] # depends on [control=['if'], data=[]]
else:
keywords = []
args.extend(keywords)
return '%s(%s)' % (expr_str, ', '.join(args)) |
def _fast_dataset(
    variables: 'OrderedDict[Any, Variable]',
    coord_variables: Mapping[Any, Variable],
) -> 'Dataset':
    """Create a dataset as quickly as possible.

    Beware: the `variables` OrderedDict is modified INPLACE.
    """
    from .dataset import Dataset

    # Merge the coordinates into the variable mapping (mutates the
    # caller's dict on purpose) and record which names are coordinates.
    variables.update(coord_variables)
    return Dataset._from_vars_and_coord_names(variables, set(coord_variables))
constant[Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is modified INPLACE.
]
from relative_module[dataset] import module[Dataset]
call[name[variables].update, parameter[name[coord_variables]]]
variable[coord_names] assign[=] call[name[set], parameter[name[coord_variables]]]
return[call[name[Dataset]._from_vars_and_coord_names, parameter[name[variables], name[coord_names]]]] | keyword[def] identifier[_fast_dataset] (
identifier[variables] : literal[string] ,
identifier[coord_variables] : identifier[Mapping] [ identifier[Any] , identifier[Variable] ],
)-> literal[string] :
literal[string]
keyword[from] . identifier[dataset] keyword[import] identifier[Dataset]
identifier[variables] . identifier[update] ( identifier[coord_variables] )
identifier[coord_names] = identifier[set] ( identifier[coord_variables] )
keyword[return] identifier[Dataset] . identifier[_from_vars_and_coord_names] ( identifier[variables] , identifier[coord_names] ) | def _fast_dataset(variables: 'OrderedDict[Any, Variable]', coord_variables: Mapping[Any, Variable]) -> 'Dataset':
"""Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._from_vars_and_coord_names(variables, coord_names) |
def update_vpc_entry(nexus_ips, vpc_id, learned, active):
    """Change active state in vpc_allocate data base."""
    LOG.debug("update_vpc_entry called")
    session = bc.get_writer_session()
    with session.begin():
        # A row only qualifies for the update when its current active
        # flag is the opposite of the requested one.
        expected_active = not active
        for switch_ip in nexus_ips:
            result = session.execute(
                sa.update(nexus_models_v2.NexusVPCAlloc).values({
                    'learned': learned,
                    'active': active}).where(sa.and_(
                        nexus_models_v2.NexusVPCAlloc.switch_ip == switch_ip,
                        nexus_models_v2.NexusVPCAlloc.vpc_id == vpc_id,
                        nexus_models_v2.NexusVPCAlloc.active == expected_active
                    )))
            # Exactly one row must have matched; anything else means the
            # expected allocation record was not found.
            if result.rowcount != 1:
                raise c_exc.NexusVPCAllocNotFound(
                    switch_ip=switch_ip, vpc_id=vpc_id, active=active)
constant[Change active state in vpc_allocate data base.]
call[name[LOG].debug, parameter[constant[update_vpc_entry called]]]
variable[session] assign[=] call[name[bc].get_writer_session, parameter[]]
with call[name[session].begin, parameter[]] begin[:]
for taget[name[n_ip]] in starred[name[nexus_ips]] begin[:]
variable[flipit] assign[=] <ast.UnaryOp object at 0x7da2041daa40>
variable[x] assign[=] call[name[session].execute, parameter[call[call[call[name[sa].update, parameter[name[nexus_models_v2].NexusVPCAlloc]].values, parameter[dictionary[[<ast.Constant object at 0x7da2041d8c10>, <ast.Constant object at 0x7da2041db3a0>], [<ast.Name object at 0x7da2041db850>, <ast.Name object at 0x7da2041db8b0>]]]].where, parameter[call[name[sa].and_, parameter[compare[name[nexus_models_v2].NexusVPCAlloc.switch_ip equal[==] name[n_ip]], compare[name[nexus_models_v2].NexusVPCAlloc.vpc_id equal[==] name[vpc_id]], compare[name[nexus_models_v2].NexusVPCAlloc.active equal[==] name[flipit]]]]]]]]
if compare[name[x].rowcount not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20c6e68c0> | keyword[def] identifier[update_vpc_entry] ( identifier[nexus_ips] , identifier[vpc_id] , identifier[learned] , identifier[active] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] )
identifier[session] = identifier[bc] . identifier[get_writer_session] ()
keyword[with] identifier[session] . identifier[begin] ():
keyword[for] identifier[n_ip] keyword[in] identifier[nexus_ips] :
identifier[flipit] = keyword[not] identifier[active]
identifier[x] = identifier[session] . identifier[execute] (
identifier[sa] . identifier[update] ( identifier[nexus_models_v2] . identifier[NexusVPCAlloc] ). identifier[values] ({
literal[string] : identifier[learned] ,
literal[string] : identifier[active] }). identifier[where] ( identifier[sa] . identifier[and_] (
identifier[nexus_models_v2] . identifier[NexusVPCAlloc] . identifier[switch_ip] == identifier[n_ip] ,
identifier[nexus_models_v2] . identifier[NexusVPCAlloc] . identifier[vpc_id] == identifier[vpc_id] ,
identifier[nexus_models_v2] . identifier[NexusVPCAlloc] . identifier[active] == identifier[flipit]
)))
keyword[if] identifier[x] . identifier[rowcount] != literal[int] :
keyword[raise] identifier[c_exc] . identifier[NexusVPCAllocNotFound] (
identifier[switch_ip] = identifier[n_ip] , identifier[vpc_id] = identifier[vpc_id] , identifier[active] = identifier[active] ) | def update_vpc_entry(nexus_ips, vpc_id, learned, active):
"""Change active state in vpc_allocate data base."""
LOG.debug('update_vpc_entry called')
session = bc.get_writer_session()
with session.begin():
for n_ip in nexus_ips:
flipit = not active
x = session.execute(sa.update(nexus_models_v2.NexusVPCAlloc).values({'learned': learned, 'active': active}).where(sa.and_(nexus_models_v2.NexusVPCAlloc.switch_ip == n_ip, nexus_models_v2.NexusVPCAlloc.vpc_id == vpc_id, nexus_models_v2.NexusVPCAlloc.active == flipit)))
if x.rowcount != 1:
raise c_exc.NexusVPCAllocNotFound(switch_ip=n_ip, vpc_id=vpc_id, active=active) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n_ip']] # depends on [control=['with'], data=[]] |
def hmtk_histogram_2D(xvalues, yvalues, bins, x_offset=1.0E-10,
                      y_offset=1.0E-10):
    """
    See the explanation for the 1D case - now applied to 2D.

    :param numpy.ndarray xvalues:
        Values of x-data
    :param numpy.ndarray yvalues:
        Values of y-data
    :param tuple bins:
        Tuple containing bin intervals for x-data and y-data (as numpy arrays)
    :param float x_offset:
        Small amount to offset the x-bins for floating point precision
    :param float y_offset:
        Small amount to offset the y-bins for floating point precision
    :returns:
        Count in each bin (as float)
    """
    # Shift the bin edges slightly so values sitting exactly on an edge
    # land in a deterministic bin despite floating point noise.
    x_edges = bins[0] - x_offset
    y_edges = bins[1] - y_offset
    n_cols = len(x_edges) - 1
    n_rows = len(y_edges) - 1
    counts = np.zeros([n_rows, n_cols], dtype=float)
    for row in range(n_rows):
        # Select this y-slice once, then bin only its x-values.
        in_row = np.logical_and(yvalues >= y_edges[row],
                                yvalues < y_edges[row + 1])
        row_x = xvalues[in_row]
        for col in range(n_cols):
            in_cell = np.logical_and(row_x >= x_edges[col],
                                     row_x < x_edges[col + 1])
            counts[row, col] += float(np.count_nonzero(in_cell))
    return counts.T
constant[
See the explanation for the 1D case - now applied to 2D.
:param numpy.ndarray xvalues:
Values of x-data
:param numpy.ndarray yvalues:
Values of y-data
:param tuple bins:
Tuple containing bin intervals for x-data and y-data (as numpy arrays)
:param float x_offset:
Small amount to offset the x-bins for floating point precision
:param float y_offset:
Small amount to offset the y-bins for floating point precision
:returns:
Count in each bin (as float)
]
<ast.Tuple object at 0x7da207f03f40> assign[=] tuple[[<ast.BinOp object at 0x7da207f006d0>, <ast.BinOp object at 0x7da207f03130>]]
variable[n_x] assign[=] binary_operation[call[name[len], parameter[name[xbins]]] - constant[1]]
variable[n_y] assign[=] binary_operation[call[name[len], parameter[name[ybins]]] - constant[1]]
variable[counter] assign[=] call[name[np].zeros, parameter[list[[<ast.Name object at 0x7da207f01c60>, <ast.Name object at 0x7da207f01690>]]]]
for taget[name[j]] in starred[call[name[range], parameter[name[n_y]]]] begin[:]
variable[y_idx] assign[=] call[name[np].logical_and, parameter[compare[name[yvalues] greater_or_equal[>=] call[name[ybins]][name[j]]], compare[name[yvalues] less[<] call[name[ybins]][binary_operation[name[j] + constant[1]]]]]]
variable[x_vals] assign[=] call[name[xvalues]][name[y_idx]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_x]]]] begin[:]
variable[idx] assign[=] call[name[np].logical_and, parameter[compare[name[x_vals] greater_or_equal[>=] call[name[xbins]][name[i]]], compare[name[x_vals] less[<] call[name[xbins]][binary_operation[name[i] + constant[1]]]]]]
<ast.AugAssign object at 0x7da207f02a10>
return[name[counter].T] | keyword[def] identifier[hmtk_histogram_2D] ( identifier[xvalues] , identifier[yvalues] , identifier[bins] , identifier[x_offset] = literal[int] ,
identifier[y_offset] = literal[int] ):
literal[string]
identifier[xbins] , identifier[ybins] =( identifier[bins] [ literal[int] ]- identifier[x_offset] , identifier[bins] [ literal[int] ]- identifier[y_offset] )
identifier[n_x] = identifier[len] ( identifier[xbins] )- literal[int]
identifier[n_y] = identifier[len] ( identifier[ybins] )- literal[int]
identifier[counter] = identifier[np] . identifier[zeros] ([ identifier[n_y] , identifier[n_x] ], identifier[dtype] = identifier[float] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n_y] ):
identifier[y_idx] = identifier[np] . identifier[logical_and] ( identifier[yvalues] >= identifier[ybins] [ identifier[j] ], identifier[yvalues] < identifier[ybins] [ identifier[j] + literal[int] ])
identifier[x_vals] = identifier[xvalues] [ identifier[y_idx] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_x] ):
identifier[idx] = identifier[np] . identifier[logical_and] ( identifier[x_vals] >= identifier[xbins] [ identifier[i] ], identifier[x_vals] < identifier[xbins] [ identifier[i] + literal[int] ])
identifier[counter] [ identifier[j] , identifier[i] ]+= identifier[float] ( identifier[np] . identifier[sum] ( identifier[idx] ))
keyword[return] identifier[counter] . identifier[T] | def hmtk_histogram_2D(xvalues, yvalues, bins, x_offset=1e-10, y_offset=1e-10):
"""
See the explanation for the 1D case - now applied to 2D.
:param numpy.ndarray xvalues:
Values of x-data
:param numpy.ndarray yvalues:
Values of y-data
:param tuple bins:
Tuple containing bin intervals for x-data and y-data (as numpy arrays)
:param float x_offset:
Small amount to offset the x-bins for floating point precision
:param float y_offset:
Small amount to offset the y-bins for floating point precision
:returns:
Count in each bin (as float)
"""
(xbins, ybins) = (bins[0] - x_offset, bins[1] - y_offset)
n_x = len(xbins) - 1
n_y = len(ybins) - 1
counter = np.zeros([n_y, n_x], dtype=float)
for j in range(n_y):
y_idx = np.logical_and(yvalues >= ybins[j], yvalues < ybins[j + 1])
x_vals = xvalues[y_idx]
for i in range(n_x):
idx = np.logical_and(x_vals >= xbins[i], x_vals < xbins[i + 1])
counter[j, i] += float(np.sum(idx)) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']]
return counter.T |
def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules,
           loadBalancerClassOfServiceID=1, *args, **kwargs):
    """
    Enqueue the creation of a new Load Balancer service.

    :type healthCheckNotification: bool
    :type instance: list[Instance]
    :type ipAddressResourceId: list[int]
    :type loadBalancerClassOfServiceID: int
    :type name: str
    :type notificationContacts: NotificationContacts or list[NotificationContact]
    :type rules: Rules
    :param healthCheckNotification: Enable or disable notifications
    :param instance: List of balanced IP Addresses (VM or server)
    :param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
    :param loadBalancerClassOfServiceID: default 1
    :param name: Name of the Load Balancer
    :param notificationContacts: Nullable if notificationContacts is false
    :param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
    """
    # Delegate to the generic API dispatcher; extra positional/keyword
    # arguments are forwarded unchanged to the underlying call.
    # NOTE(review): `response` is assigned but never returned or used in
    # this view of the code — confirm whether callers expect this method
    # to return the API response.
    response = self._call(method=SetEnqueueLoadBalancerCreation,
                          healthCheckNotification=healthCheckNotification,
                          instance=instance,
                          ipAddressResourceId=ipAddressResourceId,
                          name=name,
                          notificationContacts=notificationContacts,
                          rules=rules,
                          loadBalancerClassOfServiceID=loadBalancerClassOfServiceID,
                          *args, **kwargs)
constant[
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
]
variable[response] assign[=] call[name[self]._call, parameter[<ast.Starred object at 0x7da1b0f58ca0>]] | keyword[def] identifier[create] ( identifier[self] , identifier[healthCheckNotification] , identifier[instance] , identifier[ipAddressResourceId] , identifier[name] , identifier[notificationContacts] , identifier[rules] ,
identifier[loadBalancerClassOfServiceID] = literal[int] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[response] = identifier[self] . identifier[_call] ( identifier[method] = identifier[SetEnqueueLoadBalancerCreation] ,
identifier[healthCheckNotification] = identifier[healthCheckNotification] ,
identifier[instance] = identifier[instance] ,
identifier[ipAddressResourceId] = identifier[ipAddressResourceId] ,
identifier[name] = identifier[name] ,
identifier[notificationContacts] = identifier[notificationContacts] ,
identifier[rules] = identifier[rules] ,
identifier[loadBalancerClassOfServiceID] = identifier[loadBalancerClassOfServiceID] ,
* identifier[args] ,** identifier[kwargs] ) | def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules, loadBalancerClassOfServiceID=1, *args, **kwargs):
"""
:type healthCheckNotification: bool
:type instance: list[Instance]
:type ipAddressResourceId: list[int]
:type loadBalancerClassOfServiceID: int
:type name: str
:type notificationContacts: NotificationContacts or list[NotificationContact]
:type rules: Rules
:param healthCheckNotification: Enable or disable notifications
:param instance: List of balanced IP Addresses (VM or server)
:param ipAddressResourceId: ID of the IP Address resource of the Load Balancer
:param loadBalancerClassOfServiceID: default 1
:param name: Name of the Load Balancer
:param notificationContacts: Nullable if notificationContacts is false
:param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service
"""
response = self._call(*args, method=SetEnqueueLoadBalancerCreation, healthCheckNotification=healthCheckNotification, instance=instance, ipAddressResourceId=ipAddressResourceId, name=name, notificationContacts=notificationContacts, rules=rules, loadBalancerClassOfServiceID=loadBalancerClassOfServiceID, **kwargs) |
def analyzeModelWeightDistribution(modelName, base):
    """Plot histogram of non-zero weight values."""
    # Load the serialized model and switch to inference mode before
    # inspecting the first layer's weight tensor.
    net = torch.load(modelName)
    net.eval()
    analyzeWeightDistribution(net.l1.weight.data, base)
constant[Plot histogram of non-zero weight values.]
variable[model] assign[=] call[name[torch].load, parameter[name[modelName]]]
call[name[model].eval, parameter[]]
call[name[analyzeWeightDistribution], parameter[name[model].l1.weight.data, name[base]]] | keyword[def] identifier[analyzeModelWeightDistribution] ( identifier[modelName] , identifier[base] ):
literal[string]
identifier[model] = identifier[torch] . identifier[load] ( identifier[modelName] )
identifier[model] . identifier[eval] ()
identifier[analyzeWeightDistribution] ( identifier[model] . identifier[l1] . identifier[weight] . identifier[data] , identifier[base] ) | def analyzeModelWeightDistribution(modelName, base):
"""Plot histogram of non-zero weight values."""
model = torch.load(modelName)
model.eval()
analyzeWeightDistribution(model.l1.weight.data, base) |
def run_tensorboard(run_id, tflog_id):
    """Launch TensorBoard for a given run ID and log ID of that run."""
    data = current_app.config["data"]
    # Optimistically suppose the run exists...
    run = data.get_run_dao().get(run_id)
    base_dir = Path(run["experiment"]["base_dir"])
    log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id])
    # pathlib's join already replaces the base when the right-hand path is
    # absolute, so the former explicit is_absolute() branch is redundant.
    path_to_log_dir = base_dir / log_dir
    port = int(tensorboard.run_tensorboard(str(path_to_log_dir)))
    # Redirect the client to the TensorBoard port on the same host it used
    # to reach this app (strip scheme and port from the request root URL).
    redirect_to_address = re.search(r"://([^:/]+)", request.url_root).group(1)
    return redirect("http://%s:%d" % (redirect_to_address, port))
constant[Launch TensorBoard for a given run ID and log ID of that run.]
variable[data] assign[=] call[name[current_app].config][constant[data]]
variable[run] assign[=] call[call[name[data].get_run_dao, parameter[]].get, parameter[name[run_id]]]
variable[base_dir] assign[=] call[name[Path], parameter[call[call[name[run]][constant[experiment]]][constant[base_dir]]]]
variable[log_dir] assign[=] call[name[Path], parameter[call[call[call[call[name[run]][constant[info]]][constant[tensorflow]]][constant[logdirs]]][name[tflog_id]]]]
if call[name[log_dir].is_absolute, parameter[]] begin[:]
variable[path_to_log_dir] assign[=] name[log_dir]
variable[port] assign[=] call[name[int], parameter[call[name[tensorboard].run_tensorboard, parameter[call[name[str], parameter[name[path_to_log_dir]]]]]]]
variable[url_root] assign[=] name[request].url_root
variable[url_parts] assign[=] call[name[re].search, parameter[constant[://([^:/]+)], name[url_root]]]
variable[redirect_to_address] assign[=] call[name[url_parts].group, parameter[constant[1]]]
return[call[name[redirect], parameter[binary_operation[constant[http://%s:%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6a9db0>, <ast.Name object at 0x7da20c6abc70>]]]]]] | keyword[def] identifier[run_tensorboard] ( identifier[run_id] , identifier[tflog_id] ):
literal[string]
identifier[data] = identifier[current_app] . identifier[config] [ literal[string] ]
identifier[run] = identifier[data] . identifier[get_run_dao] (). identifier[get] ( identifier[run_id] )
identifier[base_dir] = identifier[Path] ( identifier[run] [ literal[string] ][ literal[string] ])
identifier[log_dir] = identifier[Path] ( identifier[run] [ literal[string] ][ literal[string] ][ literal[string] ][ identifier[tflog_id] ])
keyword[if] identifier[log_dir] . identifier[is_absolute] ():
identifier[path_to_log_dir] = identifier[log_dir]
keyword[else] :
identifier[path_to_log_dir] = identifier[base_dir] . identifier[joinpath] ( identifier[log_dir] )
identifier[port] = identifier[int] ( identifier[tensorboard] . identifier[run_tensorboard] ( identifier[str] ( identifier[path_to_log_dir] )))
identifier[url_root] = identifier[request] . identifier[url_root]
identifier[url_parts] = identifier[re] . identifier[search] ( literal[string] , identifier[url_root] )
identifier[redirect_to_address] = identifier[url_parts] . identifier[group] ( literal[int] )
keyword[return] identifier[redirect] ( literal[string] %( identifier[redirect_to_address] , identifier[port] )) | def run_tensorboard(run_id, tflog_id):
"""Launch TensorBoard for a given run ID and log ID of that run."""
data = current_app.config['data']
# optimisticaly suppose the run exists...
run = data.get_run_dao().get(run_id)
base_dir = Path(run['experiment']['base_dir'])
log_dir = Path(run['info']['tensorflow']['logdirs'][tflog_id])
# TODO ugly!!!
if log_dir.is_absolute():
path_to_log_dir = log_dir # depends on [control=['if'], data=[]]
else:
path_to_log_dir = base_dir.joinpath(log_dir)
port = int(tensorboard.run_tensorboard(str(path_to_log_dir)))
url_root = request.url_root
url_parts = re.search('://([^:/]+)', url_root)
redirect_to_address = url_parts.group(1)
return redirect('http://%s:%d' % (redirect_to_address, port)) |
def showGridColumns( self ):
"""
Returns whether or not this delegate should draw columns when \
rendering the grid.
:return <bool>
"""
delegate = self.itemDelegate()
if ( isinstance(delegate, XTreeWidgetDelegate) ):
return delegate.showGridColumns()
return False | def function[showGridColumns, parameter[self]]:
constant[
Returns whether or not this delegate should draw columns when rendering the grid.
:return <bool>
]
variable[delegate] assign[=] call[name[self].itemDelegate, parameter[]]
if call[name[isinstance], parameter[name[delegate], name[XTreeWidgetDelegate]]] begin[:]
return[call[name[delegate].showGridColumns, parameter[]]]
return[constant[False]] | keyword[def] identifier[showGridColumns] ( identifier[self] ):
literal[string]
identifier[delegate] = identifier[self] . identifier[itemDelegate] ()
keyword[if] ( identifier[isinstance] ( identifier[delegate] , identifier[XTreeWidgetDelegate] )):
keyword[return] identifier[delegate] . identifier[showGridColumns] ()
keyword[return] keyword[False] | def showGridColumns(self):
"""
Returns whether or not this delegate should draw columns when rendering the grid.
:return <bool>
"""
delegate = self.itemDelegate()
if isinstance(delegate, XTreeWidgetDelegate):
return delegate.showGridColumns() # depends on [control=['if'], data=[]]
return False |
def update(self,
clearEmptyFields=True,
title=None,
description=None,
snippet=None,
tags=None,
phone=None,
access=None,
sortField=None,
sortOrder=None,
isViewOnly=None,
isInvitationOnly=None,
thumbnail=None):
"""
The Update Group operation (POST only) modifies properties such as
the group title, tags, description, sort field and order, and
member sharing capabilities. Available only to the group
administrators or to the administrator of the organization if the
user is a member.
Only the properties that are to be updated need to be specified in
the request. Properties not specified will not be affected.
The group ID cannot be modified.
Inputs:
title - The group title must be unique for the username, and the
character limit is 250.
Example: title=Redlands Fire Department
description - A description of the group that can be any length.
snippet - Snippet or summary of the group that has a character
limit of 250 characters.
tags - Tags are words or short phrases that describe the group.
Separate terms with commas.
phone - Phone is the group contact information. It can be a
combination of letters and numbers. The character limit
is 250.
access - Sets the access level for the group. private is the
default. Setting to org restricts group access to
members of your organization. If public, all users can
access the group.
Values: private | org |public
sortField - Sets sort field for group items.
Values: title | owner | avgRating |
numViews| created | modified
sortOrder - Sets sort order for group items.
Values: asc | desc
isViewOnly - Allows the group owner or admin to create view-only
groups where members are not able to share items.
If members try to share, view-only groups are
returned in the notshared response property.
Values: false | true
"""
params = {
"f" : "json"
}
if title is not None:
params['title'] = title
if description is not None:
params['description'] = description
if snippet is not None:
params['snippet'] = snippet
if tags is not None:
params['tags'] = tags
if phone is not None:
params['phone'] = phone
if access is not None:
params['access'] = access
if sortField is not None:
params['sortField'] = sortField
if isViewOnly is not None:
params['isViewOnly'] = isViewOnly
if isInvitationOnly is not None:
params['isInvitationOnly'] = isInvitationOnly
if clearEmptyFields is not None:
params['clearEmptyFields'] = clearEmptyFields
files = {}
url = self._url + "/update"
if thumbnail is not None and \
os.path.isfile(thumbnail):
files['thumbnail'] =thumbnail
res = None
if thumbnail is not None and \
os.path.isfile(thumbnail):
res = self._post(url=url,
param_dict=params,
files=files,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res
else:
res = self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self.__init()
return res | def function[update, parameter[self, clearEmptyFields, title, description, snippet, tags, phone, access, sortField, sortOrder, isViewOnly, isInvitationOnly, thumbnail]]:
constant[
The Update Group operation (POST only) modifies properties such as
the group title, tags, description, sort field and order, and
member sharing capabilities. Available only to the group
administrators or to the administrator of the organization if the
user is a member.
Only the properties that are to be updated need to be specified in
the request. Properties not specified will not be affected.
The group ID cannot be modified.
Inputs:
title - The group title must be unique for the username, and the
character limit is 250.
Example: title=Redlands Fire Department
description - A description of the group that can be any length.
snippet - Snippet or summary of the group that has a character
limit of 250 characters.
tags - Tags are words or short phrases that describe the group.
Separate terms with commas.
phone - Phone is the group contact information. It can be a
combination of letters and numbers. The character limit
is 250.
access - Sets the access level for the group. private is the
default. Setting to org restricts group access to
members of your organization. If public, all users can
access the group.
Values: private | org |public
sortField - Sets sort field for group items.
Values: title | owner | avgRating |
numViews| created | modified
sortOrder - Sets sort order for group items.
Values: asc | desc
isViewOnly - Allows the group owner or admin to create view-only
groups where members are not able to share items.
If members try to share, view-only groups are
returned in the notshared response property.
Values: false | true
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73040>], [<ast.Constant object at 0x7da18bc73610>]]
if compare[name[title] is_not constant[None]] begin[:]
call[name[params]][constant[title]] assign[=] name[title]
if compare[name[description] is_not constant[None]] begin[:]
call[name[params]][constant[description]] assign[=] name[description]
if compare[name[snippet] is_not constant[None]] begin[:]
call[name[params]][constant[snippet]] assign[=] name[snippet]
if compare[name[tags] is_not constant[None]] begin[:]
call[name[params]][constant[tags]] assign[=] name[tags]
if compare[name[phone] is_not constant[None]] begin[:]
call[name[params]][constant[phone]] assign[=] name[phone]
if compare[name[access] is_not constant[None]] begin[:]
call[name[params]][constant[access]] assign[=] name[access]
if compare[name[sortField] is_not constant[None]] begin[:]
call[name[params]][constant[sortField]] assign[=] name[sortField]
if compare[name[isViewOnly] is_not constant[None]] begin[:]
call[name[params]][constant[isViewOnly]] assign[=] name[isViewOnly]
if compare[name[isInvitationOnly] is_not constant[None]] begin[:]
call[name[params]][constant[isInvitationOnly]] assign[=] name[isInvitationOnly]
if compare[name[clearEmptyFields] is_not constant[None]] begin[:]
call[name[params]][constant[clearEmptyFields]] assign[=] name[clearEmptyFields]
variable[files] assign[=] dictionary[[], []]
variable[url] assign[=] binary_operation[name[self]._url + constant[/update]]
if <ast.BoolOp object at 0x7da1b1257850> begin[:]
call[name[files]][constant[thumbnail]] assign[=] name[thumbnail]
variable[res] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b12543d0> begin[:]
variable[res] assign[=] call[name[self]._post, parameter[]]
return[name[res]]
call[name[self].__init, parameter[]]
return[name[res]] | keyword[def] identifier[update] ( identifier[self] ,
identifier[clearEmptyFields] = keyword[True] ,
identifier[title] = keyword[None] ,
identifier[description] = keyword[None] ,
identifier[snippet] = keyword[None] ,
identifier[tags] = keyword[None] ,
identifier[phone] = keyword[None] ,
identifier[access] = keyword[None] ,
identifier[sortField] = keyword[None] ,
identifier[sortOrder] = keyword[None] ,
identifier[isViewOnly] = keyword[None] ,
identifier[isInvitationOnly] = keyword[None] ,
identifier[thumbnail] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : literal[string]
}
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[title]
keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[description]
keyword[if] identifier[snippet] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[snippet]
keyword[if] identifier[tags] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[tags]
keyword[if] identifier[phone] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[phone]
keyword[if] identifier[access] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[access]
keyword[if] identifier[sortField] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[sortField]
keyword[if] identifier[isViewOnly] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[isViewOnly]
keyword[if] identifier[isInvitationOnly] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[isInvitationOnly]
keyword[if] identifier[clearEmptyFields] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[clearEmptyFields]
identifier[files] ={}
identifier[url] = identifier[self] . identifier[_url] + literal[string]
keyword[if] identifier[thumbnail] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[thumbnail] ):
identifier[files] [ literal[string] ]= identifier[thumbnail]
identifier[res] = keyword[None]
keyword[if] identifier[thumbnail] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[thumbnail] ):
identifier[res] = identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[files] = identifier[files] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] )
keyword[return] identifier[res]
keyword[else] :
identifier[res] = identifier[self] . identifier[_post] ( identifier[url] = identifier[url] , identifier[param_dict] = identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] )
identifier[self] . identifier[__init] ()
keyword[return] identifier[res] | def update(self, clearEmptyFields=True, title=None, description=None, snippet=None, tags=None, phone=None, access=None, sortField=None, sortOrder=None, isViewOnly=None, isInvitationOnly=None, thumbnail=None):
"""
The Update Group operation (POST only) modifies properties such as
the group title, tags, description, sort field and order, and
member sharing capabilities. Available only to the group
administrators or to the administrator of the organization if the
user is a member.
Only the properties that are to be updated need to be specified in
the request. Properties not specified will not be affected.
The group ID cannot be modified.
Inputs:
title - The group title must be unique for the username, and the
character limit is 250.
Example: title=Redlands Fire Department
description - A description of the group that can be any length.
snippet - Snippet or summary of the group that has a character
limit of 250 characters.
tags - Tags are words or short phrases that describe the group.
Separate terms with commas.
phone - Phone is the group contact information. It can be a
combination of letters and numbers. The character limit
is 250.
access - Sets the access level for the group. private is the
default. Setting to org restricts group access to
members of your organization. If public, all users can
access the group.
Values: private | org |public
sortField - Sets sort field for group items.
Values: title | owner | avgRating |
numViews| created | modified
sortOrder - Sets sort order for group items.
Values: asc | desc
isViewOnly - Allows the group owner or admin to create view-only
groups where members are not able to share items.
If members try to share, view-only groups are
returned in the notshared response property.
Values: false | true
"""
params = {'f': 'json'}
if title is not None:
params['title'] = title # depends on [control=['if'], data=['title']]
if description is not None:
params['description'] = description # depends on [control=['if'], data=['description']]
if snippet is not None:
params['snippet'] = snippet # depends on [control=['if'], data=['snippet']]
if tags is not None:
params['tags'] = tags # depends on [control=['if'], data=['tags']]
if phone is not None:
params['phone'] = phone # depends on [control=['if'], data=['phone']]
if access is not None:
params['access'] = access # depends on [control=['if'], data=['access']]
if sortField is not None:
params['sortField'] = sortField # depends on [control=['if'], data=['sortField']]
if isViewOnly is not None:
params['isViewOnly'] = isViewOnly # depends on [control=['if'], data=['isViewOnly']]
if isInvitationOnly is not None:
params['isInvitationOnly'] = isInvitationOnly # depends on [control=['if'], data=['isInvitationOnly']]
if clearEmptyFields is not None:
params['clearEmptyFields'] = clearEmptyFields # depends on [control=['if'], data=['clearEmptyFields']]
files = {}
url = self._url + '/update'
if thumbnail is not None and os.path.isfile(thumbnail):
files['thumbnail'] = thumbnail # depends on [control=['if'], data=[]]
res = None
if thumbnail is not None and os.path.isfile(thumbnail):
res = self._post(url=url, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
return res # depends on [control=['if'], data=[]]
else:
res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
self.__init()
return res |
def pack_image(filename, max_size, form_field='image'):
"""Pack an image from file into multipart-formdata post body"""
try:
if os.path.getsize(filename) > (max_size * 1024):
raise IdeaScalyError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise IdeaScalyError('Unable to access file: %s' % e.strerror)
# build the mulitpart-formdata body
fp = open(filename, 'rb')
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise IdeaScalyError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise IdeaScalyError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Id34Sc4ly'
body = list()
body.append(b'--' + BOUNDARY)
body.append('content-disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('content-type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
body_length = str(len(body))
# build headers
headers = {
'content-type': 'multipart/form-data; boundary={0}'.format(BOUNDARY),
'content-length': body_length
}
return headers, body | def function[pack_image, parameter[filename, max_size, form_field]]:
constant[Pack an image from file into multipart-formdata post body]
<ast.Try object at 0x7da2047e8af0>
variable[fp] assign[=] call[name[open], parameter[name[filename], constant[rb]]]
variable[file_type] assign[=] call[name[mimetypes].guess_type, parameter[name[filename]]]
if compare[name[file_type] is constant[None]] begin[:]
<ast.Raise object at 0x7da2047e8490>
variable[file_type] assign[=] call[name[file_type]][constant[0]]
if compare[name[file_type] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da2047e8430>, <ast.Constant object at 0x7da2047e93f0>, <ast.Constant object at 0x7da2047e98d0>]]] begin[:]
<ast.Raise object at 0x7da2047eac50>
if call[name[isinstance], parameter[name[filename], name[six].text_type]] begin[:]
variable[filename] assign[=] call[name[filename].encode, parameter[constant[utf-8]]]
variable[BOUNDARY] assign[=] constant[b'Id34Sc4ly']
variable[body] assign[=] call[name[list], parameter[]]
call[name[body].append, parameter[binary_operation[constant[b'--'] + name[BOUNDARY]]]]
call[name[body].append, parameter[call[call[constant[content-disposition: form-data; name="{0}"; filename="{1}"].format, parameter[name[form_field], name[filename]]].encode, parameter[constant[utf-8]]]]]
call[name[body].append, parameter[call[call[constant[content-type: {0}].format, parameter[name[file_type]]].encode, parameter[constant[utf-8]]]]]
call[name[body].append, parameter[constant[b'']]]
call[name[body].append, parameter[call[name[fp].read, parameter[]]]]
call[name[body].append, parameter[binary_operation[binary_operation[constant[b'--'] + name[BOUNDARY]] + constant[b'--']]]]
call[name[body].append, parameter[constant[b'']]]
call[name[fp].close, parameter[]]
variable[body] assign[=] call[constant[b'\r\n'].join, parameter[name[body]]]
variable[body_length] assign[=] call[name[str], parameter[call[name[len], parameter[name[body]]]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b1301d50>, <ast.Constant object at 0x7da1b1300cd0>], [<ast.Call object at 0x7da1b1300be0>, <ast.Name object at 0x7da1b13024d0>]]
return[tuple[[<ast.Name object at 0x7da1b13016f0>, <ast.Name object at 0x7da1b1300f40>]]] | keyword[def] identifier[pack_image] ( identifier[filename] , identifier[max_size] , identifier[form_field] = literal[string] ):
literal[string]
keyword[try] :
keyword[if] identifier[os] . identifier[path] . identifier[getsize] ( identifier[filename] )>( identifier[max_size] * literal[int] ):
keyword[raise] identifier[IdeaScalyError] ( literal[string] % identifier[max_size] )
keyword[except] identifier[os] . identifier[error] keyword[as] identifier[e] :
keyword[raise] identifier[IdeaScalyError] ( literal[string] % identifier[e] . identifier[strerror] )
identifier[fp] = identifier[open] ( identifier[filename] , literal[string] )
identifier[file_type] = identifier[mimetypes] . identifier[guess_type] ( identifier[filename] )
keyword[if] identifier[file_type] keyword[is] keyword[None] :
keyword[raise] identifier[IdeaScalyError] ( literal[string] )
identifier[file_type] = identifier[file_type] [ literal[int] ]
keyword[if] identifier[file_type] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[IdeaScalyError] ( literal[string] % identifier[file_type] )
keyword[if] identifier[isinstance] ( identifier[filename] , identifier[six] . identifier[text_type] ):
identifier[filename] = identifier[filename] . identifier[encode] ( literal[string] )
identifier[BOUNDARY] = literal[string]
identifier[body] = identifier[list] ()
identifier[body] . identifier[append] ( literal[string] + identifier[BOUNDARY] )
identifier[body] . identifier[append] ( literal[string]
literal[string] . identifier[format] ( identifier[form_field] , identifier[filename] )
. identifier[encode] ( literal[string] ))
identifier[body] . identifier[append] ( literal[string] . identifier[format] ( identifier[file_type] ). identifier[encode] ( literal[string] ))
identifier[body] . identifier[append] ( literal[string] )
identifier[body] . identifier[append] ( identifier[fp] . identifier[read] ())
identifier[body] . identifier[append] ( literal[string] + identifier[BOUNDARY] + literal[string] )
identifier[body] . identifier[append] ( literal[string] )
identifier[fp] . identifier[close] ()
identifier[body] = literal[string] . identifier[join] ( identifier[body] )
identifier[body_length] = identifier[str] ( identifier[len] ( identifier[body] ))
identifier[headers] ={
literal[string] : literal[string] . identifier[format] ( identifier[BOUNDARY] ),
literal[string] : identifier[body_length]
}
keyword[return] identifier[headers] , identifier[body] | def pack_image(filename, max_size, form_field='image'):
"""Pack an image from file into multipart-formdata post body"""
try:
if os.path.getsize(filename) > max_size * 1024:
raise IdeaScalyError('File is too big, must be less than %skb.' % max_size) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except os.error as e:
raise IdeaScalyError('Unable to access file: %s' % e.strerror) # depends on [control=['except'], data=['e']]
# build the mulitpart-formdata body
fp = open(filename, 'rb')
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise IdeaScalyError('Could not determine file type') # depends on [control=['if'], data=[]]
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise IdeaScalyError('Invalid file type for image: %s' % file_type) # depends on [control=['if'], data=['file_type']]
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8') # depends on [control=['if'], data=[]]
BOUNDARY = b'Id34Sc4ly'
body = list()
body.append(b'--' + BOUNDARY)
body.append('content-disposition: form-data; name="{0}"; filename="{1}"'.format(form_field, filename).encode('utf-8'))
body.append('content-type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
body_length = str(len(body))
# build headers
headers = {'content-type': 'multipart/form-data; boundary={0}'.format(BOUNDARY), 'content-length': body_length}
return (headers, body) |
def _pairwise_corr(self, columns=None, covar=None, tail='two-sided',
method='pearson', padjust='none', export_filename=None):
"""Pairwise (partial) correlations."""
stats = pairwise_corr(data=self, columns=columns, covar=covar,
tail=tail, method=method, padjust=padjust,
export_filename=export_filename)
return stats | def function[_pairwise_corr, parameter[self, columns, covar, tail, method, padjust, export_filename]]:
constant[Pairwise (partial) correlations.]
variable[stats] assign[=] call[name[pairwise_corr], parameter[]]
return[name[stats]] | keyword[def] identifier[_pairwise_corr] ( identifier[self] , identifier[columns] = keyword[None] , identifier[covar] = keyword[None] , identifier[tail] = literal[string] ,
identifier[method] = literal[string] , identifier[padjust] = literal[string] , identifier[export_filename] = keyword[None] ):
literal[string]
identifier[stats] = identifier[pairwise_corr] ( identifier[data] = identifier[self] , identifier[columns] = identifier[columns] , identifier[covar] = identifier[covar] ,
identifier[tail] = identifier[tail] , identifier[method] = identifier[method] , identifier[padjust] = identifier[padjust] ,
identifier[export_filename] = identifier[export_filename] )
keyword[return] identifier[stats] | def _pairwise_corr(self, columns=None, covar=None, tail='two-sided', method='pearson', padjust='none', export_filename=None):
"""Pairwise (partial) correlations."""
stats = pairwise_corr(data=self, columns=columns, covar=covar, tail=tail, method=method, padjust=padjust, export_filename=export_filename)
return stats |
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d | def function[calc_uniform_lim_glorot, parameter[inmaps, outmaps, kernel]]:
constant[Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
]
variable[d] assign[=] call[name[np].sqrt, parameter[binary_operation[constant[6.0] / binary_operation[binary_operation[call[name[np].prod, parameter[name[kernel]]] * name[inmaps]] + name[outmaps]]]]]
return[tuple[[<ast.UnaryOp object at 0x7da1b1663220>, <ast.Name object at 0x7da1b1663190>]]] | keyword[def] identifier[calc_uniform_lim_glorot] ( identifier[inmaps] , identifier[outmaps] , identifier[kernel] =( literal[int] , literal[int] )):
literal[string]
identifier[d] = identifier[np] . identifier[sqrt] ( literal[int] /( identifier[np] . identifier[prod] ( identifier[kernel] )* identifier[inmaps] + identifier[outmaps] ))
keyword[return] - identifier[d] , identifier[d] | def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \\sqrt{\\frac{6}{NK + M}}\\\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6.0 / (np.prod(kernel) * inmaps + outmaps))
return (-d, d) |
def check_modpath_has_init(path, mod_path):
"""check there are some __init__.py all along the way"""
modpath = []
for part in mod_path:
modpath.append(part)
path = os.path.join(path, part)
if not _has_init(path):
old_namespace = util.is_namespace(".".join(modpath))
if not old_namespace:
return False
return True | def function[check_modpath_has_init, parameter[path, mod_path]]:
constant[check there are some __init__.py all along the way]
variable[modpath] assign[=] list[[]]
for taget[name[part]] in starred[name[mod_path]] begin[:]
call[name[modpath].append, parameter[name[part]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], name[part]]]
if <ast.UnaryOp object at 0x7da1b1e7ce20> begin[:]
variable[old_namespace] assign[=] call[name[util].is_namespace, parameter[call[constant[.].join, parameter[name[modpath]]]]]
if <ast.UnaryOp object at 0x7da1b1e7feb0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[check_modpath_has_init] ( identifier[path] , identifier[mod_path] ):
literal[string]
identifier[modpath] =[]
keyword[for] identifier[part] keyword[in] identifier[mod_path] :
identifier[modpath] . identifier[append] ( identifier[part] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[part] )
keyword[if] keyword[not] identifier[_has_init] ( identifier[path] ):
identifier[old_namespace] = identifier[util] . identifier[is_namespace] ( literal[string] . identifier[join] ( identifier[modpath] ))
keyword[if] keyword[not] identifier[old_namespace] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def check_modpath_has_init(path, mod_path):
"""check there are some __init__.py all along the way"""
modpath = []
for part in mod_path:
modpath.append(part)
path = os.path.join(path, part)
if not _has_init(path):
old_namespace = util.is_namespace('.'.join(modpath))
if not old_namespace:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']]
return True |
def gallery(args):
"""
%prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote.
"""
from jcvi.apps.base import iglob
from jcvi.utils.iter import grouper
p = OptionParser(gallery.__doc__)
p.add_option("--columns", default=3, type="int",
help="How many cells per row")
p.add_option("--width", default=200, type="int",
help="Image width")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
folder, link_prefix = args
width = opts.width
images = iglob(folder, "*.jpg,*.JPG,*.png")
td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>'
print("<table>")
for ims in grouper(images, opts.columns):
print('<tr height="{0}" valign="top">'.format(width + 5))
for im in ims:
if not im:
continue
im = op.basename(im)
pf = im.split('.')[0].replace('_', '-')
link = link_prefix.rstrip("/") + "/" + im
print(td.format(pf, link, width))
print("</tr>")
print("</table>") | def function[gallery, parameter[args]]:
constant[
%prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote.
]
from relative_module[jcvi.apps.base] import module[iglob]
from relative_module[jcvi.utils.iter] import module[grouper]
variable[p] assign[=] call[name[OptionParser], parameter[name[gallery].__doc__]]
call[name[p].add_option, parameter[constant[--columns]]]
call[name[p].add_option, parameter[constant[--width]]]
<ast.Tuple object at 0x7da204621ae0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f720880>]]
<ast.Tuple object at 0x7da18f723100> assign[=] name[args]
variable[width] assign[=] name[opts].width
variable[images] assign[=] call[name[iglob], parameter[name[folder], constant[*.jpg,*.JPG,*.png]]]
variable[td] assign[=] constant[<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>]
call[name[print], parameter[constant[<table>]]]
for taget[name[ims]] in starred[call[name[grouper], parameter[name[images], name[opts].columns]]] begin[:]
call[name[print], parameter[call[constant[<tr height="{0}" valign="top">].format, parameter[binary_operation[name[width] + constant[5]]]]]]
for taget[name[im]] in starred[name[ims]] begin[:]
if <ast.UnaryOp object at 0x7da18f721900> begin[:]
continue
variable[im] assign[=] call[name[op].basename, parameter[name[im]]]
variable[pf] assign[=] call[call[call[name[im].split, parameter[constant[.]]]][constant[0]].replace, parameter[constant[_], constant[-]]]
variable[link] assign[=] binary_operation[binary_operation[call[name[link_prefix].rstrip, parameter[constant[/]]] + constant[/]] + name[im]]
call[name[print], parameter[call[name[td].format, parameter[name[pf], name[link], name[width]]]]]
call[name[print], parameter[constant[</tr>]]]
call[name[print], parameter[constant[</table>]]] | keyword[def] identifier[gallery] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[apps] . identifier[base] keyword[import] identifier[iglob]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[iter] keyword[import] identifier[grouper]
identifier[p] = identifier[OptionParser] ( identifier[gallery] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[folder] , identifier[link_prefix] = identifier[args]
identifier[width] = identifier[opts] . identifier[width]
identifier[images] = identifier[iglob] ( identifier[folder] , literal[string] )
identifier[td] = literal[string]
identifier[print] ( literal[string] )
keyword[for] identifier[ims] keyword[in] identifier[grouper] ( identifier[images] , identifier[opts] . identifier[columns] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[width] + literal[int] ))
keyword[for] identifier[im] keyword[in] identifier[ims] :
keyword[if] keyword[not] identifier[im] :
keyword[continue]
identifier[im] = identifier[op] . identifier[basename] ( identifier[im] )
identifier[pf] = identifier[im] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[link] = identifier[link_prefix] . identifier[rstrip] ( literal[string] )+ literal[string] + identifier[im]
identifier[print] ( identifier[td] . identifier[format] ( identifier[pf] , identifier[link] , identifier[width] ))
identifier[print] ( literal[string] )
identifier[print] ( literal[string] ) | def gallery(args):
"""
%prog gallery folder link_prefix
Convert a folder of figures to a HTML table. For example:
$ python -m jcvi.formats.html gallery Paper-figures/
https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/
Maps the images from local to remote.
"""
from jcvi.apps.base import iglob
from jcvi.utils.iter import grouper
p = OptionParser(gallery.__doc__)
p.add_option('--columns', default=3, type='int', help='How many cells per row')
p.add_option('--width', default=200, type='int', help='Image width')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(folder, link_prefix) = args
width = opts.width
images = iglob(folder, '*.jpg,*.JPG,*.png')
td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>'
print('<table>')
for ims in grouper(images, opts.columns):
print('<tr height="{0}" valign="top">'.format(width + 5))
for im in ims:
if not im:
continue # depends on [control=['if'], data=[]]
im = op.basename(im)
pf = im.split('.')[0].replace('_', '-')
link = link_prefix.rstrip('/') + '/' + im
print(td.format(pf, link, width)) # depends on [control=['for'], data=['im']]
print('</tr>') # depends on [control=['for'], data=['ims']]
print('</table>') |
async def get_player(self) -> Player:
"""Get information about the users current playback.
Returns
-------
player : Player
A player object representing the current playback.
"""
self._player = player = Player(self.__client, self, await self.http.current_player())
return player | <ast.AsyncFunctionDef object at 0x7da20e956680> | keyword[async] keyword[def] identifier[get_player] ( identifier[self] )-> identifier[Player] :
literal[string]
identifier[self] . identifier[_player] = identifier[player] = identifier[Player] ( identifier[self] . identifier[__client] , identifier[self] , keyword[await] identifier[self] . identifier[http] . identifier[current_player] ())
keyword[return] identifier[player] | async def get_player(self) -> Player:
"""Get information about the users current playback.
Returns
-------
player : Player
A player object representing the current playback.
"""
self._player = player = Player(self.__client, self, await self.http.current_player())
return player |
def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
electronegativity between the matches species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species
"""
struct1, struct2 = self._process_species([struct1, struct2])
struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
use_rms=True, break_on_match=True)
if matches:
min_X_diff = np.inf
for m in matches:
X_diff = 0
for k, v in m[0].items():
X_diff += struct1.composition[k] * (k.X - v.X) ** 2
if X_diff < min_X_diff:
min_X_diff = X_diff
best = m[0]
return best | def function[get_best_electronegativity_anonymous_mapping, parameter[self, struct1, struct2]]:
constant[
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
electronegativity between the matches species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species
]
<ast.Tuple object at 0x7da18dc07f40> assign[=] call[name[self]._process_species, parameter[list[[<ast.Name object at 0x7da18dc07550>, <ast.Name object at 0x7da18dc05bd0>]]]]
<ast.Tuple object at 0x7da18dc04370> assign[=] call[name[self]._preprocess, parameter[name[struct1], name[struct2]]]
variable[matches] assign[=] call[name[self]._anonymous_match, parameter[name[struct1], name[struct2], name[fu], name[s1_supercell]]]
if name[matches] begin[:]
variable[min_X_diff] assign[=] name[np].inf
for taget[name[m]] in starred[name[matches]] begin[:]
variable[X_diff] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b1c58700>, <ast.Name object at 0x7da1b1c59750>]]] in starred[call[call[name[m]][constant[0]].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b1c58850>
if compare[name[X_diff] less[<] name[min_X_diff]] begin[:]
variable[min_X_diff] assign[=] name[X_diff]
variable[best] assign[=] call[name[m]][constant[0]]
return[name[best]] | keyword[def] identifier[get_best_electronegativity_anonymous_mapping] ( identifier[self] , identifier[struct1] , identifier[struct2] ):
literal[string]
identifier[struct1] , identifier[struct2] = identifier[self] . identifier[_process_species] ([ identifier[struct1] , identifier[struct2] ])
identifier[struct1] , identifier[struct2] , identifier[fu] , identifier[s1_supercell] = identifier[self] . identifier[_preprocess] ( identifier[struct1] , identifier[struct2] )
identifier[matches] = identifier[self] . identifier[_anonymous_match] ( identifier[struct1] , identifier[struct2] , identifier[fu] , identifier[s1_supercell] ,
identifier[use_rms] = keyword[True] , identifier[break_on_match] = keyword[True] )
keyword[if] identifier[matches] :
identifier[min_X_diff] = identifier[np] . identifier[inf]
keyword[for] identifier[m] keyword[in] identifier[matches] :
identifier[X_diff] = literal[int]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[m] [ literal[int] ]. identifier[items] ():
identifier[X_diff] += identifier[struct1] . identifier[composition] [ identifier[k] ]*( identifier[k] . identifier[X] - identifier[v] . identifier[X] )** literal[int]
keyword[if] identifier[X_diff] < identifier[min_X_diff] :
identifier[min_X_diff] = identifier[X_diff]
identifier[best] = identifier[m] [ literal[int] ]
keyword[return] identifier[best] | def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):
"""
Performs an anonymous fitting, which allows distinct species in one
structure to map to another. E.g., to compare if the Li2O and Na2O
structures are similar. If multiple substitutions are within tolerance
this will return the one which minimizes the difference in
electronegativity between the matches species.
Args:
struct1 (Structure): 1st structure
struct2 (Structure): 2nd structure
Returns:
min_mapping (Dict): Mapping of struct1 species to struct2 species
"""
(struct1, struct2) = self._process_species([struct1, struct2])
(struct1, struct2, fu, s1_supercell) = self._preprocess(struct1, struct2)
matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, use_rms=True, break_on_match=True)
if matches:
min_X_diff = np.inf
for m in matches:
X_diff = 0
for (k, v) in m[0].items():
X_diff += struct1.composition[k] * (k.X - v.X) ** 2 # depends on [control=['for'], data=[]]
if X_diff < min_X_diff:
min_X_diff = X_diff
best = m[0] # depends on [control=['if'], data=['X_diff', 'min_X_diff']] # depends on [control=['for'], data=['m']]
return best # depends on [control=['if'], data=[]] |
def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None, tags=None,
region=None, key=None, keyid=None, profile=None, return_objs=False):
'''
Given image properties, find and return matching AMI ids
CLI Examples:
.. code-block:: bash
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
'''
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
filter_parameters = {'filters': {}}
if image_ids:
filter_parameters['image_ids'] = [image_ids]
if executable_by:
filter_parameters['executable_by'] = [executable_by]
if owners:
filter_parameters['owners'] = [owners]
if ami_name:
filter_parameters['filters']['name'] = ami_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria %s matched the following '
'images:%s', filter_parameters, images)
if images:
if return_objs:
return images
return [image.id for image in images]
else:
return False
except boto.exception.BotoServerError as exc:
if exc.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False
return False | def function[find_images, parameter[ami_name, executable_by, owners, image_ids, tags, region, key, keyid, profile, return_objs]]:
constant[
Given image properties, find and return matching AMI ids
CLI Examples:
.. code-block:: bash
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
]
variable[retries] assign[=] constant[30]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
while name[retries] begin[:]
<ast.Try object at 0x7da207f9b550>
return[constant[False]] | keyword[def] identifier[find_images] ( identifier[ami_name] = keyword[None] , identifier[executable_by] = keyword[None] , identifier[owners] = keyword[None] , identifier[image_ids] = keyword[None] , identifier[tags] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] , identifier[return_objs] = keyword[False] ):
literal[string]
identifier[retries] = literal[int]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[while] identifier[retries] :
keyword[try] :
identifier[filter_parameters] ={ literal[string] :{}}
keyword[if] identifier[image_ids] :
identifier[filter_parameters] [ literal[string] ]=[ identifier[image_ids] ]
keyword[if] identifier[executable_by] :
identifier[filter_parameters] [ literal[string] ]=[ identifier[executable_by] ]
keyword[if] identifier[owners] :
identifier[filter_parameters] [ literal[string] ]=[ identifier[owners] ]
keyword[if] identifier[ami_name] :
identifier[filter_parameters] [ literal[string] ][ literal[string] ]= identifier[ami_name]
keyword[if] identifier[tags] :
keyword[for] identifier[tag_name] , identifier[tag_value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[tags] ):
identifier[filter_parameters] [ literal[string] ][ literal[string] . identifier[format] ( identifier[tag_name] )]= identifier[tag_value]
identifier[images] = identifier[conn] . identifier[get_all_images] (** identifier[filter_parameters] )
identifier[log] . identifier[debug] ( literal[string]
literal[string] , identifier[filter_parameters] , identifier[images] )
keyword[if] identifier[images] :
keyword[if] identifier[return_objs] :
keyword[return] identifier[images]
keyword[return] [ identifier[image] . identifier[id] keyword[for] identifier[image] keyword[in] identifier[images] ]
keyword[else] :
keyword[return] keyword[False]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[error_code] == literal[string] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
identifier[retries] -= literal[int]
keyword[continue]
identifier[log] . identifier[error] ( literal[string] , identifier[ami_name] , identifier[exc] )
keyword[return] keyword[False]
keyword[return] keyword[False] | def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None, tags=None, region=None, key=None, keyid=None, profile=None, return_objs=False):
"""
Given image properties, find and return matching AMI ids
CLI Examples:
.. code-block:: bash
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
"""
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
filter_parameters = {'filters': {}}
if image_ids:
filter_parameters['image_ids'] = [image_ids] # depends on [control=['if'], data=[]]
if executable_by:
filter_parameters['executable_by'] = [executable_by] # depends on [control=['if'], data=[]]
if owners:
filter_parameters['owners'] = [owners] # depends on [control=['if'], data=[]]
if ami_name:
filter_parameters['filters']['name'] = ami_name # depends on [control=['if'], data=[]]
if tags:
for (tag_name, tag_value) in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria %s matched the following images:%s', filter_parameters, images)
if images:
if return_objs:
return images # depends on [control=['if'], data=[]]
return [image.id for image in images] # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as exc:
if exc.error_code == 'Throttling':
log.debug('Throttled by AWS API, will retry in 5 seconds...')
time.sleep(5)
retries -= 1
continue # depends on [control=['if'], data=[]]
log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False # depends on [control=['except'], data=['exc']] # depends on [control=['while'], data=[]]
return False |
def _one_to_many_query(cls, query_obj, search4, model_attrib):
"""extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
"""
model = model_attrib.parent.class_
already_joined_tables = [mapper.class_ for mapper in query_obj._join_entities]
if isinstance(search4, (str, int, Iterable)) and model not in already_joined_tables:
query_obj = query_obj.join(model)
if isinstance(search4, str):
query_obj = query_obj.filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.filter(model_attrib.in_(search4))
return query_obj | def function[_one_to_many_query, parameter[cls, query_obj, search4, model_attrib]]:
constant[extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
]
variable[model] assign[=] name[model_attrib].parent.class_
variable[already_joined_tables] assign[=] <ast.ListComp object at 0x7da1b1da7730>
if <ast.BoolOp object at 0x7da1b1da60e0> begin[:]
variable[query_obj] assign[=] call[name[query_obj].join, parameter[name[model]]]
if call[name[isinstance], parameter[name[search4], name[str]]] begin[:]
variable[query_obj] assign[=] call[name[query_obj].filter, parameter[call[name[model_attrib].like, parameter[name[search4]]]]]
return[name[query_obj]] | keyword[def] identifier[_one_to_many_query] ( identifier[cls] , identifier[query_obj] , identifier[search4] , identifier[model_attrib] ):
literal[string]
identifier[model] = identifier[model_attrib] . identifier[parent] . identifier[class_]
identifier[already_joined_tables] =[ identifier[mapper] . identifier[class_] keyword[for] identifier[mapper] keyword[in] identifier[query_obj] . identifier[_join_entities] ]
keyword[if] identifier[isinstance] ( identifier[search4] ,( identifier[str] , identifier[int] , identifier[Iterable] )) keyword[and] identifier[model] keyword[not] keyword[in] identifier[already_joined_tables] :
identifier[query_obj] = identifier[query_obj] . identifier[join] ( identifier[model] )
keyword[if] identifier[isinstance] ( identifier[search4] , identifier[str] ):
identifier[query_obj] = identifier[query_obj] . identifier[filter] ( identifier[model_attrib] . identifier[like] ( identifier[search4] ))
keyword[elif] identifier[isinstance] ( identifier[search4] , identifier[int] ):
identifier[query_obj] = identifier[query_obj] . identifier[filter] ( identifier[model_attrib] == identifier[search4] )
keyword[elif] identifier[isinstance] ( identifier[search4] , identifier[Iterable] ):
identifier[query_obj] = identifier[query_obj] . identifier[filter] ( identifier[model_attrib] . identifier[in_] ( identifier[search4] ))
keyword[return] identifier[query_obj] | def _one_to_many_query(cls, query_obj, search4, model_attrib):
"""extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
"""
model = model_attrib.parent.class_
already_joined_tables = [mapper.class_ for mapper in query_obj._join_entities]
if isinstance(search4, (str, int, Iterable)) and model not in already_joined_tables:
query_obj = query_obj.join(model) # depends on [control=['if'], data=[]]
if isinstance(search4, str):
query_obj = query_obj.filter(model_attrib.like(search4)) # depends on [control=['if'], data=[]]
elif isinstance(search4, int):
query_obj = query_obj.filter(model_attrib == search4) # depends on [control=['if'], data=[]]
elif isinstance(search4, Iterable):
query_obj = query_obj.filter(model_attrib.in_(search4)) # depends on [control=['if'], data=[]]
return query_obj |
def security_code_date(self):
""" Date of user's security code update """
return sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime(2000, 1, 1),
server_default="2000-01-01 01:01",
) | def function[security_code_date, parameter[self]]:
constant[ Date of user's security code update ]
return[call[name[sa].Column, parameter[call[name[sa].TIMESTAMP, parameter[]]]]] | keyword[def] identifier[security_code_date] ( identifier[self] ):
literal[string]
keyword[return] identifier[sa] . identifier[Column] (
identifier[sa] . identifier[TIMESTAMP] ( identifier[timezone] = keyword[False] ),
identifier[default] = identifier[datetime] ( literal[int] , literal[int] , literal[int] ),
identifier[server_default] = literal[string] ,
) | def security_code_date(self):
""" Date of user's security code update """
return sa.Column(sa.TIMESTAMP(timezone=False), default=datetime(2000, 1, 1), server_default='2000-01-01 01:01') |
def times(self, factor):
"""Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor == 0:
return self.__class__()
if factor < 0:
raise ValueError('The factor must no be negative.')
result = self.__copy__()
_elements = result._elements
for element in _elements:
_elements[element] *= factor
result._total *= factor
return result | def function[times, parameter[self, factor]]:
constant[Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
]
if compare[name[factor] equal[==] constant[0]] begin[:]
return[call[name[self].__class__, parameter[]]]
if compare[name[factor] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b25d3ac0>
variable[result] assign[=] call[name[self].__copy__, parameter[]]
variable[_elements] assign[=] name[result]._elements
for taget[name[element]] in starred[name[_elements]] begin[:]
<ast.AugAssign object at 0x7da1b25d1bd0>
<ast.AugAssign object at 0x7da1b25d2350>
return[name[result]] | keyword[def] identifier[times] ( identifier[self] , identifier[factor] ):
literal[string]
keyword[if] identifier[factor] == literal[int] :
keyword[return] identifier[self] . identifier[__class__] ()
keyword[if] identifier[factor] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[result] = identifier[self] . identifier[__copy__] ()
identifier[_elements] = identifier[result] . identifier[_elements]
keyword[for] identifier[element] keyword[in] identifier[_elements] :
identifier[_elements] [ identifier[element] ]*= identifier[factor]
identifier[result] . identifier[_total] *= identifier[factor]
keyword[return] identifier[result] | def times(self, factor):
"""Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor == 0:
return self.__class__() # depends on [control=['if'], data=[]]
if factor < 0:
raise ValueError('The factor must no be negative.') # depends on [control=['if'], data=[]]
result = self.__copy__()
_elements = result._elements
for element in _elements:
_elements[element] *= factor # depends on [control=['for'], data=['element']]
result._total *= factor
return result |
def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
if out is None:
return ''
return py3compat.bytes_to_str(out) | def function[getoutput, parameter[cmd]]:
constant[Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
]
variable[out] assign[=] call[name[process_handler], parameter[name[cmd], <ast.Lambda object at 0x7da2041d96c0>, name[subprocess].STDOUT]]
if compare[name[out] is constant[None]] begin[:]
return[constant[]]
return[call[name[py3compat].bytes_to_str, parameter[name[out]]]] | keyword[def] identifier[getoutput] ( identifier[cmd] ):
literal[string]
identifier[out] = identifier[process_handler] ( identifier[cmd] , keyword[lambda] identifier[p] : identifier[p] . identifier[communicate] ()[ literal[int] ], identifier[subprocess] . identifier[STDOUT] )
keyword[if] identifier[out] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[return] identifier[py3compat] . identifier[bytes_to_str] ( identifier[out] ) | def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
if out is None:
return '' # depends on [control=['if'], data=[]]
return py3compat.bytes_to_str(out) |
def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError("Move cannot be type None")
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False
# Sets King and Rook has_moved property to True is piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True
elif move.status == notation_const.MOVEMENT and \
isinstance(move.piece, Pawn) and \
fabs(move.end_loc.rank - move.start_loc.rank) == 2:
move.piece.just_moved_two_steps = True
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
elif move.status == notation_const.PROMOTE or \
move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
except TypeError as e:
raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
return
self.move_piece(move.piece.location, move.end_loc) | def function[update, parameter[self, move]]:
constant[
Updates position by applying selected move
:type: move: Move
]
if compare[name[move] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c795c00>
if <ast.BoolOp object at 0x7da20c795060> begin[:]
call[name[self].king_loc_dict][name[move].color] assign[=] name[move].end_loc
for taget[name[square]] in starred[name[self]] begin[:]
variable[pawn] assign[=] name[square]
if call[name[isinstance], parameter[name[pawn], name[Pawn]]] begin[:]
name[pawn].just_moved_two_steps assign[=] constant[False]
if <ast.BoolOp object at 0x7da20c795360> begin[:]
name[move].piece.has_moved assign[=] constant[True]
if compare[name[move].status equal[==] name[notation_const].KING_SIDE_CASTLE] begin[:]
call[name[self].move_piece, parameter[call[name[Location], parameter[name[move].end_loc.rank, constant[7]]], call[name[Location], parameter[name[move].end_loc.rank, constant[5]]]]]
call[name[self].piece_at_square, parameter[call[name[Location], parameter[name[move].end_loc.rank, constant[5]]]]].has_moved assign[=] constant[True]
call[name[self].move_piece, parameter[name[move].piece.location, name[move].end_loc]] | keyword[def] identifier[update] ( identifier[self] , identifier[move] ):
literal[string]
keyword[if] identifier[move] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[self] . identifier[king_loc_dict] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[move] . identifier[piece] , identifier[King] ):
identifier[self] . identifier[king_loc_dict] [ identifier[move] . identifier[color] ]= identifier[move] . identifier[end_loc]
keyword[for] identifier[square] keyword[in] identifier[self] :
identifier[pawn] = identifier[square]
keyword[if] identifier[isinstance] ( identifier[pawn] , identifier[Pawn] ):
identifier[pawn] . identifier[just_moved_two_steps] = keyword[False]
keyword[if] identifier[type] ( identifier[move] . identifier[piece] ) keyword[is] identifier[King] keyword[or] identifier[type] ( identifier[move] . identifier[piece] ) keyword[is] identifier[Rook] :
identifier[move] . identifier[piece] . identifier[has_moved] = keyword[True]
keyword[elif] identifier[move] . identifier[status] == identifier[notation_const] . identifier[MOVEMENT] keyword[and] identifier[isinstance] ( identifier[move] . identifier[piece] , identifier[Pawn] ) keyword[and] identifier[fabs] ( identifier[move] . identifier[end_loc] . identifier[rank] - identifier[move] . identifier[start_loc] . identifier[rank] )== literal[int] :
identifier[move] . identifier[piece] . identifier[just_moved_two_steps] = keyword[True]
keyword[if] identifier[move] . identifier[status] == identifier[notation_const] . identifier[KING_SIDE_CASTLE] :
identifier[self] . identifier[move_piece] ( identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] ), identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] ))
identifier[self] . identifier[piece_at_square] ( identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] )). identifier[has_moved] = keyword[True]
keyword[elif] identifier[move] . identifier[status] == identifier[notation_const] . identifier[QUEEN_SIDE_CASTLE] :
identifier[self] . identifier[move_piece] ( identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] ), identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] ))
identifier[self] . identifier[piece_at_square] ( identifier[Location] ( identifier[move] . identifier[end_loc] . identifier[rank] , literal[int] )). identifier[has_moved] = keyword[True]
keyword[elif] identifier[move] . identifier[status] == identifier[notation_const] . identifier[EN_PASSANT] :
identifier[self] . identifier[remove_piece_at_square] ( identifier[Location] ( identifier[move] . identifier[start_loc] . identifier[rank] , identifier[move] . identifier[end_loc] . identifier[file] ))
keyword[elif] identifier[move] . identifier[status] == identifier[notation_const] . identifier[PROMOTE] keyword[or] identifier[move] . identifier[status] == identifier[notation_const] . identifier[CAPTURE_AND_PROMOTE] :
keyword[try] :
identifier[self] . identifier[remove_piece_at_square] ( identifier[move] . identifier[start_loc] )
identifier[self] . identifier[place_piece_at_square] ( identifier[move] . identifier[promoted_to_piece] ( identifier[move] . identifier[color] , identifier[move] . identifier[end_loc] ), identifier[move] . identifier[end_loc] )
keyword[except] identifier[TypeError] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[move] ), identifier[e] ))
keyword[return]
identifier[self] . identifier[move_piece] ( identifier[move] . identifier[piece] . identifier[location] , identifier[move] . identifier[end_loc] ) | def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError('Move cannot be type None') # depends on [control=['if'], data=[]]
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc # depends on [control=['if'], data=[]]
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['square']]
# Sets King and Rook has_moved property to True is piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True # depends on [control=['if'], data=[]]
elif move.status == notation_const.MOVEMENT and isinstance(move.piece, Pawn) and (fabs(move.end_loc.rank - move.start_loc.rank) == 2):
move.piece.just_moved_two_steps = True # depends on [control=['if'], data=[]]
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True # depends on [control=['if'], data=[]]
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True # depends on [control=['if'], data=[]]
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file)) # depends on [control=['if'], data=[]]
elif move.status == notation_const.PROMOTE or move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc) # depends on [control=['try'], data=[]]
except TypeError as e:
raise ValueError('Promoted to piece cannot be None in Move {}\n{}'.format(repr(move), e)) # depends on [control=['except'], data=['e']]
return # depends on [control=['if'], data=[]]
self.move_piece(move.piece.location, move.end_loc) |
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn | def function[get_connection, parameter[self, url, proxies]]:
constant[Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
]
variable[proxy] assign[=] call[name[select_proxy], parameter[name[url], name[proxies]]]
if name[proxy] begin[:]
variable[proxy] assign[=] call[name[prepend_scheme_if_needed], parameter[name[proxy], constant[http]]]
variable[proxy_manager] assign[=] call[name[self].proxy_manager_for, parameter[name[proxy]]]
variable[conn] assign[=] call[name[proxy_manager].connection_from_url, parameter[name[url]]]
return[name[conn]] | keyword[def] identifier[get_connection] ( identifier[self] , identifier[url] , identifier[proxies] = keyword[None] ):
literal[string]
identifier[proxy] = identifier[select_proxy] ( identifier[url] , identifier[proxies] )
keyword[if] identifier[proxy] :
identifier[proxy] = identifier[prepend_scheme_if_needed] ( identifier[proxy] , literal[string] )
identifier[proxy_manager] = identifier[self] . identifier[proxy_manager_for] ( identifier[proxy] )
identifier[conn] = identifier[proxy_manager] . identifier[connection_from_url] ( identifier[url] )
keyword[else] :
identifier[parsed] = identifier[urlparse] ( identifier[url] )
identifier[url] = identifier[parsed] . identifier[geturl] ()
identifier[conn] = identifier[self] . identifier[poolmanager] . identifier[connection_from_url] ( identifier[url] )
keyword[return] identifier[conn] | def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url) # depends on [control=['if'], data=[]]
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn |
def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
"""
virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3) | def function[density, parameter[pressure, temperature, mixing, molecular_weight_ratio]]:
constant[Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
]
variable[virttemp] assign[=] call[name[virtual_temperature], parameter[name[temperature], name[mixing], name[molecular_weight_ratio]]]
return[call[binary_operation[name[pressure] / binary_operation[name[mpconsts].Rd * name[virttemp]]].to, parameter[binary_operation[name[units].kilogram / binary_operation[name[units].meter ** constant[3]]]]]] | keyword[def] identifier[density] ( identifier[pressure] , identifier[temperature] , identifier[mixing] , identifier[molecular_weight_ratio] = identifier[mpconsts] . identifier[epsilon] ):
literal[string]
identifier[virttemp] = identifier[virtual_temperature] ( identifier[temperature] , identifier[mixing] , identifier[molecular_weight_ratio] )
keyword[return] ( identifier[pressure] /( identifier[mpconsts] . identifier[Rd] * identifier[virttemp] )). identifier[to] ( identifier[units] . identifier[kilogram] / identifier[units] . identifier[meter] ** literal[int] ) | def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\\epsilon\\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \\rho = \\frac{p}{R_dT_v}
"""
virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3) |
def get_project(id=None, name=None):
"""
Get a specific Project by ID or name
"""
content = get_project_raw(id, name)
if content:
return utils.format_json(content) | def function[get_project, parameter[id, name]]:
constant[
Get a specific Project by ID or name
]
variable[content] assign[=] call[name[get_project_raw], parameter[name[id], name[name]]]
if name[content] begin[:]
return[call[name[utils].format_json, parameter[name[content]]]] | keyword[def] identifier[get_project] ( identifier[id] = keyword[None] , identifier[name] = keyword[None] ):
literal[string]
identifier[content] = identifier[get_project_raw] ( identifier[id] , identifier[name] )
keyword[if] identifier[content] :
keyword[return] identifier[utils] . identifier[format_json] ( identifier[content] ) | def get_project(id=None, name=None):
"""
Get a specific Project by ID or name
"""
content = get_project_raw(id, name)
if content:
return utils.format_json(content) # depends on [control=['if'], data=[]] |
def get_robust_background_threshold(image,
mask = None,
lower_outlier_fraction = 0.05,
upper_outlier_fraction = 0.05,
deviations_above_average = 2.0,
average_fn = np.mean,
variance_fn = np.std):
"""Calculate threshold based on mean & standard deviation
The threshold is calculated by trimming the top and bottom 5% of
pixels off the image, then calculating the mean and standard deviation
of the remaining image. The threshold is then set at 2 (empirical
value) standard deviations above the mean.
image - the image to threshold
mask - mask of pixels to consider (default = all pixels)
lower_outlier_fraction - after ordering the pixels by intensity, remove
the pixels from 0 to len(image) * lower_outlier_fraction from
the threshold calculation (default = .05).
upper_outlier_fraction - remove the pixels from
len(image) * (1 - upper_outlier_fraction) to len(image) from
consideration (default = .05).
deviations_above_average - calculate the standard deviation or MAD and
multiply by this number and add to the average to get the final
threshold (default = 2)
average_fn - function used to calculate the average intensity (e.g.
np.mean, np.median or some sort of mode function). Default = np.mean
variance_fn - function used to calculate the amount of variance.
Default = np.sd
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
n_pixels = np.product(cropped_image.shape)
if n_pixels<3:
return 0
cropped_image.sort()
if cropped_image[0] == cropped_image[-1]:
return cropped_image[0]
low_chop = int(round(n_pixels * lower_outlier_fraction))
hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
mean = average_fn(im)
sd = variance_fn(im)
return mean+sd*deviations_above_average | def function[get_robust_background_threshold, parameter[image, mask, lower_outlier_fraction, upper_outlier_fraction, deviations_above_average, average_fn, variance_fn]]:
constant[Calculate threshold based on mean & standard deviation
The threshold is calculated by trimming the top and bottom 5% of
pixels off the image, then calculating the mean and standard deviation
of the remaining image. The threshold is then set at 2 (empirical
value) standard deviations above the mean.
image - the image to threshold
mask - mask of pixels to consider (default = all pixels)
lower_outlier_fraction - after ordering the pixels by intensity, remove
the pixels from 0 to len(image) * lower_outlier_fraction from
the threshold calculation (default = .05).
upper_outlier_fraction - remove the pixels from
len(image) * (1 - upper_outlier_fraction) to len(image) from
consideration (default = .05).
deviations_above_average - calculate the standard deviation or MAD and
multiply by this number and add to the average to get the final
threshold (default = 2)
average_fn - function used to calculate the average intensity (e.g.
np.mean, np.median or some sort of mode function). Default = np.mean
variance_fn - function used to calculate the amount of variance.
Default = np.sd
]
variable[cropped_image] assign[=] <ast.IfExp object at 0x7da18c4ce440>
variable[n_pixels] assign[=] call[name[np].product, parameter[name[cropped_image].shape]]
if compare[name[n_pixels] less[<] constant[3]] begin[:]
return[constant[0]]
call[name[cropped_image].sort, parameter[]]
if compare[call[name[cropped_image]][constant[0]] equal[==] call[name[cropped_image]][<ast.UnaryOp object at 0x7da1b052b850>]] begin[:]
return[call[name[cropped_image]][constant[0]]]
variable[low_chop] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[n_pixels] * name[lower_outlier_fraction]]]]]]
variable[hi_chop] assign[=] binary_operation[name[n_pixels] - call[name[int], parameter[call[name[round], parameter[binary_operation[name[n_pixels] * name[upper_outlier_fraction]]]]]]]
variable[im] assign[=] <ast.IfExp object at 0x7da1b052b460>
variable[mean] assign[=] call[name[average_fn], parameter[name[im]]]
variable[sd] assign[=] call[name[variance_fn], parameter[name[im]]]
return[binary_operation[name[mean] + binary_operation[name[sd] * name[deviations_above_average]]]] | keyword[def] identifier[get_robust_background_threshold] ( identifier[image] ,
identifier[mask] = keyword[None] ,
identifier[lower_outlier_fraction] = literal[int] ,
identifier[upper_outlier_fraction] = literal[int] ,
identifier[deviations_above_average] = literal[int] ,
identifier[average_fn] = identifier[np] . identifier[mean] ,
identifier[variance_fn] = identifier[np] . identifier[std] ):
literal[string]
identifier[cropped_image] = identifier[np] . identifier[array] ( identifier[image] . identifier[flat] ) keyword[if] identifier[mask] keyword[is] keyword[None] keyword[else] identifier[image] [ identifier[mask] ]
identifier[n_pixels] = identifier[np] . identifier[product] ( identifier[cropped_image] . identifier[shape] )
keyword[if] identifier[n_pixels] < literal[int] :
keyword[return] literal[int]
identifier[cropped_image] . identifier[sort] ()
keyword[if] identifier[cropped_image] [ literal[int] ]== identifier[cropped_image] [- literal[int] ]:
keyword[return] identifier[cropped_image] [ literal[int] ]
identifier[low_chop] = identifier[int] ( identifier[round] ( identifier[n_pixels] * identifier[lower_outlier_fraction] ))
identifier[hi_chop] = identifier[n_pixels] - identifier[int] ( identifier[round] ( identifier[n_pixels] * identifier[upper_outlier_fraction] ))
identifier[im] = identifier[cropped_image] keyword[if] identifier[low_chop] == literal[int] keyword[else] identifier[cropped_image] [ identifier[low_chop] : identifier[hi_chop] ]
identifier[mean] = identifier[average_fn] ( identifier[im] )
identifier[sd] = identifier[variance_fn] ( identifier[im] )
keyword[return] identifier[mean] + identifier[sd] * identifier[deviations_above_average] | def get_robust_background_threshold(image, mask=None, lower_outlier_fraction=0.05, upper_outlier_fraction=0.05, deviations_above_average=2.0, average_fn=np.mean, variance_fn=np.std):
"""Calculate threshold based on mean & standard deviation
The threshold is calculated by trimming the top and bottom 5% of
pixels off the image, then calculating the mean and standard deviation
of the remaining image. The threshold is then set at 2 (empirical
value) standard deviations above the mean.
image - the image to threshold
mask - mask of pixels to consider (default = all pixels)
lower_outlier_fraction - after ordering the pixels by intensity, remove
the pixels from 0 to len(image) * lower_outlier_fraction from
the threshold calculation (default = .05).
upper_outlier_fraction - remove the pixels from
len(image) * (1 - upper_outlier_fraction) to len(image) from
consideration (default = .05).
deviations_above_average - calculate the standard deviation or MAD and
multiply by this number and add to the average to get the final
threshold (default = 2)
average_fn - function used to calculate the average intensity (e.g.
np.mean, np.median or some sort of mode function). Default = np.mean
variance_fn - function used to calculate the amount of variance.
Default = np.sd
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
n_pixels = np.product(cropped_image.shape)
if n_pixels < 3:
return 0 # depends on [control=['if'], data=[]]
cropped_image.sort()
if cropped_image[0] == cropped_image[-1]:
return cropped_image[0] # depends on [control=['if'], data=[]]
low_chop = int(round(n_pixels * lower_outlier_fraction))
hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
mean = average_fn(im)
sd = variance_fn(im)
return mean + sd * deviations_above_average |
def get_user_entitlements(self, top=None, skip=None, filter=None, sort_option=None):
"""GetUserEntitlements.
[Preview API] Get a paged set of user entitlements matching the filter criteria. If no filter is is passed, a page from all the account users is returned.
:param int top: Maximum number of the user entitlements to return. Max value is 10000. Default value is 100
:param int skip: Offset: Number of records to skip. Default value is 0
:param str filter: Comma (",") separated list of properties and their values to filter on. Currently, the API only supports filtering by ExtensionId. An example parameter would be filter=extensionId eq search.
:param str sort_option: PropertyName and Order (separated by a space ( )) to sort on (e.g. LastAccessDate Desc)
:rtype: :class:`<PagedGraphMemberList> <azure.devops.v5_0.member_entitlement_management.models.PagedGraphMemberList>`
"""
query_parameters = {}
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['skip'] = self._serialize.query('skip', skip, 'int')
if filter is not None:
query_parameters['filter'] = self._serialize.query('filter', filter, 'str')
if sort_option is not None:
query_parameters['sortOption'] = self._serialize.query('sort_option', sort_option, 'str')
response = self._send(http_method='GET',
location_id='387f832c-dbf2-4643-88e9-c1aa94dbb737',
version='5.0-preview.2',
query_parameters=query_parameters)
return self._deserialize('PagedGraphMemberList', response) | def function[get_user_entitlements, parameter[self, top, skip, filter, sort_option]]:
constant[GetUserEntitlements.
[Preview API] Get a paged set of user entitlements matching the filter criteria. If no filter is is passed, a page from all the account users is returned.
:param int top: Maximum number of the user entitlements to return. Max value is 10000. Default value is 100
:param int skip: Offset: Number of records to skip. Default value is 0
:param str filter: Comma (",") separated list of properties and their values to filter on. Currently, the API only supports filtering by ExtensionId. An example parameter would be filter=extensionId eq search.
:param str sort_option: PropertyName and Order (separated by a space ( )) to sort on (e.g. LastAccessDate Desc)
:rtype: :class:`<PagedGraphMemberList> <azure.devops.v5_0.member_entitlement_management.models.PagedGraphMemberList>`
]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[top] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[top]] assign[=] call[name[self]._serialize.query, parameter[constant[top], name[top], constant[int]]]
if compare[name[skip] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[skip]] assign[=] call[name[self]._serialize.query, parameter[constant[skip], name[skip], constant[int]]]
if compare[name[filter] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[filter]] assign[=] call[name[self]._serialize.query, parameter[constant[filter], name[filter], constant[str]]]
if compare[name[sort_option] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[sortOption]] assign[=] call[name[self]._serialize.query, parameter[constant[sort_option], name[sort_option], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[PagedGraphMemberList], name[response]]]] | keyword[def] identifier[get_user_entitlements] ( identifier[self] , identifier[top] = keyword[None] , identifier[skip] = keyword[None] , identifier[filter] = keyword[None] , identifier[sort_option] = keyword[None] ):
literal[string]
identifier[query_parameters] ={}
keyword[if] identifier[top] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[top] , literal[string] )
keyword[if] identifier[skip] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[skip] , literal[string] )
keyword[if] identifier[filter] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[filter] , literal[string] )
keyword[if] identifier[sort_option] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[sort_option] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def get_user_entitlements(self, top=None, skip=None, filter=None, sort_option=None):
"""GetUserEntitlements.
[Preview API] Get a paged set of user entitlements matching the filter criteria. If no filter is is passed, a page from all the account users is returned.
:param int top: Maximum number of the user entitlements to return. Max value is 10000. Default value is 100
:param int skip: Offset: Number of records to skip. Default value is 0
:param str filter: Comma (",") separated list of properties and their values to filter on. Currently, the API only supports filtering by ExtensionId. An example parameter would be filter=extensionId eq search.
:param str sort_option: PropertyName and Order (separated by a space ( )) to sort on (e.g. LastAccessDate Desc)
:rtype: :class:`<PagedGraphMemberList> <azure.devops.v5_0.member_entitlement_management.models.PagedGraphMemberList>`
"""
query_parameters = {}
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int') # depends on [control=['if'], data=['top']]
if skip is not None:
query_parameters['skip'] = self._serialize.query('skip', skip, 'int') # depends on [control=['if'], data=['skip']]
if filter is not None:
query_parameters['filter'] = self._serialize.query('filter', filter, 'str') # depends on [control=['if'], data=['filter']]
if sort_option is not None:
query_parameters['sortOption'] = self._serialize.query('sort_option', sort_option, 'str') # depends on [control=['if'], data=['sort_option']]
response = self._send(http_method='GET', location_id='387f832c-dbf2-4643-88e9-c1aa94dbb737', version='5.0-preview.2', query_parameters=query_parameters)
return self._deserialize('PagedGraphMemberList', response) |
def map_init(interface, params):
"""Intialize random number generator with given seed `params.seed`."""
import numpy as np
import random
np.random.seed(params['seed'])
random.seed(params['seed'])
return params | def function[map_init, parameter[interface, params]]:
constant[Intialize random number generator with given seed `params.seed`.]
import module[numpy] as alias[np]
import module[random]
call[name[np].random.seed, parameter[call[name[params]][constant[seed]]]]
call[name[random].seed, parameter[call[name[params]][constant[seed]]]]
return[name[params]] | keyword[def] identifier[map_init] ( identifier[interface] , identifier[params] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[import] identifier[random]
identifier[np] . identifier[random] . identifier[seed] ( identifier[params] [ literal[string] ])
identifier[random] . identifier[seed] ( identifier[params] [ literal[string] ])
keyword[return] identifier[params] | def map_init(interface, params):
"""Intialize random number generator with given seed `params.seed`."""
import numpy as np
import random
np.random.seed(params['seed'])
random.seed(params['seed'])
return params |
def wrapper__ignore(self, type_):
"""
Selectively ignore certain types when wrapping attributes.
:param class type: The class/type definition to ignore.
:rtype list(type): The current list of ignored types
"""
if type_ not in self.__exclusion_list:
self.__exclusion_list.append(type_)
return self.__exclusion_list | def function[wrapper__ignore, parameter[self, type_]]:
constant[
Selectively ignore certain types when wrapping attributes.
:param class type: The class/type definition to ignore.
:rtype list(type): The current list of ignored types
]
if compare[name[type_] <ast.NotIn object at 0x7da2590d7190> name[self].__exclusion_list] begin[:]
call[name[self].__exclusion_list.append, parameter[name[type_]]]
return[name[self].__exclusion_list] | keyword[def] identifier[wrapper__ignore] ( identifier[self] , identifier[type_] ):
literal[string]
keyword[if] identifier[type_] keyword[not] keyword[in] identifier[self] . identifier[__exclusion_list] :
identifier[self] . identifier[__exclusion_list] . identifier[append] ( identifier[type_] )
keyword[return] identifier[self] . identifier[__exclusion_list] | def wrapper__ignore(self, type_):
"""
Selectively ignore certain types when wrapping attributes.
:param class type: The class/type definition to ignore.
:rtype list(type): The current list of ignored types
"""
if type_ not in self.__exclusion_list:
self.__exclusion_list.append(type_) # depends on [control=['if'], data=['type_']]
return self.__exclusion_list |
def has_child_banks(self, bank_id):
"""Tests if a bank has any children.
arg: bank_id (osid.id.Id): a ``bank_id``
return: (boolean) - ``true`` if the ``bank_id`` has children,
``false`` otherwise
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_child_bins
if self._catalog_session is not None:
return self._catalog_session.has_child_catalogs(catalog_id=bank_id)
return self._hierarchy_session.has_children(id_=bank_id) | def function[has_child_banks, parameter[self, bank_id]]:
constant[Tests if a bank has any children.
arg: bank_id (osid.id.Id): a ``bank_id``
return: (boolean) - ``true`` if the ``bank_id`` has children,
``false`` otherwise
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.has_child_catalogs, parameter[]]]
return[call[name[self]._hierarchy_session.has_children, parameter[]]] | keyword[def] identifier[has_child_banks] ( identifier[self] , identifier[bank_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[has_child_catalogs] ( identifier[catalog_id] = identifier[bank_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[has_children] ( identifier[id_] = identifier[bank_id] ) | def has_child_banks(self, bank_id):
"""Tests if a bank has any children.
arg: bank_id (osid.id.Id): a ``bank_id``
return: (boolean) - ``true`` if the ``bank_id`` has children,
``false`` otherwise
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_child_bins
if self._catalog_session is not None:
return self._catalog_session.has_child_catalogs(catalog_id=bank_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.has_children(id_=bank_id) |
def execute(self, query, args, consistency):
    """
    Run a CQL query against the connected Cassandra server.

    :param query: CQL statement to run
    :type query: str.
    :param args: values substituted into the statement
    :type args: dict.
    :param consistency: consistency level for the operation
    :type consistency: ConsistencyLevel

    To stay clear of CQL-injection problems (just because there is no
    SQL does not mean Little Bobby Tables of XKCD #327 cannot hurt you),
    prefer argument substitution over assembling the query by string
    concatenation. Like the official non-Twisted CQL driver shipped
    with the Cassandra distro, variable substitution is performed here.

    Example::

        d = client.execute("UPDATE :table SET 'fff' = :val WHERE "
            "KEY = :key",{"val":1234, "key": "fff", "table": "blah"})

    :returns: A Deferred firing with either None, an int, or an
        iterable of `{'column': value, ...}` dictionaries, depending on
        the query kind: an UPDATE yields None, a SELECT an int or rows.

    Example output::

        [{"fff": 1222}]
    """
    substituted = prepare(query, args)

    def run_query(client):
        # Fire the query on the wire; compression is not used.
        request_d = client.execute_cql3_query(substituted,
                                              ttypes.Compression.NONE,
                                              consistency)
        if not self._disconnect_on_cancel:
            return request_d
        # Wrap the request so that cancelling the Deferred handed back
        # to the caller tears down the connection.
        wrapper_d = Deferred(lambda d: self.disconnect())
        request_d.chainDeferred(wrapper_d)
        return wrapper_d

    def translate(result):
        # Map the thrift result type onto the documented return values.
        kind = result.type
        if kind == ttypes.CqlResultType.ROWS:
            return self._unmarshal_result(result.schema, result.rows,
                                          unmarshallers)
        if kind == ttypes.CqlResultType.INT:
            return result.num
        return None

    deferred = self._connection()
    deferred.addCallback(run_query)
    deferred.addCallback(translate)
    return deferred
constant[
Execute a CQL query against the server.
:param query: The CQL query to execute
:type query: str.
:param args: The arguments to substitute
:type args: dict.
:param consistency: The consistency level
:type consistency: ConsistencyLevel
In order to avoid unpleasant issues of CQL injection
(Hey, just because there's no SQL doesn't mean that Little
Bobby Tables won't mess things up for you like in XKCD #327)
you probably want to use argument substitution instead of
concatting strings together to build a query.
Thus, like the official CQL driver for non-Twisted python
that comes with the Cassandra distro, we do variable substitution.
Example::
d = client.execute("UPDATE :table SET 'fff' = :val WHERE "
"KEY = :key",{"val":1234, "key": "fff", "table": "blah"})
:returns: A Deferred that fires with either None, an int, or an
iterable of `{'column': value, ...}` dictionaries, depending
on the CQL query. e.g. a UPDATE would return None,
whereas a SELECT would return an int or some rows
Example output::
[{"fff": 1222}]
]
variable[prep_query] assign[=] call[name[prepare], parameter[name[query], name[args]]]
def function[_execute, parameter[client]]:
variable[exec_d] assign[=] call[name[client].execute_cql3_query, parameter[name[prep_query], name[ttypes].Compression.NONE, name[consistency]]]
if name[self]._disconnect_on_cancel begin[:]
variable[cancellable_d] assign[=] call[name[Deferred], parameter[<ast.Lambda object at 0x7da1b14e41f0>]]
call[name[exec_d].chainDeferred, parameter[name[cancellable_d]]]
return[name[cancellable_d]]
def function[_proc_results, parameter[result]]:
if compare[name[result].type equal[==] name[ttypes].CqlResultType.ROWS] begin[:]
return[call[name[self]._unmarshal_result, parameter[name[result].schema, name[result].rows, name[unmarshallers]]]]
variable[d] assign[=] call[name[self]._connection, parameter[]]
call[name[d].addCallback, parameter[name[_execute]]]
call[name[d].addCallback, parameter[name[_proc_results]]]
return[name[d]] | keyword[def] identifier[execute] ( identifier[self] , identifier[query] , identifier[args] , identifier[consistency] ):
literal[string]
identifier[prep_query] = identifier[prepare] ( identifier[query] , identifier[args] )
keyword[def] identifier[_execute] ( identifier[client] ):
identifier[exec_d] = identifier[client] . identifier[execute_cql3_query] ( identifier[prep_query] ,
identifier[ttypes] . identifier[Compression] . identifier[NONE] , identifier[consistency] )
keyword[if] identifier[self] . identifier[_disconnect_on_cancel] :
identifier[cancellable_d] = identifier[Deferred] ( keyword[lambda] identifier[d] : identifier[self] . identifier[disconnect] ())
identifier[exec_d] . identifier[chainDeferred] ( identifier[cancellable_d] )
keyword[return] identifier[cancellable_d]
keyword[else] :
keyword[return] identifier[exec_d]
keyword[def] identifier[_proc_results] ( identifier[result] ):
keyword[if] identifier[result] . identifier[type] == identifier[ttypes] . identifier[CqlResultType] . identifier[ROWS] :
keyword[return] identifier[self] . identifier[_unmarshal_result] ( identifier[result] . identifier[schema] , identifier[result] . identifier[rows] ,
identifier[unmarshallers] )
keyword[elif] identifier[result] . identifier[type] == identifier[ttypes] . identifier[CqlResultType] . identifier[INT] :
keyword[return] identifier[result] . identifier[num]
keyword[else] :
keyword[return] keyword[None]
identifier[d] = identifier[self] . identifier[_connection] ()
identifier[d] . identifier[addCallback] ( identifier[_execute] )
identifier[d] . identifier[addCallback] ( identifier[_proc_results] )
keyword[return] identifier[d] | def execute(self, query, args, consistency):
"""
Execute a CQL query against the server.
:param query: The CQL query to execute
:type query: str.
:param args: The arguments to substitute
:type args: dict.
:param consistency: The consistency level
:type consistency: ConsistencyLevel
In order to avoid unpleasant issues of CQL injection
(Hey, just because there's no SQL doesn't mean that Little
Bobby Tables won't mess things up for you like in XKCD #327)
you probably want to use argument substitution instead of
concatting strings together to build a query.
Thus, like the official CQL driver for non-Twisted python
that comes with the Cassandra distro, we do variable substitution.
Example::
d = client.execute("UPDATE :table SET 'fff' = :val WHERE "
"KEY = :key",{"val":1234, "key": "fff", "table": "blah"})
:returns: A Deferred that fires with either None, an int, or an
iterable of `{'column': value, ...}` dictionaries, depending
on the CQL query. e.g. a UPDATE would return None,
whereas a SELECT would return an int or some rows
Example output::
[{"fff": 1222}]
"""
prep_query = prepare(query, args)
def _execute(client):
exec_d = client.execute_cql3_query(prep_query, ttypes.Compression.NONE, consistency)
if self._disconnect_on_cancel:
cancellable_d = Deferred(lambda d: self.disconnect())
exec_d.chainDeferred(cancellable_d)
return cancellable_d # depends on [control=['if'], data=[]]
else:
return exec_d
def _proc_results(result):
if result.type == ttypes.CqlResultType.ROWS:
return self._unmarshal_result(result.schema, result.rows, unmarshallers) # depends on [control=['if'], data=[]]
elif result.type == ttypes.CqlResultType.INT:
return result.num # depends on [control=['if'], data=[]]
else:
return None
d = self._connection()
d.addCallback(_execute)
d.addCallback(_proc_results)
return d |
def getInterval(self):
    """Vocabulary of date intervals used to derive the "To" field date
    from the "From" field date (interval length in days).
    """
    intervals = [
        ("", _(u"Not set")),
        ("1", _(u"daily")),
        ("7", _(u"weekly")),
        ("30", _(u"monthly")),
        ("90", _(u"quarterly")),
        ("180", _(u"biannually")),
        ("365", _(u"yearly")),
    ]
    return DisplayList(tuple(intervals))
constant[Vocabulary of date intervals to calculate the "To" field date based
from the "From" field date.
]
variable[items] assign[=] tuple[[<ast.Tuple object at 0x7da2054a47f0>, <ast.Tuple object at 0x7da2054a7f40>, <ast.Tuple object at 0x7da2054a6c80>, <ast.Tuple object at 0x7da2054a5750>, <ast.Tuple object at 0x7da1b1d65a20>, <ast.Tuple object at 0x7da1b1d64a90>, <ast.Tuple object at 0x7da1b1d659f0>]]
return[call[name[DisplayList], parameter[name[items]]]] | keyword[def] identifier[getInterval] ( identifier[self] ):
literal[string]
identifier[items] =(
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
( literal[string] , identifier[_] ( literal[string] )),
)
keyword[return] identifier[DisplayList] ( identifier[items] ) | def getInterval(self):
"""Vocabulary of date intervals to calculate the "To" field date based
from the "From" field date.
"""
items = (('', _(u'Not set')), ('1', _(u'daily')), ('7', _(u'weekly')), ('30', _(u'monthly')), ('90', _(u'quarterly')), ('180', _(u'biannually')), ('365', _(u'yearly')))
return DisplayList(items) |
def _parse_field_selector(field):
"""
Check field keys: css, xpath and id.
Return the first found along with it's value.
"""
for selector_type in ('css', 'xpath', 'id'):
selector = field.get(selector_type)
if selector:
return selector_type, selector
logger.warning("Form field does not define any selector "
"(id, css and xpath available): %r",
field)
return None, None | def function[_parse_field_selector, parameter[field]]:
constant[
Check field keys: css, xpath and id.
Return the first found along with it's value.
]
for taget[name[selector_type]] in starred[tuple[[<ast.Constant object at 0x7da1b0c95480>, <ast.Constant object at 0x7da1b0c97760>, <ast.Constant object at 0x7da20e793910>]]] begin[:]
variable[selector] assign[=] call[name[field].get, parameter[name[selector_type]]]
if name[selector] begin[:]
return[tuple[[<ast.Name object at 0x7da212db4cd0>, <ast.Name object at 0x7da212db5060>]]]
call[name[logger].warning, parameter[constant[Form field does not define any selector (id, css and xpath available): %r], name[field]]]
return[tuple[[<ast.Constant object at 0x7da18eb563b0>, <ast.Constant object at 0x7da18eb54310>]]] | keyword[def] identifier[_parse_field_selector] ( identifier[field] ):
literal[string]
keyword[for] identifier[selector_type] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[selector] = identifier[field] . identifier[get] ( identifier[selector_type] )
keyword[if] identifier[selector] :
keyword[return] identifier[selector_type] , identifier[selector]
identifier[logger] . identifier[warning] ( literal[string]
literal[string] ,
identifier[field] )
keyword[return] keyword[None] , keyword[None] | def _parse_field_selector(field):
"""
Check field keys: css, xpath and id.
Return the first found along with it's value.
"""
for selector_type in ('css', 'xpath', 'id'):
selector = field.get(selector_type)
if selector:
return (selector_type, selector) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['selector_type']]
logger.warning('Form field does not define any selector (id, css and xpath available): %r', field)
return (None, None) |
def pass_multipart(with_completed=False):
    """Build a decorator that resolves a multipart upload for a view.

    The wrapped view receives the resolved ``MultipartObject`` in place
    of the ``bucket``/``key``/``upload_id`` triple; the request is
    aborted with a 404 when no matching upload exists.
    """
    def decorator(view):
        @wraps(view)
        def wrapper(self, bucket, key, upload_id, *args, **kwargs):
            multipart = MultipartObject.get(bucket, key, upload_id,
                                            with_completed=with_completed)
            if multipart is None:
                abort(404, 'uploadId does not exists.')
            return view(self, multipart, *args, **kwargs)
        return wrapper
    return decorator
constant[Decorate to retrieve an object.]
def function[decorate, parameter[f]]:
def function[inner, parameter[self, bucket, key, upload_id]]:
variable[obj] assign[=] call[name[MultipartObject].get, parameter[name[bucket], name[key], name[upload_id]]]
if compare[name[obj] is constant[None]] begin[:]
call[name[abort], parameter[constant[404], constant[uploadId does not exists.]]]
return[call[name[f], parameter[name[self], name[obj], <ast.Starred object at 0x7da1b1942c50>]]]
return[name[inner]]
return[name[decorate]] | keyword[def] identifier[pass_multipart] ( identifier[with_completed] = keyword[False] ):
literal[string]
keyword[def] identifier[decorate] ( identifier[f] ):
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[inner] ( identifier[self] , identifier[bucket] , identifier[key] , identifier[upload_id] ,* identifier[args] ,** identifier[kwargs] ):
identifier[obj] = identifier[MultipartObject] . identifier[get] (
identifier[bucket] , identifier[key] , identifier[upload_id] , identifier[with_completed] = identifier[with_completed] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
identifier[abort] ( literal[int] , literal[string] )
keyword[return] identifier[f] ( identifier[self] , identifier[obj] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[inner]
keyword[return] identifier[decorate] | def pass_multipart(with_completed=False):
"""Decorate to retrieve an object."""
def decorate(f):
@wraps(f)
def inner(self, bucket, key, upload_id, *args, **kwargs):
obj = MultipartObject.get(bucket, key, upload_id, with_completed=with_completed)
if obj is None:
abort(404, 'uploadId does not exists.') # depends on [control=['if'], data=[]]
return f(self, obj, *args, **kwargs)
return inner
return decorate |
def element_from_dict(parent, data, element):
    """Append a child ``element`` to ``parent`` and set its text from
    ``data[element]``; the consumed key is removed from ``data``.

    :param parent: parent element
    :type parent: Element
    :param data: dictionary where data[element] is desired value
    :type data: dict(str, str)
    :param element: name of the new element
    :type element: str
    :return: created element
    """
    # Create the child first, then consume the value from the mapping.
    child = ET.SubElement(parent, element)
    child.text = data.pop(element)
    return child
constant[Create ``element`` to ``parent`` and sets its value to data[element], which
will be removed from the ``data``.
:param parent: parent element
:type parent: Element
:param data: dictionary where data[element] is desired value
:type data: dict(str, str)
:param element: name of the new element
:type element: str
:return: created element
]
variable[el] assign[=] call[name[ET].SubElement, parameter[name[parent], name[element]]]
name[el].text assign[=] call[name[data].pop, parameter[name[element]]]
return[name[el]] | keyword[def] identifier[element_from_dict] ( identifier[parent] , identifier[data] , identifier[element] ):
literal[string]
identifier[el] = identifier[ET] . identifier[SubElement] ( identifier[parent] , identifier[element] )
identifier[el] . identifier[text] = identifier[data] . identifier[pop] ( identifier[element] )
keyword[return] identifier[el] | def element_from_dict(parent, data, element):
"""Create ``element`` to ``parent`` and sets its value to data[element], which
will be removed from the ``data``.
:param parent: parent element
:type parent: Element
:param data: dictionary where data[element] is desired value
:type data: dict(str, str)
:param element: name of the new element
:type element: str
:return: created element
"""
el = ET.SubElement(parent, element)
el.text = data.pop(element)
return el |
def redefineBuffer(self, newBuffer ):
    """!
    Redefine the screen's backing buffer.
    @param newBuffer: the new buffer data
    @note
    newBuffer can be:
      * a PIL Image of the same concrete type as the current buffer
      * a PIL ImageFile (any format plugin subclass)
      * a dictionary, eg. { "size":(width, height), "color_mode":"1" }
        or { "size":(width, height), "color_mode":"RGB" }
    @return True when the buffer was redefined; falls through
        (implicitly returning None) for unsupported types.
    """
    # Case 1: same concrete type as the current buffer (exact type
    # match -- subclasses are handled by the branches below): adopt it
    # directly and rebuild the drawing canvas around it.
    if type(self._buffer) == type(newBuffer):
        self._buffer = newBuffer
        self.Canvas = ImageDraw.Draw( self._buffer )
        # self.View.resize(newBuffer.width, newBuffer.height)
        return True
    # Case 2: a PIL ImageFile -- detected by class-name substring
    # because each image format provides its own ImageFile subclass
    # (e.g. JpegImageFile). Resize the existing buffer to match and
    # paste the new image into it at the origin.
    # NOTE(review): Canvas is NOT rebuilt on this path, unlike the
    # other two branches -- confirm whether that is intentional.
    if type(newBuffer).__name__.find(PIL.ImageFile.ImageFile.__name__) != -1:
        self._buffer = self._buffer.resize((newBuffer.width, newBuffer.height))
        self._buffer.paste( newBuffer, (0,0))
        # self.View.resize(newBuffer.width, newBuffer.height)
        return True
    # Case 3: a spec dict -- create a brand-new image of the requested
    # color mode and size, plus a matching drawing canvas.
    if isinstance(newBuffer, dict):
        self._buffer = Image.new( newBuffer["color_mode"] , newBuffer["size"] )
        self.Canvas = ImageDraw.Draw( self._buffer )
        return True
    # Unsupported buffer type: no-op, implicitly returns None (falsy).
    pass
constant[!
\~english
Redefine frame of Screen
@param newFrame: a new fram data
@note
newFrame can be:
* PIL Image
* PIL ImageFile
* Dictionary, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
\~chinese
重新定义缓存数据
@param newFrame: 新缓存数据
newFrame 可以为下面值:
* PIL Image
* PIL ImageFile
* 字典, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
]
if compare[call[name[type], parameter[name[self]._buffer]] equal[==] call[name[type], parameter[name[newBuffer]]]] begin[:]
name[self]._buffer assign[=] name[newBuffer]
name[self].Canvas assign[=] call[name[ImageDraw].Draw, parameter[name[self]._buffer]]
return[constant[True]]
if compare[call[call[name[type], parameter[name[newBuffer]]].__name__.find, parameter[name[PIL].ImageFile.ImageFile.__name__]] not_equal[!=] <ast.UnaryOp object at 0x7da20e954b20>] begin[:]
name[self]._buffer assign[=] call[name[self]._buffer.resize, parameter[tuple[[<ast.Attribute object at 0x7da20e954790>, <ast.Attribute object at 0x7da20e957130>]]]]
call[name[self]._buffer.paste, parameter[name[newBuffer], tuple[[<ast.Constant object at 0x7da20e957190>, <ast.Constant object at 0x7da20e9540d0>]]]]
return[constant[True]]
if call[name[isinstance], parameter[name[newBuffer], name[dict]]] begin[:]
name[self]._buffer assign[=] call[name[Image].new, parameter[call[name[newBuffer]][constant[color_mode]], call[name[newBuffer]][constant[size]]]]
name[self].Canvas assign[=] call[name[ImageDraw].Draw, parameter[name[self]._buffer]]
return[constant[True]]
pass | keyword[def] identifier[redefineBuffer] ( identifier[self] , identifier[newBuffer] ):
literal[string]
keyword[if] identifier[type] ( identifier[self] . identifier[_buffer] )== identifier[type] ( identifier[newBuffer] ):
identifier[self] . identifier[_buffer] = identifier[newBuffer]
identifier[self] . identifier[Canvas] = identifier[ImageDraw] . identifier[Draw] ( identifier[self] . identifier[_buffer] )
keyword[return] keyword[True]
keyword[if] identifier[type] ( identifier[newBuffer] ). identifier[__name__] . identifier[find] ( identifier[PIL] . identifier[ImageFile] . identifier[ImageFile] . identifier[__name__] )!=- literal[int] :
identifier[self] . identifier[_buffer] = identifier[self] . identifier[_buffer] . identifier[resize] (( identifier[newBuffer] . identifier[width] , identifier[newBuffer] . identifier[height] ))
identifier[self] . identifier[_buffer] . identifier[paste] ( identifier[newBuffer] ,( literal[int] , literal[int] ))
keyword[return] keyword[True]
keyword[if] identifier[isinstance] ( identifier[newBuffer] , identifier[dict] ):
identifier[self] . identifier[_buffer] = identifier[Image] . identifier[new] ( identifier[newBuffer] [ literal[string] ], identifier[newBuffer] [ literal[string] ])
identifier[self] . identifier[Canvas] = identifier[ImageDraw] . identifier[Draw] ( identifier[self] . identifier[_buffer] )
keyword[return] keyword[True]
keyword[pass] | def redefineBuffer(self, newBuffer):
"""!
\\~english
Redefine frame of Screen
@param newFrame: a new fram data
@note
newFrame can be:
* PIL Image
* PIL ImageFile
* Dictionary, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
\\~chinese
重新定义缓存数据
@param newFrame: 新缓存数据
newFrame 可以为下面值:
* PIL Image
* PIL ImageFile
* 字典, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
"""
# Redefine Frame from an image object
if type(self._buffer) == type(newBuffer):
self._buffer = newBuffer
self.Canvas = ImageDraw.Draw(self._buffer)
# self.View.resize(newBuffer.width, newBuffer.height)
return True # depends on [control=['if'], data=[]]
# Redefine Frame from an <PIL.ImageFile.ImageFile>
if type(newBuffer).__name__.find(PIL.ImageFile.ImageFile.__name__) != -1:
self._buffer = self._buffer.resize((newBuffer.width, newBuffer.height))
self._buffer.paste(newBuffer, (0, 0))
# self.View.resize(newBuffer.width, newBuffer.height)
return True # depends on [control=['if'], data=[]]
# Recreated a new frame from dict of frame
if isinstance(newBuffer, dict):
self._buffer = Image.new(newBuffer['color_mode'], newBuffer['size'])
self.Canvas = ImageDraw.Draw(self._buffer)
return True # depends on [control=['if'], data=[]]
pass |
def java(items):
    """Check for presence of external Java 1.7 for tools that require it.

    Returns an error-message string when a suitable ``java`` (>= 1.7,
    strictly < 1.8) is not first on PATH; returns None implicitly when
    the check passes or when no item needs Java.
    """
    # Only pay for the subprocess probe when at least one item in the
    # batch actually requires Java 1.7 (MuTect / GATK < 3.6).
    if any([_needs_java(d) for d in items]):
        min_version = "1.7"
        max_version = "1.8"  # exclusive upper bound: 1.8+ is rejected
        # Probe the user's original PATH, not the tool-shadowed one.
        with setpath.orig_paths():
            java = utils.which("java")
            if not java:
                return ("java not found on PATH. Java %s required for MuTect and GATK < 3.6." % min_version)
            # `java -version` prints its banner to stderr, so stderr is
            # folded into stdout; the small heap flags keep the probe cheap.
            p = subprocess.Popen([java, "-Xms250m", "-Xmx250m", "-version"],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            output, _ = p.communicate()
            p.stdout.close()
            version = ""
            # NOTE(review): output is treated as text; under Python 3 a
            # Popen without text/universal_newlines yields bytes, so the
            # "\n" split assumes a str-producing environment -- confirm.
            for line in output.split("\n"):
                # Recognize both Oracle ("java version") and OpenJDK
                # ("openjdk version") banner formats.
                if line.startswith(("java version", "openjdk version")):
                    version = line.strip().split()[-1]
                    # Strip surrounding quotes, e.g. '"1.7.0_80"'.
                    if version.startswith('"'):
                        version = version[1:]
                    if version.endswith('"'):
                        version = version[:-1]
            # Reject when no banner parsed, or version outside
            # [min_version, max_version).
            if (not version or LooseVersion(version) >= LooseVersion(max_version) or
                    LooseVersion(version) < LooseVersion(min_version)):
                return ("java version %s required for running MuTect and GATK < 3.6.\n"
                        "It needs to be first on your PATH so running 'java -version' give the correct version.\n"
                        "Found version %s at %s" % (min_version, version, java))
constant[Check for presence of external Java 1.7 for tools that require it.
]
if call[name[any], parameter[<ast.ListComp object at 0x7da1b2347eb0>]] begin[:]
variable[min_version] assign[=] constant[1.7]
variable[max_version] assign[=] constant[1.8]
with call[name[setpath].orig_paths, parameter[]] begin[:]
variable[java] assign[=] call[name[utils].which, parameter[constant[java]]]
if <ast.UnaryOp object at 0x7da20c76dab0> begin[:]
return[binary_operation[constant[java not found on PATH. Java %s required for MuTect and GATK < 3.6.] <ast.Mod object at 0x7da2590d6920> name[min_version]]]
variable[p] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Name object at 0x7da20c76f790>, <ast.Constant object at 0x7da20c76fa30>, <ast.Constant object at 0x7da20c76fcd0>, <ast.Constant object at 0x7da20c76f5e0>]]]]
<ast.Tuple object at 0x7da20c6e4a30> assign[=] call[name[p].communicate, parameter[]]
call[name[p].stdout.close, parameter[]]
variable[version] assign[=] constant[]
for taget[name[line]] in starred[call[name[output].split, parameter[constant[
]]]] begin[:]
if call[name[line].startswith, parameter[tuple[[<ast.Constant object at 0x7da20c6e4b80>, <ast.Constant object at 0x7da20c6e6ad0>]]]] begin[:]
variable[version] assign[=] call[call[call[name[line].strip, parameter[]].split, parameter[]]][<ast.UnaryOp object at 0x7da20c6e6200>]
if call[name[version].startswith, parameter[constant["]]] begin[:]
variable[version] assign[=] call[name[version]][<ast.Slice object at 0x7da20c6e7d30>]
if call[name[version].endswith, parameter[constant["]]] begin[:]
variable[version] assign[=] call[name[version]][<ast.Slice object at 0x7da20c6e43d0>]
if <ast.BoolOp object at 0x7da20c6e4730> begin[:]
return[binary_operation[constant[java version %s required for running MuTect and GATK < 3.6.
It needs to be first on your PATH so running 'java -version' give the correct version.
Found version %s at %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6e7850>, <ast.Name object at 0x7da20c6e5ba0>, <ast.Name object at 0x7da20c6e5600>]]]] | keyword[def] identifier[java] ( identifier[items] ):
literal[string]
keyword[if] identifier[any] ([ identifier[_needs_java] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[items] ]):
identifier[min_version] = literal[string]
identifier[max_version] = literal[string]
keyword[with] identifier[setpath] . identifier[orig_paths] ():
identifier[java] = identifier[utils] . identifier[which] ( literal[string] )
keyword[if] keyword[not] identifier[java] :
keyword[return] ( literal[string] % identifier[min_version] )
identifier[p] = identifier[subprocess] . identifier[Popen] ([ identifier[java] , literal[string] , literal[string] , literal[string] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[STDOUT] )
identifier[output] , identifier[_] = identifier[p] . identifier[communicate] ()
identifier[p] . identifier[stdout] . identifier[close] ()
identifier[version] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[output] . identifier[split] ( literal[string] ):
keyword[if] identifier[line] . identifier[startswith] (( literal[string] , literal[string] )):
identifier[version] = identifier[line] . identifier[strip] (). identifier[split] ()[- literal[int] ]
keyword[if] identifier[version] . identifier[startswith] ( literal[string] ):
identifier[version] = identifier[version] [ literal[int] :]
keyword[if] identifier[version] . identifier[endswith] ( literal[string] ):
identifier[version] = identifier[version] [:- literal[int] ]
keyword[if] ( keyword[not] identifier[version] keyword[or] identifier[LooseVersion] ( identifier[version] )>= identifier[LooseVersion] ( identifier[max_version] ) keyword[or]
identifier[LooseVersion] ( identifier[version] )< identifier[LooseVersion] ( identifier[min_version] )):
keyword[return] ( literal[string]
literal[string]
literal[string] %( identifier[min_version] , identifier[version] , identifier[java] )) | def java(items):
"""Check for presence of external Java 1.7 for tools that require it.
"""
if any([_needs_java(d) for d in items]):
min_version = '1.7'
max_version = '1.8'
with setpath.orig_paths():
java = utils.which('java')
if not java:
return 'java not found on PATH. Java %s required for MuTect and GATK < 3.6.' % min_version # depends on [control=['if'], data=[]]
p = subprocess.Popen([java, '-Xms250m', '-Xmx250m', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, _) = p.communicate()
p.stdout.close() # depends on [control=['with'], data=[]]
version = ''
for line in output.split('\n'):
if line.startswith(('java version', 'openjdk version')):
version = line.strip().split()[-1]
if version.startswith('"'):
version = version[1:] # depends on [control=['if'], data=[]]
if version.endswith('"'):
version = version[:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if not version or LooseVersion(version) >= LooseVersion(max_version) or LooseVersion(version) < LooseVersion(min_version):
return "java version %s required for running MuTect and GATK < 3.6.\nIt needs to be first on your PATH so running 'java -version' give the correct version.\nFound version %s at %s" % (min_version, version, java) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def merged_gasmap(self, **kwargs):
    """Return the file name for Galprop merged gasmaps.

    Keyword arguments override the instance's base dictionary; pass
    ``fullpath=True`` to get the fully-qualified path instead of the
    local one.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    name = NameFactory.merged_gasmap_format.format(**merged)
    if kwargs.get('fullpath', False):
        name = self.fullpath(localpath=name)
    return name
constant[ return the file name for Galprop merged gasmaps
]
variable[kwargs_copy] assign[=] call[name[self].base_dict.copy, parameter[]]
call[name[kwargs_copy].update, parameter[]]
call[name[self]._replace_none, parameter[name[kwargs_copy]]]
variable[localpath] assign[=] call[name[NameFactory].merged_gasmap_format.format, parameter[]]
if call[name[kwargs].get, parameter[constant[fullpath], constant[False]]] begin[:]
return[call[name[self].fullpath, parameter[]]]
return[name[localpath]] | keyword[def] identifier[merged_gasmap] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs_copy] = identifier[self] . identifier[base_dict] . identifier[copy] ()
identifier[kwargs_copy] . identifier[update] (** identifier[kwargs] )
identifier[self] . identifier[_replace_none] ( identifier[kwargs_copy] )
identifier[localpath] = identifier[NameFactory] . identifier[merged_gasmap_format] . identifier[format] (** identifier[kwargs_copy] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
keyword[return] identifier[self] . identifier[fullpath] ( identifier[localpath] = identifier[localpath] )
keyword[return] identifier[localpath] | def merged_gasmap(self, **kwargs):
""" return the file name for Galprop merged gasmaps
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.merged_gasmap_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath) # depends on [control=['if'], data=[]]
return localpath |
def SendSerializedMessage(self, message):
    """
    Send the `message` to the remote client.

    Serialization failures and transport errors are logged at debug
    level rather than raised (best-effort delivery).

    Args:
        message (neo.Network.Message):
    """
    try:
        payload = binascii.unhexlify(Helper.ToArray(message))
        self.bytes_out += len(payload)
        self.transport.write(payload)
    except Exception as e:
        logger.debug(f"Could not send serialized message {e}")
constant[
Send the `message` to the remote client.
Args:
message (neo.Network.Message):
]
<ast.Try object at 0x7da1b1df9c60> | keyword[def] identifier[SendSerializedMessage] ( identifier[self] , identifier[message] ):
literal[string]
keyword[try] :
identifier[ba] = identifier[Helper] . identifier[ToArray] ( identifier[message] )
identifier[ba2] = identifier[binascii] . identifier[unhexlify] ( identifier[ba] )
identifier[self] . identifier[bytes_out] += identifier[len] ( identifier[ba2] )
identifier[self] . identifier[transport] . identifier[write] ( identifier[ba2] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] ) | def SendSerializedMessage(self, message):
"""
Send the `message` to the remote client.
Args:
message (neo.Network.Message):
"""
try:
ba = Helper.ToArray(message)
ba2 = binascii.unhexlify(ba)
self.bytes_out += len(ba2)
self.transport.write(ba2) # depends on [control=['try'], data=[]]
except Exception as e:
logger.debug(f'Could not send serialized message {e}') # depends on [control=['except'], data=['e']] |
def qualifyContracts(self, *contracts: Contract) -> List[Contract]:
    """
    Fully qualify the given contracts in-place. This will fill in
    the missing fields in the contract, especially the conId.

    Returns a list of contracts that have been successfully qualified.

    This method is blocking.

    Args:
        contracts: Contracts to qualify.
    """
    # Fix: each vararg is a single Contract, so the star parameter is
    # annotated as Contract (the old ``List[Contract]`` claimed every
    # positional argument was itself a list).
    return self._run(self.qualifyContractsAsync(*contracts))
constant[
Fully qualify the given contracts in-place. This will fill in
the missing fields in the contract, especially the conId.
Returns a list of contracts that have been successfully qualified.
This method is blocking.
Args:
contracts: Contracts to qualify.
]
return[call[name[self]._run, parameter[call[name[self].qualifyContractsAsync, parameter[<ast.Starred object at 0x7da18dc06ec0>]]]]] | keyword[def] identifier[qualifyContracts] ( identifier[self] ,* identifier[contracts] : identifier[List] [ identifier[Contract] ])-> identifier[List] [ identifier[Contract] ]:
literal[string]
keyword[return] identifier[self] . identifier[_run] ( identifier[self] . identifier[qualifyContractsAsync] (* identifier[contracts] )) | def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:
"""
Fully qualify the given contracts in-place. This will fill in
the missing fields in the contract, especially the conId.
Returns a list of contracts that have been successfully qualified.
This method is blocking.
Args:
contracts: Contracts to qualify.
"""
return self._run(self.qualifyContractsAsync(*contracts)) |
def request_comments(self, id, **kwargs):
    """https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments"""
    # Interpolate the request id into the endpoint and delegate.
    endpoint = "/api/v2/requests/{id}/comments.json".format(id=id)
    return self.call(endpoint, **kwargs)
constant[https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments]
variable[api_path] assign[=] constant[/api/v2/requests/{id}/comments.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[request_comments] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] ) | def request_comments(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments"""
api_path = '/api/v2/requests/{id}/comments.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def addCorpusId(self, value):
'''Adds SourceId to External_Info
'''
if isinstance(value, Corpus_Id):
self.corpus_ids.append(value)
else:
raise (TypeError,
'source_id Type should be Source_Id, not %s' % type(source_id)) | def function[addCorpusId, parameter[self, value]]:
constant[Adds SourceId to External_Info
]
if call[name[isinstance], parameter[name[value], name[Corpus_Id]]] begin[:]
call[name[self].corpus_ids.append, parameter[name[value]]] | keyword[def] identifier[addCorpusId] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Corpus_Id] ):
identifier[self] . identifier[corpus_ids] . identifier[append] ( identifier[value] )
keyword[else] :
keyword[raise] ( identifier[TypeError] ,
literal[string] % identifier[type] ( identifier[source_id] )) | def addCorpusId(self, value):
"""Adds SourceId to External_Info
"""
if isinstance(value, Corpus_Id):
self.corpus_ids.append(value) # depends on [control=['if'], data=[]]
else:
raise (TypeError, 'source_id Type should be Source_Id, not %s' % type(source_id)) |
def find_local_changes():
""" Find things that have changed since the last run, applying ignore filters """
manifest = data_store.read_local_manifest()
old_state = manifest['files']
current_state = get_file_list(config['data_dir'])
current_state = [fle for fle in current_state if not
next((True for flter in config['ignore_filters']
if fnmatch.fnmatch(fle['path'], flter)), False)]
return manifest, find_manifest_changes(current_state, old_state) | def function[find_local_changes, parameter[]]:
constant[ Find things that have changed since the last run, applying ignore filters ]
variable[manifest] assign[=] call[name[data_store].read_local_manifest, parameter[]]
variable[old_state] assign[=] call[name[manifest]][constant[files]]
variable[current_state] assign[=] call[name[get_file_list], parameter[call[name[config]][constant[data_dir]]]]
variable[current_state] assign[=] <ast.ListComp object at 0x7da20c992d10>
return[tuple[[<ast.Name object at 0x7da20c993610>, <ast.Call object at 0x7da20c992f80>]]] | keyword[def] identifier[find_local_changes] ():
literal[string]
identifier[manifest] = identifier[data_store] . identifier[read_local_manifest] ()
identifier[old_state] = identifier[manifest] [ literal[string] ]
identifier[current_state] = identifier[get_file_list] ( identifier[config] [ literal[string] ])
identifier[current_state] =[ identifier[fle] keyword[for] identifier[fle] keyword[in] identifier[current_state] keyword[if] keyword[not]
identifier[next] (( keyword[True] keyword[for] identifier[flter] keyword[in] identifier[config] [ literal[string] ]
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[fle] [ literal[string] ], identifier[flter] )), keyword[False] )]
keyword[return] identifier[manifest] , identifier[find_manifest_changes] ( identifier[current_state] , identifier[old_state] ) | def find_local_changes():
""" Find things that have changed since the last run, applying ignore filters """
manifest = data_store.read_local_manifest()
old_state = manifest['files']
current_state = get_file_list(config['data_dir'])
current_state = [fle for fle in current_state if not next((True for flter in config['ignore_filters'] if fnmatch.fnmatch(fle['path'], flter)), False)]
return (manifest, find_manifest_changes(current_state, old_state)) |
def get_unconstrained_bytes(self, name, bits, source=None, key=None, inspect=True, events=True, **kwargs):
"""
Get some consecutive unconstrained bytes.
:param name: Name of the unconstrained variable
:param bits: Size of the unconstrained variable
:param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
execution so that we can track the allocation depth.
:return: The generated variable
"""
if (self.category == 'mem' and
options.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY in self.state.options):
# CGC binaries zero-fill the memory for any allocated region
# Reference: (https://github.com/CyberGrandChallenge/libcgc/blob/master/allocate.md)
return self.state.solver.BVV(0, bits)
elif options.SPECIAL_MEMORY_FILL in self.state.options and self.state._special_memory_filler is not None:
return self.state._special_memory_filler(name, bits, self.state)
else:
if options.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
if source is not None and type(source) is int:
alloc_depth = self.state.uc_manager.get_alloc_depth(source)
kwargs['uc_alloc_depth'] = 0 if alloc_depth is None else alloc_depth + 1
r = self.state.solver.Unconstrained(name, bits, key=key, inspect=inspect, events=events, **kwargs)
return r | def function[get_unconstrained_bytes, parameter[self, name, bits, source, key, inspect, events]]:
constant[
Get some consecutive unconstrained bytes.
:param name: Name of the unconstrained variable
:param bits: Size of the unconstrained variable
:param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
execution so that we can track the allocation depth.
:return: The generated variable
]
if <ast.BoolOp object at 0x7da18f00f940> begin[:]
return[call[name[self].state.solver.BVV, parameter[constant[0], name[bits]]]] | keyword[def] identifier[get_unconstrained_bytes] ( identifier[self] , identifier[name] , identifier[bits] , identifier[source] = keyword[None] , identifier[key] = keyword[None] , identifier[inspect] = keyword[True] , identifier[events] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[if] ( identifier[self] . identifier[category] == literal[string] keyword[and]
identifier[options] . identifier[CGC_ZERO_FILL_UNCONSTRAINED_MEMORY] keyword[in] identifier[self] . identifier[state] . identifier[options] ):
keyword[return] identifier[self] . identifier[state] . identifier[solver] . identifier[BVV] ( literal[int] , identifier[bits] )
keyword[elif] identifier[options] . identifier[SPECIAL_MEMORY_FILL] keyword[in] identifier[self] . identifier[state] . identifier[options] keyword[and] identifier[self] . identifier[state] . identifier[_special_memory_filler] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[state] . identifier[_special_memory_filler] ( identifier[name] , identifier[bits] , identifier[self] . identifier[state] )
keyword[else] :
keyword[if] identifier[options] . identifier[UNDER_CONSTRAINED_SYMEXEC] keyword[in] identifier[self] . identifier[state] . identifier[options] :
keyword[if] identifier[source] keyword[is] keyword[not] keyword[None] keyword[and] identifier[type] ( identifier[source] ) keyword[is] identifier[int] :
identifier[alloc_depth] = identifier[self] . identifier[state] . identifier[uc_manager] . identifier[get_alloc_depth] ( identifier[source] )
identifier[kwargs] [ literal[string] ]= literal[int] keyword[if] identifier[alloc_depth] keyword[is] keyword[None] keyword[else] identifier[alloc_depth] + literal[int]
identifier[r] = identifier[self] . identifier[state] . identifier[solver] . identifier[Unconstrained] ( identifier[name] , identifier[bits] , identifier[key] = identifier[key] , identifier[inspect] = identifier[inspect] , identifier[events] = identifier[events] ,** identifier[kwargs] )
keyword[return] identifier[r] | def get_unconstrained_bytes(self, name, bits, source=None, key=None, inspect=True, events=True, **kwargs):
"""
Get some consecutive unconstrained bytes.
:param name: Name of the unconstrained variable
:param bits: Size of the unconstrained variable
:param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
execution so that we can track the allocation depth.
:return: The generated variable
"""
if self.category == 'mem' and options.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY in self.state.options:
# CGC binaries zero-fill the memory for any allocated region
# Reference: (https://github.com/CyberGrandChallenge/libcgc/blob/master/allocate.md)
return self.state.solver.BVV(0, bits) # depends on [control=['if'], data=[]]
elif options.SPECIAL_MEMORY_FILL in self.state.options and self.state._special_memory_filler is not None:
return self.state._special_memory_filler(name, bits, self.state) # depends on [control=['if'], data=[]]
else:
if options.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
if source is not None and type(source) is int:
alloc_depth = self.state.uc_manager.get_alloc_depth(source)
kwargs['uc_alloc_depth'] = 0 if alloc_depth is None else alloc_depth + 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
r = self.state.solver.Unconstrained(name, bits, key=key, inspect=inspect, events=events, **kwargs)
return r |
def create_blob(profile, content):
"""Create a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
content
The (UTF-8 encoded) content to create in the blob.
Returns:
A dict with data about the newly created blob.
"""
resource = "/blobs"
payload = {"content": content}
data = api.post_request(profile, resource, payload)
return data | def function[create_blob, parameter[profile, content]]:
constant[Create a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
content
The (UTF-8 encoded) content to create in the blob.
Returns:
A dict with data about the newly created blob.
]
variable[resource] assign[=] constant[/blobs]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1366710>], [<ast.Name object at 0x7da1b1366500>]]
variable[data] assign[=] call[name[api].post_request, parameter[name[profile], name[resource], name[payload]]]
return[name[data]] | keyword[def] identifier[create_blob] ( identifier[profile] , identifier[content] ):
literal[string]
identifier[resource] = literal[string]
identifier[payload] ={ literal[string] : identifier[content] }
identifier[data] = identifier[api] . identifier[post_request] ( identifier[profile] , identifier[resource] , identifier[payload] )
keyword[return] identifier[data] | def create_blob(profile, content):
"""Create a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
content
The (UTF-8 encoded) content to create in the blob.
Returns:
A dict with data about the newly created blob.
"""
resource = '/blobs'
payload = {'content': content}
data = api.post_request(profile, resource, payload)
return data |
def change_crypto_type(self, crypto_type, zeroize=None):
"""
Reconfigures a cryptographic adapter to a different crypto type.
This operation is only supported for cryptographic adapters.
The cryptographic adapter must be varied offline before its crypto
type can be reconfigured.
Authorization requirements:
* Object-access permission to this Adapter.
* Task permission to the "Adapter Details" task.
Parameters:
crypto_type (:term:`string`):
- ``"accelerator"``: Crypto Express5S Accelerator
- ``"cca-coprocessor"``: Crypto Express5S CCA Coprocessor
- ``"ep11-coprocessor"``: Crypto Express5S EP11 Coprocessor
zeroize (bool):
Specifies whether the cryptographic adapter will be zeroized when
it is reconfigured to a crypto type of ``"accelerator"``.
`None` means that the HMC-implemented default of `True` will be
used.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'crypto-type': crypto_type}
if zeroize is not None:
body['zeroize'] = zeroize
self.manager.session.post(
self.uri + '/operations/change-crypto-type', body) | def function[change_crypto_type, parameter[self, crypto_type, zeroize]]:
constant[
Reconfigures a cryptographic adapter to a different crypto type.
This operation is only supported for cryptographic adapters.
The cryptographic adapter must be varied offline before its crypto
type can be reconfigured.
Authorization requirements:
* Object-access permission to this Adapter.
* Task permission to the "Adapter Details" task.
Parameters:
crypto_type (:term:`string`):
- ``"accelerator"``: Crypto Express5S Accelerator
- ``"cca-coprocessor"``: Crypto Express5S CCA Coprocessor
- ``"ep11-coprocessor"``: Crypto Express5S EP11 Coprocessor
zeroize (bool):
Specifies whether the cryptographic adapter will be zeroized when
it is reconfigured to a crypto type of ``"accelerator"``.
`None` means that the HMC-implemented default of `True` will be
used.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f722dd0>], [<ast.Name object at 0x7da18f722140>]]
if compare[name[zeroize] is_not constant[None]] begin[:]
call[name[body]][constant[zeroize]] assign[=] name[zeroize]
call[name[self].manager.session.post, parameter[binary_operation[name[self].uri + constant[/operations/change-crypto-type]], name[body]]] | keyword[def] identifier[change_crypto_type] ( identifier[self] , identifier[crypto_type] , identifier[zeroize] = keyword[None] ):
literal[string]
identifier[body] ={ literal[string] : identifier[crypto_type] }
keyword[if] identifier[zeroize] keyword[is] keyword[not] keyword[None] :
identifier[body] [ literal[string] ]= identifier[zeroize]
identifier[self] . identifier[manager] . identifier[session] . identifier[post] (
identifier[self] . identifier[uri] + literal[string] , identifier[body] ) | def change_crypto_type(self, crypto_type, zeroize=None):
"""
Reconfigures a cryptographic adapter to a different crypto type.
This operation is only supported for cryptographic adapters.
The cryptographic adapter must be varied offline before its crypto
type can be reconfigured.
Authorization requirements:
* Object-access permission to this Adapter.
* Task permission to the "Adapter Details" task.
Parameters:
crypto_type (:term:`string`):
- ``"accelerator"``: Crypto Express5S Accelerator
- ``"cca-coprocessor"``: Crypto Express5S CCA Coprocessor
- ``"ep11-coprocessor"``: Crypto Express5S EP11 Coprocessor
zeroize (bool):
Specifies whether the cryptographic adapter will be zeroized when
it is reconfigured to a crypto type of ``"accelerator"``.
`None` means that the HMC-implemented default of `True` will be
used.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'crypto-type': crypto_type}
if zeroize is not None:
body['zeroize'] = zeroize # depends on [control=['if'], data=['zeroize']]
self.manager.session.post(self.uri + '/operations/change-crypto-type', body) |
def descendants(self, unroll=False, skip_not_present=True, in_post_order=False):
"""
Returns an iterator that provides nodes for all descendants of this
component.
Parameters
----------
unroll : bool
If True, any children that are arrays are unrolled.
skip_not_present : bool
If True, skips children whose 'ispresent' property is set to False
in_post_order : bool
If True, descendants are walked using post-order traversal
(children first) rather than the default pre-order traversal
(parents first).
Yields
------
:class:`~Node`
All descendant nodes of this component
"""
for child in self.children(unroll, skip_not_present):
if in_post_order:
yield from child.descendants(unroll, skip_not_present, in_post_order)
yield child
if not in_post_order:
yield from child.descendants(unroll, skip_not_present, in_post_order) | def function[descendants, parameter[self, unroll, skip_not_present, in_post_order]]:
constant[
Returns an iterator that provides nodes for all descendants of this
component.
Parameters
----------
unroll : bool
If True, any children that are arrays are unrolled.
skip_not_present : bool
If True, skips children whose 'ispresent' property is set to False
in_post_order : bool
If True, descendants are walked using post-order traversal
(children first) rather than the default pre-order traversal
(parents first).
Yields
------
:class:`~Node`
All descendant nodes of this component
]
for taget[name[child]] in starred[call[name[self].children, parameter[name[unroll], name[skip_not_present]]]] begin[:]
if name[in_post_order] begin[:]
<ast.YieldFrom object at 0x7da1b0d777c0>
<ast.Yield object at 0x7da1b0d74e80>
if <ast.UnaryOp object at 0x7da1b0d761a0> begin[:]
<ast.YieldFrom object at 0x7da1b0d76500> | keyword[def] identifier[descendants] ( identifier[self] , identifier[unroll] = keyword[False] , identifier[skip_not_present] = keyword[True] , identifier[in_post_order] = keyword[False] ):
literal[string]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] ( identifier[unroll] , identifier[skip_not_present] ):
keyword[if] identifier[in_post_order] :
keyword[yield] keyword[from] identifier[child] . identifier[descendants] ( identifier[unroll] , identifier[skip_not_present] , identifier[in_post_order] )
keyword[yield] identifier[child]
keyword[if] keyword[not] identifier[in_post_order] :
keyword[yield] keyword[from] identifier[child] . identifier[descendants] ( identifier[unroll] , identifier[skip_not_present] , identifier[in_post_order] ) | def descendants(self, unroll=False, skip_not_present=True, in_post_order=False):
"""
Returns an iterator that provides nodes for all descendants of this
component.
Parameters
----------
unroll : bool
If True, any children that are arrays are unrolled.
skip_not_present : bool
If True, skips children whose 'ispresent' property is set to False
in_post_order : bool
If True, descendants are walked using post-order traversal
(children first) rather than the default pre-order traversal
(parents first).
Yields
------
:class:`~Node`
All descendant nodes of this component
"""
for child in self.children(unroll, skip_not_present):
if in_post_order:
yield from child.descendants(unroll, skip_not_present, in_post_order) # depends on [control=['if'], data=[]]
yield child
if not in_post_order:
yield from child.descendants(unroll, skip_not_present, in_post_order) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.