code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def add_output(self, out_name, type_or_serialize=None, **kwargs):
    """ Declare an output

    :param out_name: name of the output; must be one of the names returned
        by ``self.engine.all_outputs()``, otherwise a ValueError is raised.
    :param type_or_serialize: a GenericType instance, a callable that is
        wrapped as the ``serialize`` function of a new GenericType, or None
        for a plain default GenericType. Anything else raises ValueError.
    :param kwargs: optional extra parameters stored alongside the serializer.
    """
    if out_name not in self.engine.all_outputs():
        raise ValueError("'%s' is not generated by the engine %s" % (out_name, self.engine.all_outputs()))
    if type_or_serialize is None:
        type_or_serialize = GenericType()
    # a bare callable becomes the serialize function of a new GenericType
    if not isinstance(type_or_serialize, GenericType) and callable(type_or_serialize):
        type_or_serialize = GenericType(serialize=type_or_serialize)
    elif not isinstance(type_or_serialize, GenericType):
        raise ValueError("the given 'type_or_serialize' is invalid")
    # register outputs
    self._outputs[out_name] = {
        'serializer': type_or_serialize,
        'parameters': kwargs if kwargs else {}
    } | def function[add_output, parameter[self, out_name, type_or_serialize]]:
constant[ Declare an output
]
if compare[name[out_name] <ast.NotIn object at 0x7da2590d7190> call[name[self].engine.all_outputs, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b1351120>
if compare[name[type_or_serialize] is constant[None]] begin[:]
variable[type_or_serialize] assign[=] call[name[GenericType], parameter[]]
if <ast.BoolOp object at 0x7da1b1350a30> begin[:]
variable[type_or_serialize] assign[=] call[name[GenericType], parameter[]]
call[name[self]._outputs][name[out_name]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1342050>, <ast.Constant object at 0x7da1b1340730>], [<ast.Name object at 0x7da1b13424d0>, <ast.IfExp object at 0x7da1b1341fc0>]] | keyword[def] identifier[add_output] ( identifier[self] , identifier[out_name] , identifier[type_or_serialize] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[out_name] keyword[not] keyword[in] identifier[self] . identifier[engine] . identifier[all_outputs] ():
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[out_name] , identifier[self] . identifier[engine] . identifier[all_outputs] ()))
keyword[if] identifier[type_or_serialize] keyword[is] keyword[None] :
identifier[type_or_serialize] = identifier[GenericType] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[type_or_serialize] , identifier[GenericType] ) keyword[and] identifier[callable] ( identifier[type_or_serialize] ):
identifier[type_or_serialize] = identifier[GenericType] ( identifier[serialize] = identifier[type_or_serialize] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[type_or_serialize] , identifier[GenericType] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_outputs] [ identifier[out_name] ]={
literal[string] : identifier[type_or_serialize] ,
literal[string] : identifier[kwargs] keyword[if] identifier[kwargs] keyword[else] {}
} | def add_output(self, out_name, type_or_serialize=None, **kwargs):
""" Declare an output
"""
if out_name not in self.engine.all_outputs():
raise ValueError("'%s' is not generated by the engine %s" % (out_name, self.engine.all_outputs())) # depends on [control=['if'], data=['out_name']]
if type_or_serialize is None:
type_or_serialize = GenericType() # depends on [control=['if'], data=['type_or_serialize']]
if not isinstance(type_or_serialize, GenericType) and callable(type_or_serialize):
type_or_serialize = GenericType(serialize=type_or_serialize) # depends on [control=['if'], data=[]]
elif not isinstance(type_or_serialize, GenericType):
raise ValueError("the given 'type_or_serialize' is invalid") # depends on [control=['if'], data=[]]
# register outpurs
self._outputs[out_name] = {'serializer': type_or_serialize, 'parameters': kwargs if kwargs else {}} |
def transitive_dependents_of_addresses(self, addresses):
    """Given an iterable of addresses, return a list of those addresses'
    dependents, transitively.

    NOTE: despite earlier wording this does not yield; it returns a list in
    breadth-first visit order, and the input addresses themselves are included.
    """
    closure = set()  # addresses already visited (guards against cycles/duplicates)
    result = []      # preserves first-visit order for the returned list
    to_visit = deque(addresses)
    # breadth-first traversal over both explicit and implicit dependents
    while to_visit:
        address = to_visit.popleft()
        if address in closure:
            continue
        closure.add(address)
        result.append(address)
        to_visit.extend(self._dependent_address_map[address])
        to_visit.extend(self._implicit_dependent_address_map[address])
    return result | def function[transitive_dependents_of_addresses, parameter[self, addresses]]:
constant[Given an iterable of addresses, yield all of those addresses dependents, transitively.]
variable[closure] assign[=] call[name[set], parameter[]]
variable[result] assign[=] list[[]]
variable[to_visit] assign[=] call[name[deque], parameter[name[addresses]]]
while name[to_visit] begin[:]
variable[address] assign[=] call[name[to_visit].popleft, parameter[]]
if compare[name[address] in name[closure]] begin[:]
continue
call[name[closure].add, parameter[name[address]]]
call[name[result].append, parameter[name[address]]]
call[name[to_visit].extend, parameter[call[name[self]._dependent_address_map][name[address]]]]
call[name[to_visit].extend, parameter[call[name[self]._implicit_dependent_address_map][name[address]]]]
return[name[result]] | keyword[def] identifier[transitive_dependents_of_addresses] ( identifier[self] , identifier[addresses] ):
literal[string]
identifier[closure] = identifier[set] ()
identifier[result] =[]
identifier[to_visit] = identifier[deque] ( identifier[addresses] )
keyword[while] identifier[to_visit] :
identifier[address] = identifier[to_visit] . identifier[popleft] ()
keyword[if] identifier[address] keyword[in] identifier[closure] :
keyword[continue]
identifier[closure] . identifier[add] ( identifier[address] )
identifier[result] . identifier[append] ( identifier[address] )
identifier[to_visit] . identifier[extend] ( identifier[self] . identifier[_dependent_address_map] [ identifier[address] ])
identifier[to_visit] . identifier[extend] ( identifier[self] . identifier[_implicit_dependent_address_map] [ identifier[address] ])
keyword[return] identifier[result] | def transitive_dependents_of_addresses(self, addresses):
"""Given an iterable of addresses, yield all of those addresses dependents, transitively."""
closure = set()
result = []
to_visit = deque(addresses)
while to_visit:
address = to_visit.popleft()
if address in closure:
continue # depends on [control=['if'], data=[]]
closure.add(address)
result.append(address)
to_visit.extend(self._dependent_address_map[address])
to_visit.extend(self._implicit_dependent_address_map[address]) # depends on [control=['while'], data=[]]
return result |
def result(self):
    """
    The result of the job if available

    throws ValueError if result is not available yet
    throws ApiError if server returned an error indicating program execution was not successful
    or if the job was cancelled
    """
    if not self.is_done():
        raise ValueError("Cannot get a result for a program that isn't completed.")
    if self._raw['status'] == 'CANCELLED':
        raise CancellationError(self._raw['result'])
    elif self._raw['status'] == 'ERROR':
        # map the server-side error onto a machine-specific exception type
        if self._machine == 'QVM':
            raise QVMError(self._raw['result'])
        elif self._machine == 'QPU':
            raise QPUError(self._raw['result'])
        elif self._machine == 'QUILC':
            raise QUILCError(self._raw['result'])
        else:
            raise UnknownApiError(self._raw['result'])
    # wavefunction results arrive base64-encoded and bit-packed
    if self._raw['program']['type'] == 'wavefunction':
        return Wavefunction.from_bit_packed_string(
            base64.b64decode(self._raw['result']), self._raw['program']['addresses'])
    elif self._raw['program']['type'] in ['multishot', 'multishot-measure', 'expectation']:
        return np.asarray(self._raw['result'])
    else:
        # any other program type: return the raw result payload unchanged
        return self._raw['result'] | def function[result, parameter[self]]:
constant[
The result of the job if available
throws ValueError is result is not available yet
throws ApiError if server returned an error indicating program execution was not successful
or if the job was cancelled
]
if <ast.UnaryOp object at 0x7da1b1cd6410> begin[:]
<ast.Raise object at 0x7da1b1cd4f40>
if compare[call[name[self]._raw][constant[status]] equal[==] constant[CANCELLED]] begin[:]
<ast.Raise object at 0x7da1b1cd7670>
if compare[call[call[name[self]._raw][constant[program]]][constant[type]] equal[==] constant[wavefunction]] begin[:]
return[call[name[Wavefunction].from_bit_packed_string, parameter[call[name[base64].b64decode, parameter[call[name[self]._raw][constant[result]]]], call[call[name[self]._raw][constant[program]]][constant[addresses]]]]] | keyword[def] identifier[result] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_done] ():
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[_raw] [ literal[string] ]== literal[string] :
keyword[raise] identifier[CancellationError] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[elif] identifier[self] . identifier[_raw] [ literal[string] ]== literal[string] :
keyword[if] identifier[self] . identifier[_machine] == literal[string] :
keyword[raise] identifier[QVMError] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[elif] identifier[self] . identifier[_machine] == literal[string] :
keyword[raise] identifier[QPUError] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[elif] identifier[self] . identifier[_machine] == literal[string] :
keyword[raise] identifier[QUILCError] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[else] :
keyword[raise] identifier[UnknownApiError] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[if] identifier[self] . identifier[_raw] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[return] identifier[Wavefunction] . identifier[from_bit_packed_string] (
identifier[base64] . identifier[b64decode] ( identifier[self] . identifier[_raw] [ literal[string] ]), identifier[self] . identifier[_raw] [ literal[string] ][ literal[string] ])
keyword[elif] identifier[self] . identifier[_raw] [ literal[string] ][ literal[string] ] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[np] . identifier[asarray] ( identifier[self] . identifier[_raw] [ literal[string] ])
keyword[else] :
keyword[return] identifier[self] . identifier[_raw] [ literal[string] ] | def result(self):
"""
The result of the job if available
throws ValueError is result is not available yet
throws ApiError if server returned an error indicating program execution was not successful
or if the job was cancelled
"""
if not self.is_done():
raise ValueError("Cannot get a result for a program that isn't completed.") # depends on [control=['if'], data=[]]
if self._raw['status'] == 'CANCELLED':
raise CancellationError(self._raw['result']) # depends on [control=['if'], data=[]]
elif self._raw['status'] == 'ERROR':
if self._machine == 'QVM':
raise QVMError(self._raw['result']) # depends on [control=['if'], data=[]]
elif self._machine == 'QPU':
raise QPUError(self._raw['result']) # depends on [control=['if'], data=[]]
elif self._machine == 'QUILC':
raise QUILCError(self._raw['result']) # depends on [control=['if'], data=[]]
else:
raise UnknownApiError(self._raw['result']) # depends on [control=['if'], data=[]]
if self._raw['program']['type'] == 'wavefunction':
return Wavefunction.from_bit_packed_string(base64.b64decode(self._raw['result']), self._raw['program']['addresses']) # depends on [control=['if'], data=[]]
elif self._raw['program']['type'] in ['multishot', 'multishot-measure', 'expectation']:
return np.asarray(self._raw['result']) # depends on [control=['if'], data=[]]
else:
return self._raw['result'] |
def download_vod(self, video_id):
    """
    This will return a byte string of the M3U8 playlist data
    (which contains more links to segments of the vod)

    :param video_id: vod identifier prefixed by a single type character
        (the leading character is stripped before use)
    """
    # strip the one-character type prefix to get the numeric vod id
    vod_id = video_id[1:]
    # fetch an access token first; its sig/token values authorize the playlist request
    token = self._request_get(
        'vods/{}/access_token'.format(vod_id), url='https://api.twitch.tv/api/')
    params = {
        'nauthsig': token['sig'],
        'nauth': token['token']
    }
    # json=False: the playlist endpoint returns raw M3U8 text, not JSON
    m3u8 = self._request_get(
        'vod/{}'.format(vod_id), url=VOD_FETCH_URL, params=params, json=False)
    return m3u8.content | def function[download_vod, parameter[self, video_id]]:
constant[
This will return a byte string of the M3U8 playlist data
(which contains more links to segments of the vod)
]
variable[vod_id] assign[=] call[name[video_id]][<ast.Slice object at 0x7da207f01930>]
variable[token] assign[=] call[name[self]._request_get, parameter[call[constant[vods/{}/access_token].format, parameter[name[vod_id]]]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da207f00f10>, <ast.Constant object at 0x7da2054a7a30>], [<ast.Subscript object at 0x7da2054a5390>, <ast.Subscript object at 0x7da2054a6680>]]
variable[m3u8] assign[=] call[name[self]._request_get, parameter[call[constant[vod/{}].format, parameter[name[vod_id]]]]]
return[name[m3u8].content] | keyword[def] identifier[download_vod] ( identifier[self] , identifier[video_id] ):
literal[string]
identifier[vod_id] = identifier[video_id] [ literal[int] :]
identifier[token] = identifier[self] . identifier[_request_get] (
literal[string] . identifier[format] ( identifier[vod_id] ), identifier[url] = literal[string] )
identifier[params] ={
literal[string] : identifier[token] [ literal[string] ],
literal[string] : identifier[token] [ literal[string] ]
}
identifier[m3u8] = identifier[self] . identifier[_request_get] (
literal[string] . identifier[format] ( identifier[vod_id] ), identifier[url] = identifier[VOD_FETCH_URL] , identifier[params] = identifier[params] , identifier[json] = keyword[False] )
keyword[return] identifier[m3u8] . identifier[content] | def download_vod(self, video_id):
"""
This will return a byte string of the M3U8 playlist data
(which contains more links to segments of the vod)
"""
vod_id = video_id[1:]
token = self._request_get('vods/{}/access_token'.format(vod_id), url='https://api.twitch.tv/api/')
params = {'nauthsig': token['sig'], 'nauth': token['token']}
m3u8 = self._request_get('vod/{}'.format(vod_id), url=VOD_FETCH_URL, params=params, json=False)
return m3u8.content |
def _check_or_enforce_type(self, value):
    """
    Depending on whether enforce_type is enabled call self.enforce_type and
    return the result or call it and trigger a silent warning if the result
    is different or a Traceback

    To aid with migration, enable the warnings with:
    warnings.simplefilter("always", FailingEnforceTypeWarning)
    warnings.simplefilter("always", ModifyingEnforceTypeWarning)
    """
    if self._enable_enforce_type:
        return self.enforce_type(value)
    # enforcement disabled: dry-run enforce_type and warn about differences
    try:
        new_value = self.enforce_type(value)
    except:  # pylint: disable=bare-except
        # only the last traceback line is included in the warning message
        message = "The value {!r} could not be enforced ({})".format(
            value, traceback.format_exc().splitlines()[-1])
        warnings.warn(message, FailingEnforceTypeWarning, stacklevel=3)
    else:
        try:
            equal = value == new_value
        except TypeError:
            # values that cannot be compared are treated as unequal
            equal = False
        if not equal:
            message = "The value {!r} would be enforced to {!r}".format(
                value, new_value)
            warnings.warn(message, ModifyingEnforceTypeWarning, stacklevel=3)
    # in dry-run mode the original (unenforced) value is always returned
    return value | def function[_check_or_enforce_type, parameter[self, value]]:
constant[
Depending on whether enforce_type is enabled call self.enforce_type and
return the result or call it and trigger a silent warning if the result
is different or a Traceback
To aid with migration, enable the warnings with:
warnings.simplefilter("always", FailingEnforceTypeWarning)
warnings.simplefilter("always", ModifyingEnforceTypeWarning)
]
if name[self]._enable_enforce_type begin[:]
return[call[name[self].enforce_type, parameter[name[value]]]]
<ast.Try object at 0x7da18bccbe80>
return[name[value]] | keyword[def] identifier[_check_or_enforce_type] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[self] . identifier[_enable_enforce_type] :
keyword[return] identifier[self] . identifier[enforce_type] ( identifier[value] )
keyword[try] :
identifier[new_value] = identifier[self] . identifier[enforce_type] ( identifier[value] )
keyword[except] :
identifier[message] = literal[string] . identifier[format] (
identifier[value] , identifier[traceback] . identifier[format_exc] (). identifier[splitlines] ()[- literal[int] ])
identifier[warnings] . identifier[warn] ( identifier[message] , identifier[FailingEnforceTypeWarning] , identifier[stacklevel] = literal[int] )
keyword[else] :
keyword[try] :
identifier[equal] = identifier[value] == identifier[new_value]
keyword[except] identifier[TypeError] :
identifier[equal] = keyword[False]
keyword[if] keyword[not] identifier[equal] :
identifier[message] = literal[string] . identifier[format] (
identifier[value] , identifier[new_value] )
identifier[warnings] . identifier[warn] ( identifier[message] , identifier[ModifyingEnforceTypeWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[value] | def _check_or_enforce_type(self, value):
"""
Depending on whether enforce_type is enabled call self.enforce_type and
return the result or call it and trigger a silent warning if the result
is different or a Traceback
To aid with migration, enable the warnings with:
warnings.simplefilter("always", FailingEnforceTypeWarning)
warnings.simplefilter("always", ModifyingEnforceTypeWarning)
"""
if self._enable_enforce_type:
return self.enforce_type(value) # depends on [control=['if'], data=[]]
try:
new_value = self.enforce_type(value) # depends on [control=['try'], data=[]]
except: # pylint: disable=bare-except
message = 'The value {!r} could not be enforced ({})'.format(value, traceback.format_exc().splitlines()[-1])
warnings.warn(message, FailingEnforceTypeWarning, stacklevel=3) # depends on [control=['except'], data=[]]
else:
try:
equal = value == new_value # depends on [control=['try'], data=[]]
except TypeError:
equal = False # depends on [control=['except'], data=[]]
if not equal:
message = 'The value {!r} would be enforced to {!r}'.format(value, new_value)
warnings.warn(message, ModifyingEnforceTypeWarning, stacklevel=3) # depends on [control=['if'], data=[]]
return value |
def _reduce_degree(bqm, poly, vartype, scale):
    """helper function for make_quadratic

    Recursively replaces the most frequent variable pair in the higher-order
    polynomial ``poly`` with a product variable (plus the corresponding
    constraint added to ``bqm``) until every term is at most quadratic.
    """
    if all(len(term) <= 2 for term in poly):
        # termination criteria, we are already quadratic
        bqm.add_interactions_from(poly)
        return bqm
    # determine which pair of variables appear most often
    paircounter = Counter()
    for term in poly:
        if len(term) > 2:
            for u, v in itertools.combinations(term, 2):
                pair = frozenset((u, v))
                paircounter[pair] += 1
    pair, __ = paircounter.most_common(1)[0]
    u, v = pair
    # make a new product variable and aux variable and add constraint that u*v == p
    p = '{}*{}'.format(u, v)
    while p in bqm.linear:
        # avoid colliding with an existing variable name
        p = '_' + p
    if vartype is Vartype.BINARY:
        constraint = _binary_product([u, v, p])
        bqm.info['reduction'][(u, v)] = {'product': p}
    else:
        # SPIN products require an auxiliary variable in the constraint gadget
        aux = 'aux{},{}'.format(u, v)
        while aux in bqm.linear:
            aux = '_' + aux
        constraint = _spin_product([u, v, p, aux])
        bqm.info['reduction'][(u, v)] = {'product': p, 'auxiliary': aux}
    constraint.scale(scale)
    bqm.update(constraint)
    # rewrite the polynomial, substituting the product variable p for (u, v)
    new_poly = {}
    for interaction, bias in poly.items():
        if u in interaction and v in interaction:
            if len(interaction) == 2:
                # in this case we are reducing a quadratic bias, so it becomes linear and can
                # be removed
                assert len(interaction) >= 2
                bqm.add_variable(p, bias)
                continue
            interaction = tuple(s for s in interaction if s not in pair)
            interaction += (p,)
        if interaction in new_poly:
            new_poly[interaction] += bias
        else:
            new_poly[interaction] = bias
    return _reduce_degree(bqm, new_poly, vartype, scale) | def function[_reduce_degree, parameter[bqm, poly, vartype, scale]]:
constant[helper function for make_quadratic]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b0763280>]] begin[:]
call[name[bqm].add_interactions_from, parameter[name[poly]]]
return[name[bqm]]
variable[paircounter] assign[=] call[name[Counter], parameter[]]
for taget[name[term]] in starred[name[poly]] begin[:]
if compare[call[name[len], parameter[name[term]]] greater[>] constant[2]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0760b80>, <ast.Name object at 0x7da1b07636d0>]]] in starred[call[name[itertools].combinations, parameter[name[term], constant[2]]]] begin[:]
variable[pair] assign[=] call[name[frozenset], parameter[tuple[[<ast.Name object at 0x7da1b07605e0>, <ast.Name object at 0x7da1b0762e00>]]]]
<ast.AugAssign object at 0x7da1b0760640>
<ast.Tuple object at 0x7da1b0760790> assign[=] call[call[name[paircounter].most_common, parameter[constant[1]]]][constant[0]]
<ast.Tuple object at 0x7da1b0761de0> assign[=] name[pair]
variable[p] assign[=] call[constant[{}*{}].format, parameter[name[u], name[v]]]
while compare[name[p] in name[bqm].linear] begin[:]
variable[p] assign[=] binary_operation[constant[_] + name[p]]
if compare[name[vartype] is name[Vartype].BINARY] begin[:]
variable[constraint] assign[=] call[name[_binary_product], parameter[list[[<ast.Name object at 0x7da1b0761d80>, <ast.Name object at 0x7da1b07627a0>, <ast.Name object at 0x7da1b0761240>]]]]
call[call[name[bqm].info][constant[reduction]]][tuple[[<ast.Name object at 0x7da1b07621d0>, <ast.Name object at 0x7da1b07639d0>]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b07629e0>], [<ast.Name object at 0x7da1b0762a10>]]
call[name[constraint].scale, parameter[name[scale]]]
call[name[bqm].update, parameter[name[constraint]]]
variable[new_poly] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b06ff9d0>, <ast.Name object at 0x7da1b06ff460>]]] in starred[call[name[poly].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b06fc2b0> begin[:]
if compare[call[name[len], parameter[name[interaction]]] equal[==] constant[2]] begin[:]
assert[compare[call[name[len], parameter[name[interaction]]] greater_or_equal[>=] constant[2]]]
call[name[bqm].add_variable, parameter[name[p], name[bias]]]
continue
variable[interaction] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b078ef20>]]
<ast.AugAssign object at 0x7da1b078ef80>
if compare[name[interaction] in name[new_poly]] begin[:]
<ast.AugAssign object at 0x7da1b078f040>
return[call[name[_reduce_degree], parameter[name[bqm], name[new_poly], name[vartype], name[scale]]]] | keyword[def] identifier[_reduce_degree] ( identifier[bqm] , identifier[poly] , identifier[vartype] , identifier[scale] ):
literal[string]
keyword[if] identifier[all] ( identifier[len] ( identifier[term] )<= literal[int] keyword[for] identifier[term] keyword[in] identifier[poly] ):
identifier[bqm] . identifier[add_interactions_from] ( identifier[poly] )
keyword[return] identifier[bqm]
identifier[paircounter] = identifier[Counter] ()
keyword[for] identifier[term] keyword[in] identifier[poly] :
keyword[if] identifier[len] ( identifier[term] )> literal[int] :
keyword[for] identifier[u] , identifier[v] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[term] , literal[int] ):
identifier[pair] = identifier[frozenset] (( identifier[u] , identifier[v] ))
identifier[paircounter] [ identifier[pair] ]+= literal[int]
identifier[pair] , identifier[__] = identifier[paircounter] . identifier[most_common] ( literal[int] )[ literal[int] ]
identifier[u] , identifier[v] = identifier[pair]
identifier[p] = literal[string] . identifier[format] ( identifier[u] , identifier[v] )
keyword[while] identifier[p] keyword[in] identifier[bqm] . identifier[linear] :
identifier[p] = literal[string] + identifier[p]
keyword[if] identifier[vartype] keyword[is] identifier[Vartype] . identifier[BINARY] :
identifier[constraint] = identifier[_binary_product] ([ identifier[u] , identifier[v] , identifier[p] ])
identifier[bqm] . identifier[info] [ literal[string] ][( identifier[u] , identifier[v] )]={ literal[string] : identifier[p] }
keyword[else] :
identifier[aux] = literal[string] . identifier[format] ( identifier[u] , identifier[v] )
keyword[while] identifier[aux] keyword[in] identifier[bqm] . identifier[linear] :
identifier[aux] = literal[string] + identifier[aux]
identifier[constraint] = identifier[_spin_product] ([ identifier[u] , identifier[v] , identifier[p] , identifier[aux] ])
identifier[bqm] . identifier[info] [ literal[string] ][( identifier[u] , identifier[v] )]={ literal[string] : identifier[p] , literal[string] : identifier[aux] }
identifier[constraint] . identifier[scale] ( identifier[scale] )
identifier[bqm] . identifier[update] ( identifier[constraint] )
identifier[new_poly] ={}
keyword[for] identifier[interaction] , identifier[bias] keyword[in] identifier[poly] . identifier[items] ():
keyword[if] identifier[u] keyword[in] identifier[interaction] keyword[and] identifier[v] keyword[in] identifier[interaction] :
keyword[if] identifier[len] ( identifier[interaction] )== literal[int] :
keyword[assert] identifier[len] ( identifier[interaction] )>= literal[int]
identifier[bqm] . identifier[add_variable] ( identifier[p] , identifier[bias] )
keyword[continue]
identifier[interaction] = identifier[tuple] ( identifier[s] keyword[for] identifier[s] keyword[in] identifier[interaction] keyword[if] identifier[s] keyword[not] keyword[in] identifier[pair] )
identifier[interaction] +=( identifier[p] ,)
keyword[if] identifier[interaction] keyword[in] identifier[new_poly] :
identifier[new_poly] [ identifier[interaction] ]+= identifier[bias]
keyword[else] :
identifier[new_poly] [ identifier[interaction] ]= identifier[bias]
keyword[return] identifier[_reduce_degree] ( identifier[bqm] , identifier[new_poly] , identifier[vartype] , identifier[scale] ) | def _reduce_degree(bqm, poly, vartype, scale):
"""helper function for make_quadratic"""
if all((len(term) <= 2 for term in poly)):
# termination criteria, we are already quadratic
bqm.add_interactions_from(poly)
return bqm # depends on [control=['if'], data=[]]
# determine which pair of variables appear most often
paircounter = Counter()
for term in poly:
if len(term) > 2:
for (u, v) in itertools.combinations(term, 2):
pair = frozenset((u, v))
paircounter[pair] += 1 # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['term']]
(pair, __) = paircounter.most_common(1)[0]
(u, v) = pair
# make a new product variable and aux variable and add constraint that u*v == p
p = '{}*{}'.format(u, v)
while p in bqm.linear:
p = '_' + p # depends on [control=['while'], data=['p']]
if vartype is Vartype.BINARY:
constraint = _binary_product([u, v, p])
bqm.info['reduction'][u, v] = {'product': p} # depends on [control=['if'], data=[]]
else:
aux = 'aux{},{}'.format(u, v)
while aux in bqm.linear:
aux = '_' + aux # depends on [control=['while'], data=['aux']]
constraint = _spin_product([u, v, p, aux])
bqm.info['reduction'][u, v] = {'product': p, 'auxiliary': aux}
constraint.scale(scale)
bqm.update(constraint)
new_poly = {}
for (interaction, bias) in poly.items():
if u in interaction and v in interaction:
if len(interaction) == 2:
# in this case we are reducing a quadratic bias, so it becomes linear and can
# be removed
assert len(interaction) >= 2
bqm.add_variable(p, bias)
continue # depends on [control=['if'], data=[]]
interaction = tuple((s for s in interaction if s not in pair))
interaction += (p,) # depends on [control=['if'], data=[]]
if interaction in new_poly:
new_poly[interaction] += bias # depends on [control=['if'], data=['interaction', 'new_poly']]
else:
new_poly[interaction] = bias # depends on [control=['for'], data=[]]
return _reduce_degree(bqm, new_poly, vartype, scale) |
def cli_put(context, path):
    """
    Performs a PUT on the item (account, container, or object).

    Dispatch rules:
    * if ``context.input_`` is a local directory, the directory structure
      is uploaded;
    * an empty path targets the account;
    * a path containing no '/' (ignoring a trailing one) targets a container;
    * anything else targets an object.

    See :py:mod:`swiftly.cli.put` for context usage information.

    See :py:class:`CLIPut` for more information.
    """
    path = path.lstrip('/') if path else ''
    if context.input_ and os.path.isdir(context.input_):
        return cli_put_directory_structure(context, path)
    if not path:
        return cli_put_account(context)
    elif '/' not in path.rstrip('/'):
        return cli_put_container(context, path)
    else:
        return cli_put_object(context, path) | def function[cli_put, parameter[context, path]]:
constant[
Performs a PUT on the item (account, container, or object).
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information.
]
variable[path] assign[=] <ast.IfExp object at 0x7da1b05c86a0>
if <ast.BoolOp object at 0x7da1b05cabc0> begin[:]
return[call[name[cli_put_directory_structure], parameter[name[context], name[path]]]]
if <ast.UnaryOp object at 0x7da1b05c8c40> begin[:]
return[call[name[cli_put_account], parameter[name[context]]]] | keyword[def] identifier[cli_put] ( identifier[context] , identifier[path] ):
literal[string]
identifier[path] = identifier[path] . identifier[lstrip] ( literal[string] ) keyword[if] identifier[path] keyword[else] literal[string]
keyword[if] identifier[context] . identifier[input_] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[context] . identifier[input_] ):
keyword[return] identifier[cli_put_directory_structure] ( identifier[context] , identifier[path] )
keyword[if] keyword[not] identifier[path] :
keyword[return] identifier[cli_put_account] ( identifier[context] )
keyword[elif] literal[string] keyword[not] keyword[in] identifier[path] . identifier[rstrip] ( literal[string] ):
keyword[return] identifier[cli_put_container] ( identifier[context] , identifier[path] )
keyword[else] :
keyword[return] identifier[cli_put_object] ( identifier[context] , identifier[path] ) | def cli_put(context, path):
"""
Performs a PUT on the item (account, container, or object).
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information.
"""
path = path.lstrip('/') if path else ''
if context.input_ and os.path.isdir(context.input_):
return cli_put_directory_structure(context, path) # depends on [control=['if'], data=[]]
if not path:
return cli_put_account(context) # depends on [control=['if'], data=[]]
elif '/' not in path.rstrip('/'):
return cli_put_container(context, path) # depends on [control=['if'], data=[]]
else:
return cli_put_object(context, path) |
def init(options):
    """ Initialize some defaults

    Populates the module-level ``livvkit`` configuration (output paths,
    verify/validate flags, bundle-specific configs and modules) from the
    parsed command-line ``options``, and exits with status 1 if required
    directories are missing or no tests were requested.
    """
    # Set matlplotlib's backend so LIVVkit can plot to files.
    import matplotlib
    matplotlib.use('agg')
    livvkit.output_dir = os.path.abspath(options.out_dir)
    livvkit.index_dir = livvkit.output_dir
    livvkit.verify = True if options.verify is not None else False
    livvkit.validate = True if options.validate is not None else False
    livvkit.publish = options.publish
    # Get a list of bundles that provide model specific implementations
    available_bundles = [mod for imp, mod, ispkg in pkgutil.iter_modules(bundles.__path__)]
    if options.verify is not None:
        # --verify takes two directories: the model run and the benchmark
        livvkit.model_dir = os.path.normpath(options.verify[0])
        livvkit.bench_dir = os.path.normpath(options.verify[1])
        if not os.path.isdir(livvkit.model_dir):
            print("")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("                    UH OH!")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("    Your comparison directory does not exist; please check")
            print("    the path:")
            print("\n"+livvkit.model_dir+"\n\n")
            sys.exit(1)
        if not os.path.isdir(livvkit.bench_dir):
            print("")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("                    UH OH!")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("    Your benchmark directory does not exist; please check")
            print("    the path:")
            print("\n"+livvkit.bench_dir+"\n\n")
            sys.exit(1)
        # bundle name is taken from the directory's basename
        livvkit.model_bundle = os.path.basename(livvkit.model_dir)
        livvkit.bench_bundle = os.path.basename(livvkit.bench_dir)
        if livvkit.model_bundle in available_bundles:
            # load the bundle-specific configs and implementation modules
            livvkit.numerics_model_config = os.path.join(
                    livvkit.bundle_dir, livvkit.model_bundle, "numerics.json")
            livvkit.numerics_model_module = importlib.import_module(
                    ".".join(["livvkit.bundles", livvkit.model_bundle, "numerics"]))
            livvkit.verification_model_config = os.path.join(
                    livvkit.bundle_dir, livvkit.model_bundle, "verification.json")
            livvkit.verification_model_module = importlib.import_module(
                    ".".join(["livvkit.bundles", livvkit.model_bundle, "verification"]))
            livvkit.performance_model_config = os.path.join(
                    livvkit.bundle_dir, livvkit.model_bundle, "performance.json")
            # NOTE: This isn't used right now...
            # livvkit.performance_model_module = importlib.import_module(
            #        ".".join(["livvkit.bundles", livvkit.model_bundle, "performance"]))
        else:
            # TODO: Should implement some error checking here...
            # unknown bundle: silently disable verification
            livvkit.verify = False
    if options.validate is not None:
        livvkit.validation_model_configs = options.validate
    # nothing to do: bail out unless the user only asked to serve results
    if not (livvkit.verify or livvkit.validate) and not options.serve:
        print("")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print("                    UH OH!")
        print("----------------------------------------------------------")
        print("   No verification or validation tests found/submitted!")
        print("")
        print("   Use either one or both of the --verify and")
        print("   --validate options to run tests.  For more ")
        print("   information use the --help option, view the README")
        print("   or check https://livvkit.github.io/Docs/")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print("")
        sys.exit(1)
    return options | def function[init, parameter[options]]:
constant[ Initialize some defaults ]
import module[matplotlib]
call[name[matplotlib].use, parameter[constant[agg]]]
name[livvkit].output_dir assign[=] call[name[os].path.abspath, parameter[name[options].out_dir]]
name[livvkit].index_dir assign[=] name[livvkit].output_dir
name[livvkit].verify assign[=] <ast.IfExp object at 0x7da1b0b60e50>
name[livvkit].validate assign[=] <ast.IfExp object at 0x7da1b0b62080>
name[livvkit].publish assign[=] name[options].publish
variable[available_bundles] assign[=] <ast.ListComp object at 0x7da1b0b60af0>
if compare[name[options].verify is_not constant[None]] begin[:]
name[livvkit].model_dir assign[=] call[name[os].path.normpath, parameter[call[name[options].verify][constant[0]]]]
name[livvkit].bench_dir assign[=] call[name[os].path.normpath, parameter[call[name[options].verify][constant[1]]]]
if <ast.UnaryOp object at 0x7da1b0b63670> begin[:]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[ UH OH!]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[ Your comparison directory does not exist; please check]]]
call[name[print], parameter[constant[ the path:]]]
call[name[print], parameter[binary_operation[binary_operation[constant[
] + name[livvkit].model_dir] + constant[
]]]]
call[name[sys].exit, parameter[constant[1]]]
if <ast.UnaryOp object at 0x7da1b0b63610> begin[:]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[ UH OH!]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[ Your benchmark directory does not exist; please check]]]
call[name[print], parameter[constant[ the path:]]]
call[name[print], parameter[binary_operation[binary_operation[constant[
] + name[livvkit].bench_dir] + constant[
]]]]
call[name[sys].exit, parameter[constant[1]]]
name[livvkit].model_bundle assign[=] call[name[os].path.basename, parameter[name[livvkit].model_dir]]
name[livvkit].bench_bundle assign[=] call[name[os].path.basename, parameter[name[livvkit].bench_dir]]
if compare[name[livvkit].model_bundle in name[available_bundles]] begin[:]
name[livvkit].numerics_model_config assign[=] call[name[os].path.join, parameter[name[livvkit].bundle_dir, name[livvkit].model_bundle, constant[numerics.json]]]
name[livvkit].numerics_model_module assign[=] call[name[importlib].import_module, parameter[call[constant[.].join, parameter[list[[<ast.Constant object at 0x7da1b0b3abc0>, <ast.Attribute object at 0x7da1b0b38cd0>, <ast.Constant object at 0x7da1b0b38d30>]]]]]]
name[livvkit].verification_model_config assign[=] call[name[os].path.join, parameter[name[livvkit].bundle_dir, name[livvkit].model_bundle, constant[verification.json]]]
name[livvkit].verification_model_module assign[=] call[name[importlib].import_module, parameter[call[constant[.].join, parameter[list[[<ast.Constant object at 0x7da1b0b3b910>, <ast.Attribute object at 0x7da1b0b38fd0>, <ast.Constant object at 0x7da1b0b39c30>]]]]]]
name[livvkit].performance_model_config assign[=] call[name[os].path.join, parameter[name[livvkit].bundle_dir, name[livvkit].model_bundle, constant[performance.json]]]
if compare[name[options].validate is_not constant[None]] begin[:]
name[livvkit].validation_model_configs assign[=] name[options].validate
if <ast.BoolOp object at 0x7da1b0b39690> begin[:]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[ UH OH!]]]
call[name[print], parameter[constant[----------------------------------------------------------]]]
call[name[print], parameter[constant[ No verification or validation tests found/submitted!]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[ Use either one or both of the --verify and]]]
call[name[print], parameter[constant[ --validate options to run tests. For more ]]]
call[name[print], parameter[constant[ information use the --help option, view the README]]]
call[name[print], parameter[constant[ or check https://livvkit.github.io/Docs/]]]
call[name[print], parameter[constant[!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!]]]
call[name[print], parameter[constant[]]]
call[name[sys].exit, parameter[constant[1]]]
return[name[options]] | keyword[def] identifier[init] ( identifier[options] ):
literal[string]
keyword[import] identifier[matplotlib]
identifier[matplotlib] . identifier[use] ( literal[string] )
identifier[livvkit] . identifier[output_dir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[options] . identifier[out_dir] )
identifier[livvkit] . identifier[index_dir] = identifier[livvkit] . identifier[output_dir]
identifier[livvkit] . identifier[verify] = keyword[True] keyword[if] identifier[options] . identifier[verify] keyword[is] keyword[not] keyword[None] keyword[else] keyword[False]
identifier[livvkit] . identifier[validate] = keyword[True] keyword[if] identifier[options] . identifier[validate] keyword[is] keyword[not] keyword[None] keyword[else] keyword[False]
identifier[livvkit] . identifier[publish] = identifier[options] . identifier[publish]
identifier[available_bundles] =[ identifier[mod] keyword[for] identifier[imp] , identifier[mod] , identifier[ispkg] keyword[in] identifier[pkgutil] . identifier[iter_modules] ( identifier[bundles] . identifier[__path__] )]
keyword[if] identifier[options] . identifier[verify] keyword[is] keyword[not] keyword[None] :
identifier[livvkit] . identifier[model_dir] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[options] . identifier[verify] [ literal[int] ])
identifier[livvkit] . identifier[bench_dir] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[options] . identifier[verify] [ literal[int] ])
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[livvkit] . identifier[model_dir] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] + identifier[livvkit] . identifier[model_dir] + literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[livvkit] . identifier[bench_dir] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] + identifier[livvkit] . identifier[bench_dir] + literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[livvkit] . identifier[model_bundle] = identifier[os] . identifier[path] . identifier[basename] ( identifier[livvkit] . identifier[model_dir] )
identifier[livvkit] . identifier[bench_bundle] = identifier[os] . identifier[path] . identifier[basename] ( identifier[livvkit] . identifier[bench_dir] )
keyword[if] identifier[livvkit] . identifier[model_bundle] keyword[in] identifier[available_bundles] :
identifier[livvkit] . identifier[numerics_model_config] = identifier[os] . identifier[path] . identifier[join] (
identifier[livvkit] . identifier[bundle_dir] , identifier[livvkit] . identifier[model_bundle] , literal[string] )
identifier[livvkit] . identifier[numerics_model_module] = identifier[importlib] . identifier[import_module] (
literal[string] . identifier[join] ([ literal[string] , identifier[livvkit] . identifier[model_bundle] , literal[string] ]))
identifier[livvkit] . identifier[verification_model_config] = identifier[os] . identifier[path] . identifier[join] (
identifier[livvkit] . identifier[bundle_dir] , identifier[livvkit] . identifier[model_bundle] , literal[string] )
identifier[livvkit] . identifier[verification_model_module] = identifier[importlib] . identifier[import_module] (
literal[string] . identifier[join] ([ literal[string] , identifier[livvkit] . identifier[model_bundle] , literal[string] ]))
identifier[livvkit] . identifier[performance_model_config] = identifier[os] . identifier[path] . identifier[join] (
identifier[livvkit] . identifier[bundle_dir] , identifier[livvkit] . identifier[model_bundle] , literal[string] )
keyword[else] :
identifier[livvkit] . identifier[verify] = keyword[False]
keyword[if] identifier[options] . identifier[validate] keyword[is] keyword[not] keyword[None] :
identifier[livvkit] . identifier[validation_model_configs] = identifier[options] . identifier[validate]
keyword[if] keyword[not] ( identifier[livvkit] . identifier[verify] keyword[or] identifier[livvkit] . identifier[validate] ) keyword[and] keyword[not] identifier[options] . identifier[serve] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[options] | def init(options):
""" Initialize some defaults """
# Set matlplotlib's backend so LIVVkit can plot to files.
import matplotlib
matplotlib.use('agg')
livvkit.output_dir = os.path.abspath(options.out_dir)
livvkit.index_dir = livvkit.output_dir
livvkit.verify = True if options.verify is not None else False
livvkit.validate = True if options.validate is not None else False
livvkit.publish = options.publish
# Get a list of bundles that provide model specific implementations
available_bundles = [mod for (imp, mod, ispkg) in pkgutil.iter_modules(bundles.__path__)]
if options.verify is not None:
livvkit.model_dir = os.path.normpath(options.verify[0])
livvkit.bench_dir = os.path.normpath(options.verify[1])
if not os.path.isdir(livvkit.model_dir):
print('')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(' UH OH!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(' Your comparison directory does not exist; please check')
print(' the path:')
print('\n' + livvkit.model_dir + '\n\n')
sys.exit(1) # depends on [control=['if'], data=[]]
if not os.path.isdir(livvkit.bench_dir):
print('')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(' UH OH!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(' Your benchmark directory does not exist; please check')
print(' the path:')
print('\n' + livvkit.bench_dir + '\n\n')
sys.exit(1) # depends on [control=['if'], data=[]]
livvkit.model_bundle = os.path.basename(livvkit.model_dir)
livvkit.bench_bundle = os.path.basename(livvkit.bench_dir)
if livvkit.model_bundle in available_bundles:
livvkit.numerics_model_config = os.path.join(livvkit.bundle_dir, livvkit.model_bundle, 'numerics.json')
livvkit.numerics_model_module = importlib.import_module('.'.join(['livvkit.bundles', livvkit.model_bundle, 'numerics']))
livvkit.verification_model_config = os.path.join(livvkit.bundle_dir, livvkit.model_bundle, 'verification.json')
livvkit.verification_model_module = importlib.import_module('.'.join(['livvkit.bundles', livvkit.model_bundle, 'verification']))
livvkit.performance_model_config = os.path.join(livvkit.bundle_dir, livvkit.model_bundle, 'performance.json') # depends on [control=['if'], data=[]]
else:
# NOTE: This isn't used right now...
# livvkit.performance_model_module = importlib.import_module(
# ".".join(["livvkit.bundles", livvkit.model_bundle, "performance"]))
# TODO: Should implement some error checking here...
livvkit.verify = False # depends on [control=['if'], data=[]]
if options.validate is not None:
livvkit.validation_model_configs = options.validate # depends on [control=['if'], data=[]]
if not (livvkit.verify or livvkit.validate) and (not options.serve):
print('')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(' UH OH!')
print('----------------------------------------------------------')
print(' No verification or validation tests found/submitted!')
print('')
print(' Use either one or both of the --verify and')
print(' --validate options to run tests. For more ')
print(' information use the --help option, view the README')
print(' or check https://livvkit.github.io/Docs/')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('')
sys.exit(1) # depends on [control=['if'], data=[]]
return options |
def calculate_size(name, expected, updated):
    """Calculate the request payload size in bytes.

    The payload is the encoded *name*, plus one boolean nullability marker
    per optional value, plus the serialized ``expected`` / ``updated``
    payloads when they are present.
    """
    parts = [calculate_size_str(name), BOOLEAN_SIZE_IN_BYTES]
    if expected is not None:
        parts.append(calculate_size_data(expected))
    # Second boolean marker flags whether ``updated`` follows.
    parts.append(BOOLEAN_SIZE_IN_BYTES)
    if updated is not None:
        parts.append(calculate_size_data(updated))
    return sum(parts)
constant[ Calculates the request payload size]
variable[data_size] assign[=] constant[0]
<ast.AugAssign object at 0x7da2046225c0>
<ast.AugAssign object at 0x7da1b2347a00>
if compare[name[expected] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b23479a0>
<ast.AugAssign object at 0x7da1b2344550>
if compare[name[updated] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b2347ac0>
return[name[data_size]] | keyword[def] identifier[calculate_size] ( identifier[name] , identifier[expected] , identifier[updated] ):
literal[string]
identifier[data_size] = literal[int]
identifier[data_size] += identifier[calculate_size_str] ( identifier[name] )
identifier[data_size] += identifier[BOOLEAN_SIZE_IN_BYTES]
keyword[if] identifier[expected] keyword[is] keyword[not] keyword[None] :
identifier[data_size] += identifier[calculate_size_data] ( identifier[expected] )
identifier[data_size] += identifier[BOOLEAN_SIZE_IN_BYTES]
keyword[if] identifier[updated] keyword[is] keyword[not] keyword[None] :
identifier[data_size] += identifier[calculate_size_data] ( identifier[updated] )
keyword[return] identifier[data_size] | def calculate_size(name, expected, updated):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += BOOLEAN_SIZE_IN_BYTES
if expected is not None:
data_size += calculate_size_data(expected) # depends on [control=['if'], data=['expected']]
data_size += BOOLEAN_SIZE_IN_BYTES
if updated is not None:
data_size += calculate_size_data(updated) # depends on [control=['if'], data=['updated']]
return data_size |
def set_value(self, section, option, value):
    """Store *value* for *option* under *section*, creating the section on demand.

    Unlike the stock ConfigParser ``set`` method, this never raises for a
    missing section; the value is stringified before being stored.

    :param section: Name of the section in which the option resides or should reside
    :param option: Name of the option whose value to set
    :param value: Value to set the option to. It must be a string or convertible
        to a string
    :return: this instance
    """
    section_missing = not self.has_section(section)
    if section_missing:
        self.add_section(section)
    stringified = self._value_to_string(value)
    self.set(section, option, stringified)
    return self
return self | def function[set_value, parameter[self, section, option, value]]:
constant[Sets the given option in section to the given value.
It will create the section if required, and will not throw as opposed to the default
ConfigParser 'set' method.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the options whose value to set
:param value: Value to set the option to. It must be a string or convertible
to a string
:return: this instance]
if <ast.UnaryOp object at 0x7da1b22a2410> begin[:]
call[name[self].add_section, parameter[name[section]]]
call[name[self].set, parameter[name[section], name[option], call[name[self]._value_to_string, parameter[name[value]]]]]
return[name[self]] | keyword[def] identifier[set_value] ( identifier[self] , identifier[section] , identifier[option] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_section] ( identifier[section] ):
identifier[self] . identifier[add_section] ( identifier[section] )
identifier[self] . identifier[set] ( identifier[section] , identifier[option] , identifier[self] . identifier[_value_to_string] ( identifier[value] ))
keyword[return] identifier[self] | def set_value(self, section, option, value):
"""Sets the given option in section to the given value.
It will create the section if required, and will not throw as opposed to the default
ConfigParser 'set' method.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the options whose value to set
:param value: Value to set the option to. It must be a string or convertible
to a string
:return: this instance"""
if not self.has_section(section):
self.add_section(section) # depends on [control=['if'], data=[]]
self.set(section, option, self._value_to_string(value))
return self |
def track_event(self, name, properties=None, measurements=None):
    """ Send information about a single event that has occurred in the context of the application.
    Args:
        name (str). the data to associate to this event.\n
        properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
        measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
    """
    event = channel.contracts.EventData()
    # Fall back to the shared null-string constant when no name was supplied.
    event.name = name if name else NULL_CONSTANT_STRING
    if properties:
        event.properties = properties
    if measurements:
        event.measurements = measurements
    self.track(event, self._context)
constant[ Send information about a single event that has occurred in the context of the application.
Args:
name (str). the data to associate to this event.
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
]
variable[data] assign[=] call[name[channel].contracts.EventData, parameter[]]
name[data].name assign[=] <ast.BoolOp object at 0x7da1b103b4f0>
if name[properties] begin[:]
name[data].properties assign[=] name[properties]
if name[measurements] begin[:]
name[data].measurements assign[=] name[measurements]
call[name[self].track, parameter[name[data], name[self]._context]] | keyword[def] identifier[track_event] ( identifier[self] , identifier[name] , identifier[properties] = keyword[None] , identifier[measurements] = keyword[None] ):
literal[string]
identifier[data] = identifier[channel] . identifier[contracts] . identifier[EventData] ()
identifier[data] . identifier[name] = identifier[name] keyword[or] identifier[NULL_CONSTANT_STRING]
keyword[if] identifier[properties] :
identifier[data] . identifier[properties] = identifier[properties]
keyword[if] identifier[measurements] :
identifier[data] . identifier[measurements] = identifier[measurements]
identifier[self] . identifier[track] ( identifier[data] , identifier[self] . identifier[_context] ) | def track_event(self, name, properties=None, measurements=None):
""" Send information about a single event that has occurred in the context of the application.
Args:
name (str). the data to associate to this event.
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
data = channel.contracts.EventData()
data.name = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties # depends on [control=['if'], data=[]]
if measurements:
data.measurements = measurements # depends on [control=['if'], data=[]]
self.track(data, self._context) |
def hpo_terms():
    """Render search box and view for HPO phenotype terms."""
    if request.method == 'GET':
        # Plain listing: show the first 100 terms.
        return controllers.hpo_terms(store=store, limit=100)
    # POST: the user searched for a specific term or phenotype.
    search_term = request.form.get('hpo_term')
    limit = request.form.get('limit')
    results = controllers.hpo_terms(store=store, query=search_term, limit=limit)
    return dict(results, query=search_term, limit=limit)
constant[Render search box and view for HPO phenotype terms]
if compare[name[request].method equal[==] constant[GET]] begin[:]
variable[data] assign[=] call[name[controllers].hpo_terms, parameter[]]
return[name[data]] | keyword[def] identifier[hpo_terms] ():
literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[data] = identifier[controllers] . identifier[hpo_terms] ( identifier[store] = identifier[store] , identifier[limit] = literal[int] )
keyword[return] identifier[data]
keyword[else] :
identifier[search_term] = identifier[request] . identifier[form] . identifier[get] ( literal[string] )
identifier[limit] = identifier[request] . identifier[form] . identifier[get] ( literal[string] )
identifier[data] = identifier[controllers] . identifier[hpo_terms] ( identifier[store] = identifier[store] , identifier[query] = identifier[search_term] , identifier[limit] = identifier[limit] )
keyword[return] identifier[dict] ( identifier[data] , identifier[query] = identifier[search_term] , identifier[limit] = identifier[limit] ) | def hpo_terms():
"""Render search box and view for HPO phenotype terms"""
if request.method == 'GET':
data = controllers.hpo_terms(store=store, limit=100)
return data # depends on [control=['if'], data=[]]
else: # POST. user is searching for a specific term or phenotype
search_term = request.form.get('hpo_term')
limit = request.form.get('limit')
data = controllers.hpo_terms(store=store, query=search_term, limit=limit)
return dict(data, query=search_term, limit=limit) |
def resample(f, rate=None, delay=None, micro=False, trace=True, binary=True):
    """Resample a file.

    Updates the sample-rate and recording-delay interpretation of every
    trace; no actual data samples are touched, only header fields and the
    file handle's ``samples`` axis.

    ``rate`` is in milliseconds unless ``micro=True``, in which case it is
    taken as microseconds (the on-disk representation). ``delay`` is always
    milliseconds. Pass ``binary=False`` or ``trace=False`` to leave the
    binary-header or trace-header interval fields untouched; the delay, when
    given, is always written to the trace headers.

    .. warning::
        This function requires an open file handle and is **DESTRUCTIVE**.
        It modifies the file; if an exception is raised, partial writes may
        have happened and the file might be corrupted.

    This function assumes all traces have uniform delays and frequencies.

    Parameters
    ----------
    f : SegyFile
    rate : int
    delay : int
    micro : bool
        if True, interpret rate as microseconds
    trace : bool
        Update the trace header if True
    binary : bool
        Update the binary header if True

    Notes
    -----
    .. versionadded:: 1.4
    """
    if rate is not None:
        # Normalise to microseconds, the unit stored on disk.
        rate = rate if micro else rate * 1000
        if binary:
            f.bin[segyio.su.hdt] = rate
        if trace:
            f.header = {segyio.su.dt: rate}

    if delay is not None:
        f.header = {segyio.su.delrt: delay}

    start = f.samples[0] if delay is None else delay
    if rate is None:
        step = f.samples[1] - f.samples[0]
    else:
        step = rate / 1000
    f._samples = step * np.arange(len(f.samples)) + start
    return f
constant[Resample a file
Resample all data traces, and update the file handle to reflect the new
sample rate. No actual samples (data traces) are modified, only the header
fields and interpretation.
By default, the rate and the delay are in millseconds - if you need higher
resolution, passing micro=True interprets rate as microseconds (as it is
represented in the file). Delay is always milliseconds.
By default, both the global binary header and the trace headers are updated
to reflect this. If preserving either the trace header interval field or
the binary header interval field is important, pass trace=False and
binary=False respectively, to not have that field updated. This only apply
to sample rates - the recording delay is only found in trace headers and
will be written unconditionally, if delay is not None.
.. warning::
This function requires an open file handle and is **DESTRUCTIVE**. It
will modify the file, and if an exception is raised then partial writes
might have happened and the file might be corrupted.
This function assumes all traces have uniform delays and frequencies.
Parameters
----------
f : SegyFile
rate : int
delay : int
micro : bool
if True, interpret rate as microseconds
trace : bool
Update the trace header if True
binary : bool
Update the binary header if True
Notes
-----
.. versionadded:: 1.4
]
if compare[name[rate] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b18496f0> begin[:]
<ast.AugAssign object at 0x7da1b18484c0>
if name[binary] begin[:]
call[name[f].bin][name[segyio].su.hdt] assign[=] name[rate]
if name[trace] begin[:]
name[f].header assign[=] dictionary[[<ast.Attribute object at 0x7da1b1849330>], [<ast.Name object at 0x7da1b1848760>]]
if compare[name[delay] is_not constant[None]] begin[:]
name[f].header assign[=] dictionary[[<ast.Attribute object at 0x7da1b1848c70>], [<ast.Name object at 0x7da1b18496c0>]]
variable[t0] assign[=] <ast.IfExp object at 0x7da1b18487f0>
variable[rate] assign[=] <ast.IfExp object at 0x7da1b1848a90>
name[f]._samples assign[=] binary_operation[binary_operation[call[name[np].arange, parameter[call[name[len], parameter[name[f].samples]]]] * name[rate]] + name[t0]]
return[name[f]] | keyword[def] identifier[resample] ( identifier[f] , identifier[rate] = keyword[None] , identifier[delay] = keyword[None] , identifier[micro] = keyword[False] ,
identifier[trace] = keyword[True] ,
identifier[binary] = keyword[True] ):
literal[string]
keyword[if] identifier[rate] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[micro] : identifier[rate] *= literal[int]
keyword[if] identifier[binary] : identifier[f] . identifier[bin] [ identifier[segyio] . identifier[su] . identifier[hdt] ]= identifier[rate]
keyword[if] identifier[trace] : identifier[f] . identifier[header] ={ identifier[segyio] . identifier[su] . identifier[dt] : identifier[rate] }
keyword[if] identifier[delay] keyword[is] keyword[not] keyword[None] :
identifier[f] . identifier[header] ={ identifier[segyio] . identifier[su] . identifier[delrt] : identifier[delay] }
identifier[t0] = identifier[delay] keyword[if] identifier[delay] keyword[is] keyword[not] keyword[None] keyword[else] identifier[f] . identifier[samples] [ literal[int] ]
identifier[rate] = identifier[rate] / literal[int] keyword[if] identifier[rate] keyword[is] keyword[not] keyword[None] keyword[else] identifier[f] . identifier[samples] [ literal[int] ]- identifier[f] . identifier[samples] [ literal[int] ]
identifier[f] . identifier[_samples] =( identifier[np] . identifier[arange] ( identifier[len] ( identifier[f] . identifier[samples] ))* identifier[rate] )+ identifier[t0]
keyword[return] identifier[f] | def resample(f, rate=None, delay=None, micro=False, trace=True, binary=True):
"""Resample a file
Resample all data traces, and update the file handle to reflect the new
sample rate. No actual samples (data traces) are modified, only the header
fields and interpretation.
By default, the rate and the delay are in millseconds - if you need higher
resolution, passing micro=True interprets rate as microseconds (as it is
represented in the file). Delay is always milliseconds.
By default, both the global binary header and the trace headers are updated
to reflect this. If preserving either the trace header interval field or
the binary header interval field is important, pass trace=False and
binary=False respectively, to not have that field updated. This only apply
to sample rates - the recording delay is only found in trace headers and
will be written unconditionally, if delay is not None.
.. warning::
This function requires an open file handle and is **DESTRUCTIVE**. It
will modify the file, and if an exception is raised then partial writes
might have happened and the file might be corrupted.
This function assumes all traces have uniform delays and frequencies.
Parameters
----------
f : SegyFile
rate : int
delay : int
micro : bool
if True, interpret rate as microseconds
trace : bool
Update the trace header if True
binary : bool
Update the binary header if True
Notes
-----
.. versionadded:: 1.4
"""
if rate is not None:
if not micro:
rate *= 1000 # depends on [control=['if'], data=[]]
if binary:
f.bin[segyio.su.hdt] = rate # depends on [control=['if'], data=[]]
if trace:
f.header = {segyio.su.dt: rate} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['rate']]
if delay is not None:
f.header = {segyio.su.delrt: delay} # depends on [control=['if'], data=['delay']]
t0 = delay if delay is not None else f.samples[0]
rate = rate / 1000 if rate is not None else f.samples[1] - f.samples[0]
f._samples = np.arange(len(f.samples)) * rate + t0
return f |
def gauge(self, key, gauge=None, default=float("nan"), **dims):
    """Add a gauge with dimensions to the registry under the regex-resolved key."""
    resolved_key = self._get_key(key)
    parent = super(RegexRegistry, self)
    return parent.gauge(resolved_key, gauge=gauge, default=default, **dims)
constant[Adds gauge with dimensions to the registry]
return[call[call[name[super], parameter[name[RegexRegistry], name[self]]].gauge, parameter[call[name[self]._get_key, parameter[name[key]]]]]] | keyword[def] identifier[gauge] ( identifier[self] , identifier[key] , identifier[gauge] = keyword[None] , identifier[default] = identifier[float] ( literal[string] ),** identifier[dims] ):
literal[string]
keyword[return] identifier[super] ( identifier[RegexRegistry] , identifier[self] ). identifier[gauge] (
identifier[self] . identifier[_get_key] ( identifier[key] ), identifier[gauge] = identifier[gauge] , identifier[default] = identifier[default] ,** identifier[dims] ) | def gauge(self, key, gauge=None, default=float('nan'), **dims):
"""Adds gauge with dimensions to the registry"""
return super(RegexRegistry, self).gauge(self._get_key(key), gauge=gauge, default=default, **dims) |
def add(self, user, message, groups):
    """Create a Request from the given details, prepend it to the log, and return it.

    :param user: The requesting user
    :type user: agentml.User
    :param message: The request Message instance
    :type message: agentml.Message
    :param groups: The request groups
    :type groups: set
    :return: The logged Request instance
    :rtype : Request
    """
    self._debug_log.debug('Logging new Request entry')
    entry = Request(user, message, groups)
    # Newest entries live at the front of the deque.
    self._log_entries.appendleft(entry)
    return entry
constant[
Add a new log entry
:param user: The requesting user
:type user: agentml.User
:param message: The request Message instance
:type message: agentml.Message
:param groups: The request groups
:type groups: set
:return: The logged Request instance
:rtype : Request
]
call[name[self]._debug_log.debug, parameter[constant[Logging new Request entry]]]
variable[request] assign[=] call[name[Request], parameter[name[user], name[message], name[groups]]]
call[name[self]._log_entries.appendleft, parameter[name[request]]]
return[name[request]] | keyword[def] identifier[add] ( identifier[self] , identifier[user] , identifier[message] , identifier[groups] ):
literal[string]
identifier[self] . identifier[_debug_log] . identifier[debug] ( literal[string] )
identifier[request] = identifier[Request] ( identifier[user] , identifier[message] , identifier[groups] )
identifier[self] . identifier[_log_entries] . identifier[appendleft] ( identifier[request] )
keyword[return] identifier[request] | def add(self, user, message, groups):
"""
Add a new log entry
:param user: The requesting user
:type user: agentml.User
:param message: The request Message instance
:type message: agentml.Message
:param groups: The request groups
:type groups: set
:return: The logged Request instance
:rtype : Request
"""
self._debug_log.debug('Logging new Request entry')
request = Request(user, message, groups)
self._log_entries.appendleft(request)
return request |
def is_fivefold_repetition(self) -> bool:
    """Check for the automatic fivefold-repetition draw.

    Since the 1st of July 2014 a game is automatically drawn (without
    a claim by one of the players) if a position occurs for the fifth
    time. Originally this had to occur on consecutive alternating
    moves, but this has since been revised.
    """
    target_key = self._transposition_key()
    occurrences = 1
    undone = []
    # Walk backwards through the move stack, counting positions whose
    # transposition key matches the current one.  An irreversible move
    # means no earlier position can possibly repeat, so stop there.
    while self.move_stack and occurrences < 5:
        last_move = self.pop()
        undone.append(last_move)
        if self.is_irreversible(last_move):
            break
        if self._transposition_key() == target_key:
            occurrences += 1
    # Replay the undone moves to restore the original board state.
    while undone:
        self.push(undone.pop())
    return occurrences >= 5
constant[
Since the 1st of July 2014 a game is automatically drawn (without
a claim by one of the players) if a position occurs for the fifth time.
Originally this had to occur on consecutive alternating moves, but
this has since been revised.
]
variable[transposition_key] assign[=] call[name[self]._transposition_key, parameter[]]
variable[repetitions] assign[=] constant[1]
variable[switchyard] assign[=] list[[]]
while <ast.BoolOp object at 0x7da1b18d9210> begin[:]
variable[move] assign[=] call[name[self].pop, parameter[]]
call[name[switchyard].append, parameter[name[move]]]
if call[name[self].is_irreversible, parameter[name[move]]] begin[:]
break
if compare[call[name[self]._transposition_key, parameter[]] equal[==] name[transposition_key]] begin[:]
<ast.AugAssign object at 0x7da1b17b9630>
while name[switchyard] begin[:]
call[name[self].push, parameter[call[name[switchyard].pop, parameter[]]]]
return[compare[name[repetitions] greater_or_equal[>=] constant[5]]] | keyword[def] identifier[is_fivefold_repetition] ( identifier[self] )-> identifier[bool] :
literal[string]
identifier[transposition_key] = identifier[self] . identifier[_transposition_key] ()
identifier[repetitions] = literal[int]
identifier[switchyard] =[]
keyword[while] identifier[self] . identifier[move_stack] keyword[and] identifier[repetitions] < literal[int] :
identifier[move] = identifier[self] . identifier[pop] ()
identifier[switchyard] . identifier[append] ( identifier[move] )
keyword[if] identifier[self] . identifier[is_irreversible] ( identifier[move] ):
keyword[break]
keyword[if] identifier[self] . identifier[_transposition_key] ()== identifier[transposition_key] :
identifier[repetitions] += literal[int]
keyword[while] identifier[switchyard] :
identifier[self] . identifier[push] ( identifier[switchyard] . identifier[pop] ())
keyword[return] identifier[repetitions] >= literal[int] | def is_fivefold_repetition(self) -> bool:
"""
Since the 1st of July 2014 a game is automatically drawn (without
a claim by one of the players) if a position occurs for the fifth time.
Originally this had to occur on consecutive alternating moves, but
this has since been revised.
"""
transposition_key = self._transposition_key()
repetitions = 1
switchyard = []
while self.move_stack and repetitions < 5:
move = self.pop()
switchyard.append(move)
if self.is_irreversible(move):
break # depends on [control=['if'], data=[]]
if self._transposition_key() == transposition_key:
repetitions += 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
while switchyard:
self.push(switchyard.pop()) # depends on [control=['while'], data=[]]
return repetitions >= 5 |
def _exponent(self):
    """Return the exponent of self, as an integer.

    The exponent is defined as the unique integer k such that
    2**(k-1) <= abs(self) < 2**k.

    If self is not finite and nonzero, return a string: one
    of '0', 'inf' or 'nan'.
    """
    # Normal case: finite nonzero value -> ask MPFR for the exponent.
    if self and is_finite(self):
        return mpfr.mpfr_get_exp(self)
    # Degenerate cases map onto sentinel strings.
    if not self:
        return '0'
    if is_inf(self):
        return 'inf'
    if is_nan(self):
        return 'nan'
    # Every float is zero, finite, infinite or NaN, so this is dead code.
    assert False, "shouldn't ever get here"
constant[Return the exponent of self, as an integer.
The exponent is defined as the unique integer k such that
2**(k-1) <= abs(self) < 2**k.
If self is not finite and nonzero, return a string: one
of '0', 'inf' or 'nan'.
]
if <ast.BoolOp object at 0x7da18ede6dd0> begin[:]
return[call[name[mpfr].mpfr_get_exp, parameter[name[self]]]]
if <ast.UnaryOp object at 0x7da20c76f100> begin[:]
return[constant[0]] | keyword[def] identifier[_exponent] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] keyword[and] identifier[is_finite] ( identifier[self] ):
keyword[return] identifier[mpfr] . identifier[mpfr_get_exp] ( identifier[self] )
keyword[if] keyword[not] identifier[self] :
keyword[return] literal[string]
keyword[elif] identifier[is_inf] ( identifier[self] ):
keyword[return] literal[string]
keyword[elif] identifier[is_nan] ( identifier[self] ):
keyword[return] literal[string]
keyword[else] :
keyword[assert] keyword[False] , literal[string] | def _exponent(self):
"""Return the exponent of self, as an integer.
The exponent is defined as the unique integer k such that
2**(k-1) <= abs(self) < 2**k.
If self is not finite and nonzero, return a string: one
of '0', 'inf' or 'nan'.
"""
if self and is_finite(self):
return mpfr.mpfr_get_exp(self) # depends on [control=['if'], data=[]]
if not self:
return '0' # depends on [control=['if'], data=[]]
elif is_inf(self):
return 'inf' # depends on [control=['if'], data=[]]
elif is_nan(self):
return 'nan' # depends on [control=['if'], data=[]]
else:
assert False, "shouldn't ever get here" |
def pairwise(lst):
    """ yield item i and item i+1 in lst. e.g.
    (lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)

    An empty (or falsy) *lst* yields nothing.

    Args:
        lst (list): List to process

    Returns:
        list
    """
    if not lst:
        return
    # zip pairs each element with its successor; the final element has
    # no successor, so it is paired with None afterwards.
    for current, successor in zip(lst, lst[1:]):
        yield current, successor
    yield lst[-1], None
constant[ yield item i and item i+1 in lst. e.g.
(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)
Args:
lst (list): List to process
Returns:
list
]
if <ast.UnaryOp object at 0x7da1aff03550> begin[:]
return[None]
variable[length] assign[=] call[name[len], parameter[name[lst]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[length] - constant[1]]]]] begin[:]
<ast.Yield object at 0x7da1aff027a0>
<ast.Yield object at 0x7da1aff00af0> | keyword[def] identifier[pairwise] ( identifier[lst] ):
literal[string]
keyword[if] keyword[not] identifier[lst] :
keyword[return]
identifier[length] = identifier[len] ( identifier[lst] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] - literal[int] ):
keyword[yield] identifier[lst] [ identifier[i] ], identifier[lst] [ identifier[i] + literal[int] ]
keyword[yield] identifier[lst] [- literal[int] ], keyword[None] | def pairwise(lst):
""" yield item i and item i+1 in lst. e.g.
(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)
Args:
lst (list): List to process
Returns:
list
"""
if not lst:
return # depends on [control=['if'], data=[]]
length = len(lst)
for i in range(length - 1):
yield (lst[i], lst[i + 1]) # depends on [control=['for'], data=['i']]
yield (lst[-1], None) |
def translate_wp_comment(self, e):
    """Convert a WordPress ``<wp:comment>`` element into a plain dict.

    Expected element layout:

    <wp:comment>
        <wp:comment_id>1234</wp:comment_id>
        <wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
        <wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
        <wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
        <wp:comment_approved><![CDATA[1]]></wp:comment_approved>
        <wp:comment_parent>0</wp:comment_parent>
        ...
    </wp:comment>
    """
    def text_of(tag):
        # Pull the text payload of a namespaced child element.
        return e.find(tag).text

    comment_dict = {
        'ID': text_of('./{wp}comment_id'),
        'date': text_of('{wp}comment_date'),
        'content': text_of('{wp}comment_content'),
        'status': text_of('{wp}comment_approved'),
        'parent': text_of('{wp}comment_parent'),
        'author': text_of('{wp}comment_author'),
    }
    # WordPress stores approval as "1"/"0"; normalise to a word.
    comment_dict['status'] = "approved" if comment_dict['status'] == "1" else "rejected"
    # Re-encode the timestamp with a "T" separator (ISO-8601 style).
    parsed = time.strptime(comment_dict['date'], '%Y-%m-%d %H:%M:%S')
    comment_dict['date'] = time.strftime('%Y-%m-%dT%H:%M:%S', parsed)
    return comment_dict
constant[
<wp:comment>
<wp:comment_id>1234</wp:comment_id>
<wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
<wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
<wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
<wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
<wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
<wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
<wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
<wp:comment_approved><![CDATA[1]]></wp:comment_approved>
<wp:comment_type><![CDATA[]]></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>0</wp:comment_user_id>
</wp:comment>
]
variable[comment_dict] assign[=] dictionary[[], []]
call[name[comment_dict]][constant[ID]] assign[=] call[name[e].find, parameter[constant[./{wp}comment_id]]].text
call[name[comment_dict]][constant[date]] assign[=] call[name[e].find, parameter[constant[{wp}comment_date]]].text
call[name[comment_dict]][constant[content]] assign[=] call[name[e].find, parameter[constant[{wp}comment_content]]].text
call[name[comment_dict]][constant[status]] assign[=] call[name[e].find, parameter[constant[{wp}comment_approved]]].text
call[name[comment_dict]][constant[status]] assign[=] <ast.IfExp object at 0x7da1b19e5720>
call[name[comment_dict]][constant[parent]] assign[=] call[name[e].find, parameter[constant[{wp}comment_parent]]].text
call[name[comment_dict]][constant[author]] assign[=] call[name[e].find, parameter[constant[{wp}comment_author]]].text
call[name[comment_dict]][constant[date]] assign[=] call[name[time].strptime, parameter[call[name[comment_dict]][constant[date]], constant[%Y-%m-%d %H:%M:%S]]]
call[name[comment_dict]][constant[date]] assign[=] call[name[time].strftime, parameter[constant[%Y-%m-%dT%H:%M:%S], call[name[comment_dict]][constant[date]]]]
return[name[comment_dict]] | keyword[def] identifier[translate_wp_comment] ( identifier[self] , identifier[e] ):
literal[string]
identifier[comment_dict] ={}
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= literal[string] keyword[if] identifier[comment_dict] [ literal[string] ]== literal[string] keyword[else] literal[string]
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= identifier[e] . identifier[find] ( literal[string] ). identifier[text]
identifier[comment_dict] [ literal[string] ]= identifier[time] . identifier[strptime] ( identifier[comment_dict] [ literal[string] ], literal[string] )
identifier[comment_dict] [ literal[string] ]= identifier[time] . identifier[strftime] ( literal[string] , identifier[comment_dict] [ literal[string] ])
keyword[return] identifier[comment_dict] | def translate_wp_comment(self, e):
"""
<wp:comment>
<wp:comment_id>1234</wp:comment_id>
<wp:comment_author><![CDATA[John Doe]]></wp:comment_author>
<wp:comment_author_email><![CDATA[info@adsasd.com]]></wp:comment_author_email>
<wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url>
<wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP>
<wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date>
<wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt>
<wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content>
<wp:comment_approved><![CDATA[1]]></wp:comment_approved>
<wp:comment_type><![CDATA[]]></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>0</wp:comment_user_id>
</wp:comment>
"""
comment_dict = {}
comment_dict['ID'] = e.find('./{wp}comment_id').text
comment_dict['date'] = e.find('{wp}comment_date').text
comment_dict['content'] = e.find('{wp}comment_content').text
comment_dict['status'] = e.find('{wp}comment_approved').text
comment_dict['status'] = 'approved' if comment_dict['status'] == '1' else 'rejected'
comment_dict['parent'] = e.find('{wp}comment_parent').text
comment_dict['author'] = e.find('{wp}comment_author').text
comment_dict['date'] = time.strptime(comment_dict['date'], '%Y-%m-%d %H:%M:%S')
comment_dict['date'] = time.strftime('%Y-%m-%dT%H:%M:%S', comment_dict['date'])
return comment_dict |
def auto_labels(df):
    """Transforms atomic system information into well-formatted labels.

    Combines each chemical system name with its facet into a
    LaTeX-style label such as ``$AB$(111)``.

    Parameters
    ----------
    df : Pandas DataFrame.
        Must provide ``system`` and ``facet`` columns.

    Returns
    -------
    labels : list of system labels.
    """
    systems = list(df.system)
    facets = list(df.facet)
    # Escape underscores for LaTeX.  Raw strings fix the invalid "\ "
    # escape sequence of the original code (a SyntaxWarning, and a
    # SyntaxError in future Python versions); the bytes are unchanged.
    systems_labels = [w.replace('_', r'\ ') for w in systems]
    systems_labels = [sub(w) for w in systems_labels]
    # Collapse adjacent subscript groups left behind by sub().
    systems_labels = [w.replace('}$$_{', '') for w in systems_labels]
    systems_labels = [w.replace('$', '') for w in systems_labels]
    systems_labels = ['$' + w + '$' for w in systems_labels]
    facets_label = [w.replace('_', r'\ ') for w in facets]
    facets_label = ['(' + w + ')' for w in facets_label]
    # Pair each system label with its facet label positionally.
    labels = [system + facet for system, facet in zip(systems_labels, facets_label)]
    return labels
constant[Transforms atomic system information into well-formatted labels.
Parameters
----------
df : Pandas DataFrame.
Returns
-------
labels : list of system labels.
]
variable[systems] assign[=] call[name[list], parameter[name[df].system]]
variable[facets] assign[=] call[name[list], parameter[name[df].facet]]
variable[systems_labels] assign[=] <ast.ListComp object at 0x7da204620040>
variable[systems_labels] assign[=] <ast.ListComp object at 0x7da204623880>
variable[systems_labels] assign[=] <ast.ListComp object at 0x7da1b2486f80>
variable[systems_labels] assign[=] <ast.ListComp object at 0x7da1b2484370>
variable[systems_labels] assign[=] <ast.ListComp object at 0x7da1b24afa60>
variable[facets_label] assign[=] <ast.ListComp object at 0x7da1b24ac9a0>
variable[facets_label] assign[=] <ast.ListComp object at 0x7da1b24ad900>
variable[labels] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f03d00>, <ast.Name object at 0x7da207f01780>]]] in starred[call[name[enumerate], parameter[name[systems_labels]]]] begin[:]
call[name[labels].append, parameter[binary_operation[name[sys] + call[name[facets_label]][name[i]]]]]
return[name[labels]] | keyword[def] identifier[auto_labels] ( identifier[df] ):
literal[string]
identifier[systems] = identifier[list] ( identifier[df] . identifier[system] )
identifier[facets] = identifier[list] ( identifier[df] . identifier[facet] )
identifier[systems_labels] =[ identifier[w] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[w] keyword[in] identifier[systems] ]
identifier[systems_labels] =[ identifier[sub] ( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[systems_labels] ]
identifier[systems_labels] =[ identifier[w] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[w] keyword[in] identifier[systems_labels] ]
identifier[systems_labels] =[ identifier[w] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[w] keyword[in] identifier[systems_labels] ]
identifier[systems_labels] =[ literal[string] + identifier[w] + literal[string] keyword[for] identifier[w] keyword[in] identifier[systems_labels] ]
identifier[facets_label] =[ identifier[w] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[w] keyword[in] identifier[facets] ]
identifier[facets_label] =[ literal[string] + identifier[w] + literal[string] keyword[for] identifier[w] keyword[in] identifier[facets_label] ]
identifier[labels] =[]
keyword[for] identifier[i] , identifier[sys] keyword[in] identifier[enumerate] ( identifier[systems_labels] ):
identifier[labels] . identifier[append] ( identifier[sys] + identifier[facets_label] [ identifier[i] ])
keyword[return] ( identifier[labels] ) | def auto_labels(df):
"""Transforms atomic system information into well-formatted labels.
Parameters
----------
df : Pandas DataFrame.
Returns
-------
labels : list of system labels.
"""
systems = list(df.system)
facets = list(df.facet)
systems_labels = [w.replace('_', '\\ ') for w in systems]
systems_labels = [sub(w) for w in systems_labels]
systems_labels = [w.replace('}$$_{', '') for w in systems_labels]
systems_labels = [w.replace('$', '') for w in systems_labels]
systems_labels = ['$' + w + '$' for w in systems_labels]
facets_label = [w.replace('_', '\\ ') for w in facets]
facets_label = ['(' + w + ')' for w in facets_label]
labels = []
for (i, sys) in enumerate(systems_labels):
labels.append(sys + facets_label[i]) # depends on [control=['for'], data=[]]
# labels = list(set(labels))
return labels |
def run(self, files, stack):
    """Clean your text.

    Runs the configured bleach cleaner over the ``content`` of every
    post in *files*, mutating the posts in place.

    :param files: mapping of filename -> post object exposing a
        ``content`` attribute
    :param stack: unused here; part of the plugin-hook interface
    """
    # The filenames are never used, so iterate the values directly
    # instead of unpacking .items() (ruff PERF102).
    for post in files.values():
        post.content = self.bleach.clean(post.content, *self.args, **self.kwargs)
constant[Clean your text]
for taget[tuple[[<ast.Name object at 0x7da1b0ab8a30>, <ast.Name object at 0x7da1b0ab9930>]]] in starred[call[name[files].items, parameter[]]] begin[:]
name[post].content assign[=] call[name[self].bleach.clean, parameter[name[post].content, <ast.Starred object at 0x7da1b0ab9570>]] | keyword[def] identifier[run] ( identifier[self] , identifier[files] , identifier[stack] ):
literal[string]
keyword[for] identifier[filename] , identifier[post] keyword[in] identifier[files] . identifier[items] ():
identifier[post] . identifier[content] = identifier[self] . identifier[bleach] . identifier[clean] ( identifier[post] . identifier[content] ,* identifier[self] . identifier[args] ,** identifier[self] . identifier[kwargs] ) | def run(self, files, stack):
"""Clean your text"""
for (filename, post) in files.items():
post.content = self.bleach.clean(post.content, *self.args, **self.kwargs) # depends on [control=['for'], data=[]] |
def display(self, typ, data):
    """ display section of typ with data

    Dispatch order: a dedicated ``print_<typ>`` handler wins; falsy
    data is printed inline; mappings print a header then key/value
    pairs; lists/tuples of mappings go to the tabular layout, other
    sequences print item by item; anything else prints inline.
    """
    # collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc, so import it from there.
    from collections.abc import Mapping

    if hasattr(self, 'print_' + typ):
        getattr(self, 'print_' + typ)(data)
    elif not data:
        self._print("%s: %s" % (typ, data))
    elif isinstance(data, Mapping):
        self._print("\n", typ)
        for k, v in data.items():
            self.print(k, v)
    elif isinstance(data, (list, tuple)):
        # tabular data layout for lists of dicts
        if isinstance(data[0], Mapping):
            self.display_set(typ, data, self._get_columns(data[0]))
        else:
            for each in data:
                self.print(typ, each)
    else:
        self._print("%s: %s" % (typ, data))
    self.fobj.flush()
constant[ display section of typ with data ]
if call[name[hasattr], parameter[name[self], binary_operation[constant[print_] + name[typ]]]] begin[:]
call[call[name[getattr], parameter[name[self], binary_operation[constant[print_] + name[typ]]]], parameter[name[data]]]
call[name[self].fobj.flush, parameter[]] | keyword[def] identifier[display] ( identifier[self] , identifier[typ] , identifier[data] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] + identifier[typ] ):
identifier[getattr] ( identifier[self] , literal[string] + identifier[typ] )( identifier[data] )
keyword[elif] keyword[not] identifier[data] :
identifier[self] . identifier[_print] ( literal[string] %( identifier[typ] , identifier[data] ))
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[collections] . identifier[Mapping] ):
identifier[self] . identifier[_print] ( literal[string] , identifier[typ] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ():
identifier[self] . identifier[print] ( identifier[k] , identifier[v] )
keyword[elif] identifier[isinstance] ( identifier[data] ,( identifier[list] , identifier[tuple] )):
keyword[if] identifier[isinstance] ( identifier[data] [ literal[int] ], identifier[collections] . identifier[Mapping] ):
identifier[self] . identifier[display_set] ( identifier[typ] , identifier[data] , identifier[self] . identifier[_get_columns] ( identifier[data] [ literal[int] ]))
keyword[else] :
keyword[for] identifier[each] keyword[in] identifier[data] :
identifier[self] . identifier[print] ( identifier[typ] , identifier[each] )
keyword[else] :
identifier[self] . identifier[_print] ( literal[string] %( identifier[typ] , identifier[data] ))
identifier[self] . identifier[fobj] . identifier[flush] () | def display(self, typ, data):
""" display section of typ with data """
if hasattr(self, 'print_' + typ):
getattr(self, 'print_' + typ)(data) # depends on [control=['if'], data=[]]
elif not data:
self._print('%s: %s' % (typ, data)) # depends on [control=['if'], data=[]]
elif isinstance(data, collections.Mapping):
self._print('\n', typ)
for (k, v) in data.items():
self.print(k, v) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data, (list, tuple)):
# tabular data layout for lists of dicts
if isinstance(data[0], collections.Mapping):
self.display_set(typ, data, self._get_columns(data[0])) # depends on [control=['if'], data=[]]
else:
for each in data:
self.print(typ, each) # depends on [control=['for'], data=['each']] # depends on [control=['if'], data=[]]
else:
self._print('%s: %s' % (typ, data))
self.fobj.flush() |
def add_timeout(
    self,
    deadline: Union[float, datetime.timedelta],
    callback: Callable[..., None],
    *args: Any,
    **kwargs: Any
) -> object:
    """Schedule ``callback`` to run at the time ``deadline`` on the I/O loop.

    ``deadline`` is either an absolute time on the `IOLoop.time` scale
    (normally `time.time`) or a `datetime.timedelta` relative to the
    current time; for the relative case `call_later` (Tornado 4.0+) is
    usually more convenient.

    Returns an opaque handle that may be passed to `remove_timeout`
    to cancel.

    Note that it is not safe to call `add_timeout` from other threads.
    Instead, you must use `add_callback` to transfer control to the
    `IOLoop`'s thread, and then call `add_timeout` from there.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each will call
    the other. `call_at` is usually easier to implement, but
    subclasses that wish to maintain compatibility with Tornado
    versions prior to 4.0 must use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    # Normalise the deadline to an absolute time, then dispatch once.
    if isinstance(deadline, numbers.Real):
        when = deadline
    elif isinstance(deadline, datetime.timedelta):
        when = self.time() + deadline.total_seconds()
    else:
        raise TypeError("Unsupported deadline %r" % deadline)
    return self.call_at(when, callback, *args, **kwargs)
constant[Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
]
if call[name[isinstance], parameter[name[deadline], name[numbers].Real]] begin[:]
return[call[name[self].call_at, parameter[name[deadline], name[callback], <ast.Starred object at 0x7da1b1f1beb0>]]] | keyword[def] identifier[add_timeout] (
identifier[self] ,
identifier[deadline] : identifier[Union] [ identifier[float] , identifier[datetime] . identifier[timedelta] ],
identifier[callback] : identifier[Callable] [..., keyword[None] ],
* identifier[args] : identifier[Any] ,
** identifier[kwargs] : identifier[Any]
)-> identifier[object] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[deadline] , identifier[numbers] . identifier[Real] ):
keyword[return] identifier[self] . identifier[call_at] ( identifier[deadline] , identifier[callback] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[deadline] , identifier[datetime] . identifier[timedelta] ):
keyword[return] identifier[self] . identifier[call_at] (
identifier[self] . identifier[time] ()+ identifier[deadline] . identifier[total_seconds] (), identifier[callback] ,* identifier[args] ,** identifier[kwargs]
)
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[deadline] ) | def add_timeout(self, deadline: Union[float, datetime.timedelta], callback: Callable[..., None], *args: Any, **kwargs: Any) -> object:
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs) # depends on [control=['if'], data=[]]
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + deadline.total_seconds(), callback, *args, **kwargs) # depends on [control=['if'], data=[]]
else:
raise TypeError('Unsupported deadline %r' % deadline) |
def get_commensurate_points(supercell_matrix):  # wrt primitive cell
    """Commensurate q-points are returned.

    Parameters
    ----------
    supercell_matrix : array_like
        Supercell matrix with respect to primitive cell basis vectors.
        shape=(3, 3)
        dtype=intc
    """
    smat = np.array(supercell_matrix, dtype=int)
    # Build a dummy one-atom cell and expand it by the transposed
    # supercell matrix; the scaled positions of the resulting supercell
    # are exactly the commensurate q-points.
    rec_primitive = PhonopyAtoms(numbers=[1],
                                 scaled_positions=[[0, 0, 0]],
                                 cell=np.diag([1, 1, 1]),
                                 pbc=True)
    rec_supercell = get_supercell(rec_primitive, smat.T)
    q_pos = rec_supercell.get_scaled_positions()
    # Wrap coordinates that are numerically 1.0 (within 1e-15) back to 0.
    wrapped = np.where(q_pos > 1 - 1e-15, q_pos - 1, q_pos)
    return np.array(wrapped, dtype='double', order='C')
constant[Commensurate q-points are returned.
Parameters
----------
supercell_matrix : array_like
Supercell matrix with respect to primitive cell basis vectors.
shape=(3, 3)
dtype=intc
]
variable[smat] assign[=] call[name[np].array, parameter[name[supercell_matrix]]]
variable[rec_primitive] assign[=] call[name[PhonopyAtoms], parameter[]]
variable[rec_supercell] assign[=] call[name[get_supercell], parameter[name[rec_primitive], name[smat].T]]
variable[q_pos] assign[=] call[name[rec_supercell].get_scaled_positions, parameter[]]
return[call[name[np].array, parameter[call[name[np].where, parameter[compare[name[q_pos] greater[>] binary_operation[constant[1] - constant[1e-15]]], binary_operation[name[q_pos] - constant[1]], name[q_pos]]]]]] | keyword[def] identifier[get_commensurate_points] ( identifier[supercell_matrix] ):
literal[string]
identifier[smat] = identifier[np] . identifier[array] ( identifier[supercell_matrix] , identifier[dtype] = identifier[int] )
identifier[rec_primitive] = identifier[PhonopyAtoms] ( identifier[numbers] =[ literal[int] ],
identifier[scaled_positions] =[[ literal[int] , literal[int] , literal[int] ]],
identifier[cell] = identifier[np] . identifier[diag] ([ literal[int] , literal[int] , literal[int] ]),
identifier[pbc] = keyword[True] )
identifier[rec_supercell] = identifier[get_supercell] ( identifier[rec_primitive] , identifier[smat] . identifier[T] )
identifier[q_pos] = identifier[rec_supercell] . identifier[get_scaled_positions] ()
keyword[return] identifier[np] . identifier[array] ( identifier[np] . identifier[where] ( identifier[q_pos] > literal[int] - literal[int] , identifier[q_pos] - literal[int] , identifier[q_pos] ),
identifier[dtype] = literal[string] , identifier[order] = literal[string] ) | def get_commensurate_points(supercell_matrix): # wrt primitive cell
'Commensurate q-points are returned.\n\n Parameters\n ----------\n supercell_matrix : array_like\n Supercell matrix with respect to primitive cell basis vectors.\n shape=(3, 3)\n dtype=intc\n\n '
smat = np.array(supercell_matrix, dtype=int)
rec_primitive = PhonopyAtoms(numbers=[1], scaled_positions=[[0, 0, 0]], cell=np.diag([1, 1, 1]), pbc=True)
rec_supercell = get_supercell(rec_primitive, smat.T)
q_pos = rec_supercell.get_scaled_positions()
return np.array(np.where(q_pos > 1 - 1e-15, q_pos - 1, q_pos), dtype='double', order='C') |
def request_raw_reverse(self, req, msg):
    """
    A raw request handler to demonstrate the calling convention if
    @request decorators are not used. Reverses the message arguments.
    """
    # msg is a katcp.Message.request object; build its argument list
    # back-to-front and hand it to req.make_reply(), which fills in the
    # correct request name and message ID.
    flipped = list(reversed(msg.arguments))
    return req.make_reply(*flipped)
constant[
A raw request handler to demonstrate the calling convention if
@request decoraters are not used. Reverses the message arguments.
]
variable[reversed_args] assign[=] call[name[msg].arguments][<ast.Slice object at 0x7da1b056a170>]
return[call[name[req].make_reply, parameter[<ast.Starred object at 0x7da1b056a5c0>]]] | keyword[def] identifier[request_raw_reverse] ( identifier[self] , identifier[req] , identifier[msg] ):
literal[string]
identifier[reversed_args] = identifier[msg] . identifier[arguments] [::- literal[int] ]
keyword[return] identifier[req] . identifier[make_reply] (* identifier[reversed_args] ) | def request_raw_reverse(self, req, msg):
"""
A raw request handler to demonstrate the calling convention if
@request decoraters are not used. Reverses the message arguments.
"""
# msg is a katcp.Message.request object
reversed_args = msg.arguments[::-1]
# req.make_reply() makes a katcp.Message.reply using the correct request
# name and message ID
return req.make_reply(*reversed_args) |
def find_all(self, kw: YangIdentifier,
             pref: YangIdentifier = None) -> List["Statement"]:
    """Return the list of all substatements with the given keyword and prefix.

    Args:
        kw: Statement keyword (local part for extensions).
        pref: Keyword prefix (``None`` for built-in statements).
    """
    matches = []
    for sub in self.substatements:
        if sub.keyword == kw and sub.prefix == pref:
            matches.append(sub)
    return matches
constant[Return the list all substatements with the given keyword and prefix.
Args:
kw: Statement keyword (local part for extensions).
pref: Keyword prefix (``None`` for built-in statements).
]
return[<ast.ListComp object at 0x7da1b05591e0>] | keyword[def] identifier[find_all] ( identifier[self] , identifier[kw] : identifier[YangIdentifier] ,
identifier[pref] : identifier[YangIdentifier] = keyword[None] )-> identifier[List] [ literal[string] ]:
literal[string]
keyword[return] [ identifier[c] keyword[for] identifier[c] keyword[in] identifier[self] . identifier[substatements]
keyword[if] identifier[c] . identifier[keyword] == identifier[kw] keyword[and] identifier[c] . identifier[prefix] == identifier[pref] ] | def find_all(self, kw: YangIdentifier, pref: YangIdentifier=None) -> List['Statement']:
"""Return the list all substatements with the given keyword and prefix.
Args:
kw: Statement keyword (local part for extensions).
pref: Keyword prefix (``None`` for built-in statements).
"""
return [c for c in self.substatements if c.keyword == kw and c.prefix == pref] |
def add_listener(self, evt_name, fn):
    """Register an observer callback for an event.

    :params evt_name: name of the event to observe
    :params fn: callback function to register

    .. note::
        The same function may be registered more than once; each
        registration results in one extra invocation per
        :func:`fire_event`.
    """
    # Make sure a listener list exists for this event before looking it up.
    if evt_name not in self._listeners:
        self._listeners[evt_name] = []
    self.__get_listeners(evt_name).append(fn)
constant[添加观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
.. note::
允许一个函数多次注册,多次注册意味着一次 :func:`fire_event` 多次调用。
]
call[name[self]._listeners.setdefault, parameter[name[evt_name], list[[]]]]
variable[listeners] assign[=] call[name[self].__get_listeners, parameter[name[evt_name]]]
call[name[listeners].append, parameter[name[fn]]] | keyword[def] identifier[add_listener] ( identifier[self] , identifier[evt_name] , identifier[fn] ):
literal[string]
identifier[self] . identifier[_listeners] . identifier[setdefault] ( identifier[evt_name] ,[])
identifier[listeners] = identifier[self] . identifier[__get_listeners] ( identifier[evt_name] )
identifier[listeners] . identifier[append] ( identifier[fn] ) | def add_listener(self, evt_name, fn):
"""添加观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
.. note::
允许一个函数多次注册,多次注册意味着一次 :func:`fire_event` 多次调用。
"""
self._listeners.setdefault(evt_name, [])
listeners = self.__get_listeners(evt_name)
listeners.append(fn) |
def _batch_arguments(self):
    """Register CLI arguments specific to Batch API writes.

    --batch_action action              Action for the batch job ['Create', 'Delete'].
    --batch_chunk number               The maximum number of indicators per batch job.
    --batch_halt_on_error              Flag to indicate that the batch job should halt on error.
    --batch_poll_interval seconds      Seconds between batch status polls.
    --batch_poll_interval_max seconds  Maximum seconds to wait on batch job completion
                                       before timing out.
    --batch_write_type type            Write type for Indicator attributes ['Append', 'Replace'].
    """
    # Defaults for each option come from the corresponding self._batch_*
    # attribute so subclasses/config can override them before registration.
    self.add_argument(
        '--batch_action',
        choices=['Create', 'Delete'],
        default=self._batch_action,
        help='Action for the batch job',
    )
    self.add_argument(
        '--batch_chunk',
        default=self._batch_chunk,
        help='Max number of indicators per batch',
        type=int,
    )
    self.add_argument(
        '--batch_halt_on_error',
        action='store_true',
        default=self._batch_halt_on_error,
        help='Halt batch job on error',
    )
    self.add_argument(
        '--batch_poll_interval',
        default=self._batch_poll_interval,
        help='Frequency to run status check for batch job.',
        type=int,
    )
    self.add_argument(
        '--batch_poll_interval_max',
        default=self._batch_poll_interval_max,
        help='Maximum amount of time for status check on batch job.',
        type=int,
    )
    self.add_argument(
        '--batch_write_type',
        choices=['Append', 'Replace'],
        default=self._batch_write_type,
        help='Append or Replace attributes.',
    )
constant[Arguments specific to Batch API writes.
--batch_action action Action for the batch job ['Create', 'Delete'].
--batch_chunk number The maximum number of indicator per batch job.
--batch_halt_on_error Flag to indicate that the batch job should halt on error.
--batch_poll_interval seconds Seconds between batch status polls.
--batch_interval_max seconds Seconds before app should time out waiting on batch job
completion.
--batch_write_type type Write type for Indicator attributes ['Append', 'Replace'].
]
call[name[self].add_argument, parameter[constant[--batch_action]]]
call[name[self].add_argument, parameter[constant[--batch_chunk]]]
call[name[self].add_argument, parameter[constant[--batch_halt_on_error]]]
call[name[self].add_argument, parameter[constant[--batch_poll_interval]]]
call[name[self].add_argument, parameter[constant[--batch_poll_interval_max]]]
call[name[self].add_argument, parameter[constant[--batch_write_type]]] | keyword[def] identifier[_batch_arguments] ( identifier[self] ):
literal[string]
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[choices] =[ literal[string] , literal[string] ],
identifier[default] = identifier[self] . identifier[_batch_action] ,
identifier[help] = literal[string] ,
)
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[default] = identifier[self] . identifier[_batch_chunk] ,
identifier[help] = literal[string] ,
identifier[type] = identifier[int] ,
)
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[default] = identifier[self] . identifier[_batch_halt_on_error] ,
identifier[help] = literal[string] ,
)
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[default] = identifier[self] . identifier[_batch_poll_interval] ,
identifier[help] = literal[string] ,
identifier[type] = identifier[int] ,
)
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[default] = identifier[self] . identifier[_batch_poll_interval_max] ,
identifier[help] = literal[string] ,
identifier[type] = identifier[int] ,
)
identifier[self] . identifier[add_argument] (
literal[string] ,
identifier[choices] =[ literal[string] , literal[string] ],
identifier[default] = identifier[self] . identifier[_batch_write_type] ,
identifier[help] = literal[string] ,
) | def _batch_arguments(self):
"""Arguments specific to Batch API writes.
--batch_action action Action for the batch job ['Create', 'Delete'].
--batch_chunk number The maximum number of indicator per batch job.
--batch_halt_on_error Flag to indicate that the batch job should halt on error.
--batch_poll_interval seconds Seconds between batch status polls.
--batch_interval_max seconds Seconds before app should time out waiting on batch job
completion.
--batch_write_type type Write type for Indicator attributes ['Append', 'Replace'].
"""
self.add_argument('--batch_action', choices=['Create', 'Delete'], default=self._batch_action, help='Action for the batch job')
self.add_argument('--batch_chunk', default=self._batch_chunk, help='Max number of indicators per batch', type=int)
self.add_argument('--batch_halt_on_error', action='store_true', default=self._batch_halt_on_error, help='Halt batch job on error')
self.add_argument('--batch_poll_interval', default=self._batch_poll_interval, help='Frequency to run status check for batch job.', type=int)
self.add_argument('--batch_poll_interval_max', default=self._batch_poll_interval_max, help='Maximum amount of time for status check on batch job.', type=int)
self.add_argument('--batch_write_type', choices=['Append', 'Replace'], default=self._batch_write_type, help='Append or Replace attributes.') |
def geo_field(queryset):
    """Returns the GeometryField for a django or spillway GeoQuerySet."""
    for candidate in queryset.model._meta.fields:
        if not isinstance(candidate, models.GeometryField):
            continue
        # First geometry field wins.
        return candidate
    raise exceptions.FieldDoesNotExist('No GeometryField found')
constant[Returns the GeometryField for a django or spillway GeoQuerySet.]
for taget[name[field]] in starred[name[queryset].model._meta.fields] begin[:]
if call[name[isinstance], parameter[name[field], name[models].GeometryField]] begin[:]
return[name[field]]
<ast.Raise object at 0x7da18f58ebf0> | keyword[def] identifier[geo_field] ( identifier[queryset] ):
literal[string]
keyword[for] identifier[field] keyword[in] identifier[queryset] . identifier[model] . identifier[_meta] . identifier[fields] :
keyword[if] identifier[isinstance] ( identifier[field] , identifier[models] . identifier[GeometryField] ):
keyword[return] identifier[field]
keyword[raise] identifier[exceptions] . identifier[FieldDoesNotExist] ( literal[string] ) | def geo_field(queryset):
"""Returns the GeometryField for a django or spillway GeoQuerySet."""
for field in queryset.model._meta.fields:
if isinstance(field, models.GeometryField):
return field # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
raise exceptions.FieldDoesNotExist('No GeometryField found') |
def _check_useless_super_delegation(self, function):
    """Check if the given function node is an useless method override

    We consider it *useless* if it uses the super() builtin, but having
    nothing additional whatsoever than not implementing the method at all.
    If the method uses super() to delegate an operation to the rest of the MRO,
    and if the method called is the same as the current one, the arguments
    passed to super() are the same as the parameters that were passed to
    this method, then the method could be removed altogether, by letting
    other implementation to take precedence.

    :param function: astroid FunctionDef node being inspected; on a match
        the 'useless-super-delegation' message is emitted for it.
    """
    if (
        not function.is_method()
        # With decorators is a change of use
        or function.decorators
    ):
        return
    body = function.body
    if len(body) != 1:
        # Multiple statements, which means this overridden method
        # could do multiple things we are not aware of.
        return
    statement = body[0]
    if not isinstance(statement, (astroid.Expr, astroid.Return)):
        # Doing something else than what we are interested into.
        return
    call = statement.value
    if (
        not isinstance(call, astroid.Call)
        # Not a super() attribute access.
        or not isinstance(call.func, astroid.Attribute)
    ):
        return
    # Should be a super call.
    try:
        super_call = next(call.func.expr.infer())
    except astroid.InferenceError:
        # Cannot tell what the call target is; assume it is meaningful.
        return
    else:
        if not isinstance(super_call, objects.Super):
            return
    # The name should be the same.
    if call.func.attrname != function.name:
        return
    # Should be a super call with the MRO pointer being the
    # current class and the type being the current instance.
    current_scope = function.parent.scope()
    if (
        super_call.mro_pointer != current_scope
        or not isinstance(super_call.type, astroid.Instance)
        or super_call.type.name != current_scope.name
    ):
        return
    # Check values of default args
    klass = function.parent.frame()
    meth_node = None
    # Walk the MRO for the first ancestor definition of this method so its
    # default values / annotations can be compared with the override's.
    for overridden in klass.local_attr_ancestors(function.name):
        # get astroid for the searched method
        try:
            meth_node = overridden[function.name]
        except KeyError:
            # we have found the method but it's not in the local
            # dictionary.
            # This may happen with astroid build from living objects
            continue
        if (
            not isinstance(meth_node, astroid.FunctionDef)
            # If the method have an ancestor which is not a
            # function then it is legitimate to redefine it
            or _has_different_parameters_default_value(
                meth_node.args, function.args
            )
        ):
            return
        break
    # Detect if the parameters are the same as the call's arguments.
    params = _signature_from_arguments(function.args)
    args = _signature_from_call(call)
    if meth_node is not None:

        def form_annotations(annotations):
            # Render each non-None annotation node back to source text so
            # the two signatures can be compared as strings.
            return [
                annotation.as_string() for annotation in filter(None, annotations)
            ]

        called_annotations = form_annotations(function.args.annotations)
        overridden_annotations = form_annotations(meth_node.args.annotations)
        if called_annotations and overridden_annotations:
            # Differing annotations make the override meaningful, so skip it.
            if called_annotations != overridden_annotations:
                return
    if _definition_equivalent_to_call(params, args):
        self.add_message(
            "useless-super-delegation", node=function, args=(function.name,)
        )
constant[Check if the given function node is an useless method override
We consider it *useless* if it uses the super() builtin, but having
nothing additional whatsoever than not implementing the method at all.
If the method uses super() to delegate an operation to the rest of the MRO,
and if the method called is the same as the current one, the arguments
passed to super() are the same as the parameters that were passed to
this method, then the method could be removed altogether, by letting
other implementation to take precedence.
]
if <ast.BoolOp object at 0x7da1b025a650> begin[:]
return[None]
variable[body] assign[=] name[function].body
if compare[call[name[len], parameter[name[body]]] not_equal[!=] constant[1]] begin[:]
return[None]
variable[statement] assign[=] call[name[body]][constant[0]]
if <ast.UnaryOp object at 0x7da1b02585e0> begin[:]
return[None]
variable[call] assign[=] name[statement].value
if <ast.BoolOp object at 0x7da1b025bd30> begin[:]
return[None]
<ast.Try object at 0x7da1b025b880>
if compare[name[call].func.attrname not_equal[!=] name[function].name] begin[:]
return[None]
variable[current_scope] assign[=] call[name[function].parent.scope, parameter[]]
if <ast.BoolOp object at 0x7da1b025aad0> begin[:]
return[None]
variable[klass] assign[=] call[name[function].parent.frame, parameter[]]
variable[meth_node] assign[=] constant[None]
for taget[name[overridden]] in starred[call[name[klass].local_attr_ancestors, parameter[name[function].name]]] begin[:]
<ast.Try object at 0x7da1b025b0d0>
if <ast.BoolOp object at 0x7da1b0259c30> begin[:]
return[None]
break
variable[params] assign[=] call[name[_signature_from_arguments], parameter[name[function].args]]
variable[args] assign[=] call[name[_signature_from_call], parameter[name[call]]]
if compare[name[meth_node] is_not constant[None]] begin[:]
def function[form_annotations, parameter[annotations]]:
return[<ast.ListComp object at 0x7da1b0259090>]
variable[called_annotations] assign[=] call[name[form_annotations], parameter[name[function].args.annotations]]
variable[overridden_annotations] assign[=] call[name[form_annotations], parameter[name[meth_node].args.annotations]]
if <ast.BoolOp object at 0x7da1b02f1990> begin[:]
if compare[name[called_annotations] not_equal[!=] name[overridden_annotations]] begin[:]
return[None]
if call[name[_definition_equivalent_to_call], parameter[name[params], name[args]]] begin[:]
call[name[self].add_message, parameter[constant[useless-super-delegation]]] | keyword[def] identifier[_check_useless_super_delegation] ( identifier[self] , identifier[function] ):
literal[string]
keyword[if] (
keyword[not] identifier[function] . identifier[is_method] ()
keyword[or] identifier[function] . identifier[decorators]
):
keyword[return]
identifier[body] = identifier[function] . identifier[body]
keyword[if] identifier[len] ( identifier[body] )!= literal[int] :
keyword[return]
identifier[statement] = identifier[body] [ literal[int] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[statement] ,( identifier[astroid] . identifier[Expr] , identifier[astroid] . identifier[Return] )):
keyword[return]
identifier[call] = identifier[statement] . identifier[value]
keyword[if] (
keyword[not] identifier[isinstance] ( identifier[call] , identifier[astroid] . identifier[Call] )
keyword[or] keyword[not] identifier[isinstance] ( identifier[call] . identifier[func] , identifier[astroid] . identifier[Attribute] )
):
keyword[return]
keyword[try] :
identifier[super_call] = identifier[next] ( identifier[call] . identifier[func] . identifier[expr] . identifier[infer] ())
keyword[except] identifier[astroid] . identifier[InferenceError] :
keyword[return]
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[super_call] , identifier[objects] . identifier[Super] ):
keyword[return]
keyword[if] identifier[call] . identifier[func] . identifier[attrname] != identifier[function] . identifier[name] :
keyword[return]
identifier[current_scope] = identifier[function] . identifier[parent] . identifier[scope] ()
keyword[if] (
identifier[super_call] . identifier[mro_pointer] != identifier[current_scope]
keyword[or] keyword[not] identifier[isinstance] ( identifier[super_call] . identifier[type] , identifier[astroid] . identifier[Instance] )
keyword[or] identifier[super_call] . identifier[type] . identifier[name] != identifier[current_scope] . identifier[name]
):
keyword[return]
identifier[klass] = identifier[function] . identifier[parent] . identifier[frame] ()
identifier[meth_node] = keyword[None]
keyword[for] identifier[overridden] keyword[in] identifier[klass] . identifier[local_attr_ancestors] ( identifier[function] . identifier[name] ):
keyword[try] :
identifier[meth_node] = identifier[overridden] [ identifier[function] . identifier[name] ]
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[if] (
keyword[not] identifier[isinstance] ( identifier[meth_node] , identifier[astroid] . identifier[FunctionDef] )
keyword[or] identifier[_has_different_parameters_default_value] (
identifier[meth_node] . identifier[args] , identifier[function] . identifier[args]
)
):
keyword[return]
keyword[break]
identifier[params] = identifier[_signature_from_arguments] ( identifier[function] . identifier[args] )
identifier[args] = identifier[_signature_from_call] ( identifier[call] )
keyword[if] identifier[meth_node] keyword[is] keyword[not] keyword[None] :
keyword[def] identifier[form_annotations] ( identifier[annotations] ):
keyword[return] [
identifier[annotation] . identifier[as_string] () keyword[for] identifier[annotation] keyword[in] identifier[filter] ( keyword[None] , identifier[annotations] )
]
identifier[called_annotations] = identifier[form_annotations] ( identifier[function] . identifier[args] . identifier[annotations] )
identifier[overridden_annotations] = identifier[form_annotations] ( identifier[meth_node] . identifier[args] . identifier[annotations] )
keyword[if] identifier[called_annotations] keyword[and] identifier[overridden_annotations] :
keyword[if] identifier[called_annotations] != identifier[overridden_annotations] :
keyword[return]
keyword[if] identifier[_definition_equivalent_to_call] ( identifier[params] , identifier[args] ):
identifier[self] . identifier[add_message] (
literal[string] , identifier[node] = identifier[function] , identifier[args] =( identifier[function] . identifier[name] ,)
) | def _check_useless_super_delegation(self, function):
"""Check if the given function node is an useless method override
We consider it *useless* if it uses the super() builtin, but having
nothing additional whatsoever than not implementing the method at all.
If the method uses super() to delegate an operation to the rest of the MRO,
and if the method called is the same as the current one, the arguments
passed to super() are the same as the parameters that were passed to
this method, then the method could be removed altogether, by letting
other implementation to take precedence.
"""
if not function.is_method() or function.decorators:
# With decorators is a change of use
return # depends on [control=['if'], data=[]]
body = function.body
if len(body) != 1:
# Multiple statements, which means this overridden method
# could do multiple things we are not aware of.
return # depends on [control=['if'], data=[]]
statement = body[0]
if not isinstance(statement, (astroid.Expr, astroid.Return)):
# Doing something else than what we are interested into.
return # depends on [control=['if'], data=[]]
call = statement.value
if not isinstance(call, astroid.Call) or not isinstance(call.func, astroid.Attribute):
# Not a super() attribute access.
return # depends on [control=['if'], data=[]]
# Should be a super call.
try:
super_call = next(call.func.expr.infer()) # depends on [control=['try'], data=[]]
except astroid.InferenceError:
return # depends on [control=['except'], data=[]]
else:
if not isinstance(super_call, objects.Super):
return # depends on [control=['if'], data=[]]
# The name should be the same.
if call.func.attrname != function.name:
return # depends on [control=['if'], data=[]]
# Should be a super call with the MRO pointer being the
# current class and the type being the current instance.
current_scope = function.parent.scope()
if super_call.mro_pointer != current_scope or not isinstance(super_call.type, astroid.Instance) or super_call.type.name != current_scope.name:
return # depends on [control=['if'], data=[]]
# Check values of default args
klass = function.parent.frame()
meth_node = None
for overridden in klass.local_attr_ancestors(function.name):
# get astroid for the searched method
try:
meth_node = overridden[function.name] # depends on [control=['try'], data=[]]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astroid build from living objects
continue # depends on [control=['except'], data=[]]
if not isinstance(meth_node, astroid.FunctionDef) or _has_different_parameters_default_value(meth_node.args, function.args):
# If the method have an ancestor which is not a
# function then it is legitimate to redefine it
return # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=['overridden']]
# Detect if the parameters are the same as the call's arguments.
params = _signature_from_arguments(function.args)
args = _signature_from_call(call)
if meth_node is not None:
def form_annotations(annotations):
return [annotation.as_string() for annotation in filter(None, annotations)]
called_annotations = form_annotations(function.args.annotations)
overridden_annotations = form_annotations(meth_node.args.annotations)
if called_annotations and overridden_annotations:
if called_annotations != overridden_annotations:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['meth_node']]
if _definition_equivalent_to_call(params, args):
self.add_message('useless-super-delegation', node=function, args=(function.name,)) # depends on [control=['if'], data=[]] |
def cf_dictionary_from_pairs(pairs):
    """
    Creates a CFDictionaryRef object from a list of 2-element tuples
    representing the key and value. Each key should be a CFStringRef and each
    value some sort of CF* type.

    :param pairs:
        A list of 2-element tuples

    :return:
        A CFDictionaryRef
    """
    count = len(pairs)
    # Split the pairs into parallel key/value sequences.
    key_items = []
    value_items = []
    for cf_key, cf_value in pairs:
        key_items.append(cf_key)
        value_items.append(cf_value)
    # Pack the sequences into fixed-length C arrays of the CF types.
    keys = (CFStringRef * count)(*key_items)
    values = (CFTypeRef * count)(*value_items)
    return CoreFoundation.CFDictionaryCreate(
        CoreFoundation.kCFAllocatorDefault,
        _cast_pointer_p(byref(keys)),
        _cast_pointer_p(byref(values)),
        count,
        kCFTypeDictionaryKeyCallBacks,
        kCFTypeDictionaryValueCallBacks
    )
constant[
Creates a CFDictionaryRef object from a list of 2-element tuples
representing the key and value. Each key should be a CFStringRef and each
value some sort of CF* type.
:param pairs:
A list of 2-element tuples
:return:
A CFDictionaryRef
]
variable[length] assign[=] call[name[len], parameter[name[pairs]]]
variable[keys] assign[=] list[[]]
variable[values] assign[=] list[[]]
for taget[name[pair]] in starred[name[pairs]] begin[:]
<ast.Tuple object at 0x7da1aff0e170> assign[=] name[pair]
call[name[keys].append, parameter[name[key]]]
call[name[values].append, parameter[name[value]]]
variable[keys] assign[=] call[binary_operation[name[CFStringRef] * name[length]], parameter[<ast.Starred object at 0x7da1aff0da50>]]
variable[values] assign[=] call[binary_operation[name[CFTypeRef] * name[length]], parameter[<ast.Starred object at 0x7da1aff0ca90>]]
return[call[name[CoreFoundation].CFDictionaryCreate, parameter[name[CoreFoundation].kCFAllocatorDefault, call[name[_cast_pointer_p], parameter[call[name[byref], parameter[name[keys]]]]], call[name[_cast_pointer_p], parameter[call[name[byref], parameter[name[values]]]]], name[length], name[kCFTypeDictionaryKeyCallBacks], name[kCFTypeDictionaryValueCallBacks]]]] | keyword[def] identifier[cf_dictionary_from_pairs] ( identifier[pairs] ):
literal[string]
identifier[length] = identifier[len] ( identifier[pairs] )
identifier[keys] =[]
identifier[values] =[]
keyword[for] identifier[pair] keyword[in] identifier[pairs] :
identifier[key] , identifier[value] = identifier[pair]
identifier[keys] . identifier[append] ( identifier[key] )
identifier[values] . identifier[append] ( identifier[value] )
identifier[keys] =( identifier[CFStringRef] * identifier[length] )(* identifier[keys] )
identifier[values] =( identifier[CFTypeRef] * identifier[length] )(* identifier[values] )
keyword[return] identifier[CoreFoundation] . identifier[CFDictionaryCreate] (
identifier[CoreFoundation] . identifier[kCFAllocatorDefault] ,
identifier[_cast_pointer_p] ( identifier[byref] ( identifier[keys] )),
identifier[_cast_pointer_p] ( identifier[byref] ( identifier[values] )),
identifier[length] ,
identifier[kCFTypeDictionaryKeyCallBacks] ,
identifier[kCFTypeDictionaryValueCallBacks]
) | def cf_dictionary_from_pairs(pairs):
"""
Creates a CFDictionaryRef object from a list of 2-element tuples
representing the key and value. Each key should be a CFStringRef and each
value some sort of CF* type.
:param pairs:
A list of 2-element tuples
:return:
A CFDictionaryRef
"""
length = len(pairs)
keys = []
values = []
for pair in pairs:
(key, value) = pair
keys.append(key)
values.append(value) # depends on [control=['for'], data=['pair']]
keys = (CFStringRef * length)(*keys)
values = (CFTypeRef * length)(*values)
return CoreFoundation.CFDictionaryCreate(CoreFoundation.kCFAllocatorDefault, _cast_pointer_p(byref(keys)), _cast_pointer_p(byref(values)), length, kCFTypeDictionaryKeyCallBacks, kCFTypeDictionaryValueCallBacks) |
def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
               keyid=None, profile=None, split_dns=False, private_zone=False,
               identifier=None, retry_on_rate_limit=None,
               rate_limit_retries=None, retry_on_errors=True, error_retries=5):
    '''
    Get a record from a zone.

    name
        Name of the record to look up.
    zone
        Name of the hosted zone containing the record.
    record_type
        Record type (e.g. ``A``, ``CNAME``); matched case-insensitively.
    split_dns
        Treat the zone as split-horizon; ``private_zone`` then selects
        the private rather than the public zone.
    identifier
        Set identifier for weighted/latency record sets.
    retry_on_errors / error_retries
        Retry throttled or rejected API calls up to ``error_retries`` times.

    Returns an OrderedDict describing the record — empty if the record
    does not exist or the retry budget was exhausted — or ``None`` if the
    zone itself could not be retrieved.

    CLI example::

        salt myminion boto_route53.get_record test.example.org example.org A
    '''
    if region is None:
        region = 'universal'

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if retry_on_rate_limit or rate_limit_retries is not None:
        salt.utils.versions.warn_until(
            'Neon',
            'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments '
            'have been deprecated in favor of \'retry_on_errors\' and '
            '\'error_retries\' respectively. Their functionality will be '
            'removed, as such, their usage is no longer required.'
        )

        if retry_on_rate_limit is not None:
            retry_on_errors = retry_on_rate_limit
        if rate_limit_retries is not None:
            error_retries = rate_limit_retries

    _type = record_type.upper()
    ret = odict.OrderedDict()
    # Encode once, outside the retry loop: previously this ran on every
    # iteration, re-encoding an already-encoded name on each retry.
    name = _encode_name(name)
    # Pre-initialize so exhausting the retry budget falls through to the
    # "no record found" path instead of raising UnboundLocalError below.
    _record = None

    while error_retries > 0:
        try:
            if split_dns:
                _zone = _get_split_zone(zone, conn, private_zone)
            else:
                _zone = conn.get_zone(zone)
            if not _zone:
                msg = 'Failed to retrieve zone {0}'.format(zone)
                log.error(msg)
                return None

            _record = _zone.find_records(name, _type, all=fetch_all, identifier=identifier)

            break  # success; leave the retry loop

        except DNSServerError as e:
            if retry_on_errors:
                if 'Throttling' == e.code:
                    log.debug('Throttled by AWS API.')
                elif 'PriorRequestNotComplete' == e.code:
                    log.debug('The request was rejected by AWS API.\
                              Route 53 was still processing a prior request')
                time.sleep(3)
                error_retries -= 1
                continue
            raise e

    if _record:
        ret['name'] = _decode_name(_record.name)
        ret['value'] = _record.resource_records[0]
        ret['record_type'] = _record.type
        ret['ttl'] = _record.ttl
        if _record.identifier:
            # Weighted/latency record sets carry [identifier, weight].
            ret['identifier'] = []
            ret['identifier'].append(_record.identifier)
            ret['identifier'].append(_record.weight)

    return ret
constant[
Get a record from a zone.
CLI example::
salt myminion boto_route53.get_record test.example.org example.org A
]
if compare[name[region] is constant[None]] begin[:]
variable[region] assign[=] constant[universal]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
if <ast.BoolOp object at 0x7da204346710> begin[:]
call[name[salt].utils.versions.warn_until, parameter[constant[Neon], constant[The 'retry_on_rate_limit' and 'rate_limit_retries' arguments have been deprecated in favor of 'retry_on_errors' and 'error_retries' respectively. Their functionality will be removed, as such, their usage is no longer required.]]]
if compare[name[retry_on_rate_limit] is_not constant[None]] begin[:]
variable[retry_on_errors] assign[=] name[retry_on_rate_limit]
if compare[name[rate_limit_retries] is_not constant[None]] begin[:]
variable[error_retries] assign[=] name[rate_limit_retries]
while compare[name[error_retries] greater[>] constant[0]] begin[:]
<ast.Try object at 0x7da2047e9390>
if name[_record] begin[:]
call[name[ret]][constant[name]] assign[=] call[name[_decode_name], parameter[name[_record].name]]
call[name[ret]][constant[value]] assign[=] call[name[_record].resource_records][constant[0]]
call[name[ret]][constant[record_type]] assign[=] name[_record].type
call[name[ret]][constant[ttl]] assign[=] name[_record].ttl
if name[_record].identifier begin[:]
call[name[ret]][constant[identifier]] assign[=] list[[]]
call[call[name[ret]][constant[identifier]].append, parameter[name[_record].identifier]]
call[call[name[ret]][constant[identifier]].append, parameter[name[_record].weight]]
return[name[ret]] | keyword[def] identifier[get_record] ( identifier[name] , identifier[zone] , identifier[record_type] , identifier[fetch_all] = keyword[False] , identifier[region] = keyword[None] , identifier[key] = keyword[None] ,
identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] , identifier[split_dns] = keyword[False] , identifier[private_zone] = keyword[False] ,
identifier[identifier] = keyword[None] , identifier[retry_on_rate_limit] = keyword[None] ,
identifier[rate_limit_retries] = keyword[None] , identifier[retry_on_errors] = keyword[True] , identifier[error_retries] = literal[int] ):
literal[string]
keyword[if] identifier[region] keyword[is] keyword[None] :
identifier[region] = literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[retry_on_rate_limit] keyword[or] identifier[rate_limit_retries] keyword[is] keyword[not] keyword[None] :
identifier[salt] . identifier[utils] . identifier[versions] . identifier[warn_until] (
literal[string] ,
literal[string]
literal[string]
literal[string]
literal[string]
)
keyword[if] identifier[retry_on_rate_limit] keyword[is] keyword[not] keyword[None] :
identifier[retry_on_errors] = identifier[retry_on_rate_limit]
keyword[if] identifier[rate_limit_retries] keyword[is] keyword[not] keyword[None] :
identifier[error_retries] = identifier[rate_limit_retries]
keyword[while] identifier[error_retries] > literal[int] :
keyword[try] :
keyword[if] identifier[split_dns] :
identifier[_zone] = identifier[_get_split_zone] ( identifier[zone] , identifier[conn] , identifier[private_zone] )
keyword[else] :
identifier[_zone] = identifier[conn] . identifier[get_zone] ( identifier[zone] )
keyword[if] keyword[not] identifier[_zone] :
identifier[msg] = literal[string] . identifier[format] ( identifier[zone] )
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return] keyword[None]
identifier[_type] = identifier[record_type] . identifier[upper] ()
identifier[ret] = identifier[odict] . identifier[OrderedDict] ()
identifier[name] = identifier[_encode_name] ( identifier[name] )
identifier[_record] = identifier[_zone] . identifier[find_records] ( identifier[name] , identifier[_type] , identifier[all] = identifier[fetch_all] , identifier[identifier] = identifier[identifier] )
keyword[break]
keyword[except] identifier[DNSServerError] keyword[as] identifier[e] :
keyword[if] identifier[retry_on_errors] :
keyword[if] literal[string] == identifier[e] . identifier[code] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[elif] literal[string] == identifier[e] . identifier[code] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
identifier[error_retries] -= literal[int]
keyword[continue]
keyword[raise] identifier[e]
keyword[if] identifier[_record] :
identifier[ret] [ literal[string] ]= identifier[_decode_name] ( identifier[_record] . identifier[name] )
identifier[ret] [ literal[string] ]= identifier[_record] . identifier[resource_records] [ literal[int] ]
identifier[ret] [ literal[string] ]= identifier[_record] . identifier[type]
identifier[ret] [ literal[string] ]= identifier[_record] . identifier[ttl]
keyword[if] identifier[_record] . identifier[identifier] :
identifier[ret] [ literal[string] ]=[]
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[_record] . identifier[identifier] )
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[_record] . identifier[weight] )
keyword[return] identifier[ret] | def get_record(name, zone, record_type, fetch_all=False, region=None, key=None, keyid=None, profile=None, split_dns=False, private_zone=False, identifier=None, retry_on_rate_limit=None, rate_limit_retries=None, retry_on_errors=True, error_retries=5):
"""
Get a record from a zone.
CLI example::
salt myminion boto_route53.get_record test.example.org example.org A
"""
if region is None:
region = 'universal' # depends on [control=['if'], data=['region']]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if retry_on_rate_limit or rate_limit_retries is not None:
salt.utils.versions.warn_until('Neon', "The 'retry_on_rate_limit' and 'rate_limit_retries' arguments have been deprecated in favor of 'retry_on_errors' and 'error_retries' respectively. Their functionality will be removed, as such, their usage is no longer required.")
if retry_on_rate_limit is not None:
retry_on_errors = retry_on_rate_limit # depends on [control=['if'], data=['retry_on_rate_limit']]
if rate_limit_retries is not None:
error_retries = rate_limit_retries # depends on [control=['if'], data=['rate_limit_retries']] # depends on [control=['if'], data=[]]
while error_retries > 0:
try:
if split_dns:
_zone = _get_split_zone(zone, conn, private_zone) # depends on [control=['if'], data=[]]
else:
_zone = conn.get_zone(zone)
if not _zone:
msg = 'Failed to retrieve zone {0}'.format(zone)
log.error(msg)
return None # depends on [control=['if'], data=[]]
_type = record_type.upper()
ret = odict.OrderedDict()
name = _encode_name(name)
_record = _zone.find_records(name, _type, all=fetch_all, identifier=identifier)
break # the while True # depends on [control=['try'], data=[]]
except DNSServerError as e:
if retry_on_errors:
if 'Throttling' == e.code:
log.debug('Throttled by AWS API.') # depends on [control=['if'], data=[]]
elif 'PriorRequestNotComplete' == e.code:
log.debug('The request was rejected by AWS API. Route 53 was still processing a prior request') # depends on [control=['if'], data=[]]
time.sleep(3)
error_retries -= 1
continue # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=['error_retries']]
if _record:
ret['name'] = _decode_name(_record.name)
ret['value'] = _record.resource_records[0]
ret['record_type'] = _record.type
ret['ttl'] = _record.ttl
if _record.identifier:
ret['identifier'] = []
ret['identifier'].append(_record.identifier)
ret['identifier'].append(_record.weight) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ret |
def get_regions(self):
'''
Get list of available service bus regions.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Regions/', None),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_region) | def function[get_regions, parameter[self]]:
constant[
Get list of available service bus regions.
]
variable[response] assign[=] call[name[self]._perform_get, parameter[call[name[self]._get_path, parameter[constant[services/serviceBus/Regions/], constant[None]]], constant[None]]]
return[call[name[_MinidomXmlToObject].convert_response_to_feeds, parameter[name[response], name[_ServiceBusManagementXmlSerializer].xml_to_region]]] | keyword[def] identifier[get_regions] ( identifier[self] ):
literal[string]
identifier[response] = identifier[self] . identifier[_perform_get] (
identifier[self] . identifier[_get_path] ( literal[string] , keyword[None] ),
keyword[None] )
keyword[return] identifier[_MinidomXmlToObject] . identifier[convert_response_to_feeds] (
identifier[response] ,
identifier[_ServiceBusManagementXmlSerializer] . identifier[xml_to_region] ) | def get_regions(self):
"""
Get list of available service bus regions.
"""
response = self._perform_get(self._get_path('services/serviceBus/Regions/', None), None)
return _MinidomXmlToObject.convert_response_to_feeds(response, _ServiceBusManagementXmlSerializer.xml_to_region) |
def _header_string(basis_dict):
'''Creates a header with information about a basis set
Information includes description, revision, etc, but not references
'''
tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
header = '-' * 70 + '\n'
header += ' Basis Set Exchange\n'
header += ' Version ' + version() + '\n'
header += ' ' + _main_url + '\n'
header += '-' * 70 + '\n'
header += ' Basis set: ' + basis_dict['name'] + '\n'
header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
header += ' Role: ' + basis_dict['role'] + '\n'
header += tw.fill(' Version: {} ({})'.format(basis_dict['version'],
basis_dict['revision_description'])) + '\n'
header += '-' * 70 + '\n'
return header | def function[_header_string, parameter[basis_dict]]:
constant[Creates a header with information about a basis set
Information includes description, revision, etc, but not references
]
variable[tw] assign[=] call[name[textwrap].TextWrapper, parameter[]]
variable[header] assign[=] binary_operation[binary_operation[constant[-] * constant[70]] + constant[
]]
<ast.AugAssign object at 0x7da2041d8670>
<ast.AugAssign object at 0x7da2041db250>
<ast.AugAssign object at 0x7da2041d9ed0>
<ast.AugAssign object at 0x7da2041d89d0>
<ast.AugAssign object at 0x7da2041d99c0>
<ast.AugAssign object at 0x7da2041db790>
<ast.AugAssign object at 0x7da2041d8ac0>
<ast.AugAssign object at 0x7da2041d83d0>
<ast.AugAssign object at 0x7da2041d80a0>
return[name[header]] | keyword[def] identifier[_header_string] ( identifier[basis_dict] ):
literal[string]
identifier[tw] = identifier[textwrap] . identifier[TextWrapper] ( identifier[initial_indent] = literal[string] , identifier[subsequent_indent] = literal[string] * literal[int] )
identifier[header] = literal[string] * literal[int] + literal[string]
identifier[header] += literal[string]
identifier[header] += literal[string] + identifier[version] ()+ literal[string]
identifier[header] += literal[string] + identifier[_main_url] + literal[string]
identifier[header] += literal[string] * literal[int] + literal[string]
identifier[header] += literal[string] + identifier[basis_dict] [ literal[string] ]+ literal[string]
identifier[header] += identifier[tw] . identifier[fill] ( literal[string] + identifier[basis_dict] [ literal[string] ])+ literal[string]
identifier[header] += literal[string] + identifier[basis_dict] [ literal[string] ]+ literal[string]
identifier[header] += identifier[tw] . identifier[fill] ( literal[string] . identifier[format] ( identifier[basis_dict] [ literal[string] ],
identifier[basis_dict] [ literal[string] ]))+ literal[string]
identifier[header] += literal[string] * literal[int] + literal[string]
keyword[return] identifier[header] | def _header_string(basis_dict):
"""Creates a header with information about a basis set
Information includes description, revision, etc, but not references
"""
tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
header = '-' * 70 + '\n'
header += ' Basis Set Exchange\n'
header += ' Version ' + version() + '\n'
header += ' ' + _main_url + '\n'
header += '-' * 70 + '\n'
header += ' Basis set: ' + basis_dict['name'] + '\n'
header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
header += ' Role: ' + basis_dict['role'] + '\n'
header += tw.fill(' Version: {} ({})'.format(basis_dict['version'], basis_dict['revision_description'])) + '\n'
header += '-' * 70 + '\n'
return header |
def get_state_search_path_list(saltenv='base'):
'''
For the state file system, return a list of paths to search for states
'''
# state cache should be updated before running this method
search_list = []
cachedir = __opts__.get('cachedir', None)
log.info("Searching for files in saltenv: %s", saltenv)
path = cachedir + os.sep + "files" + os.sep + saltenv
search_list.append(path)
return search_list | def function[get_state_search_path_list, parameter[saltenv]]:
constant[
For the state file system, return a list of paths to search for states
]
variable[search_list] assign[=] list[[]]
variable[cachedir] assign[=] call[name[__opts__].get, parameter[constant[cachedir], constant[None]]]
call[name[log].info, parameter[constant[Searching for files in saltenv: %s], name[saltenv]]]
variable[path] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[cachedir] + name[os].sep] + constant[files]] + name[os].sep] + name[saltenv]]
call[name[search_list].append, parameter[name[path]]]
return[name[search_list]] | keyword[def] identifier[get_state_search_path_list] ( identifier[saltenv] = literal[string] ):
literal[string]
identifier[search_list] =[]
identifier[cachedir] = identifier[__opts__] . identifier[get] ( literal[string] , keyword[None] )
identifier[log] . identifier[info] ( literal[string] , identifier[saltenv] )
identifier[path] = identifier[cachedir] + identifier[os] . identifier[sep] + literal[string] + identifier[os] . identifier[sep] + identifier[saltenv]
identifier[search_list] . identifier[append] ( identifier[path] )
keyword[return] identifier[search_list] | def get_state_search_path_list(saltenv='base'):
"""
For the state file system, return a list of paths to search for states
"""
# state cache should be updated before running this method
search_list = []
cachedir = __opts__.get('cachedir', None)
log.info('Searching for files in saltenv: %s', saltenv)
path = cachedir + os.sep + 'files' + os.sep + saltenv
search_list.append(path)
return search_list |
def get_cfn_parameters(self):
"""Return a dictionary of variables with `type` :class:`CFNType`.
Returns:
dict: variables that need to be submitted as CloudFormation
Parameters.
"""
variables = self.get_variables()
output = {}
for key, value in variables.items():
if hasattr(value, "to_parameter_value"):
output[key] = value.to_parameter_value()
return output | def function[get_cfn_parameters, parameter[self]]:
constant[Return a dictionary of variables with `type` :class:`CFNType`.
Returns:
dict: variables that need to be submitted as CloudFormation
Parameters.
]
variable[variables] assign[=] call[name[self].get_variables, parameter[]]
variable[output] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b18b4070>, <ast.Name object at 0x7da1b18b7850>]]] in starred[call[name[variables].items, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[value], constant[to_parameter_value]]] begin[:]
call[name[output]][name[key]] assign[=] call[name[value].to_parameter_value, parameter[]]
return[name[output]] | keyword[def] identifier[get_cfn_parameters] ( identifier[self] ):
literal[string]
identifier[variables] = identifier[self] . identifier[get_variables] ()
identifier[output] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[variables] . identifier[items] ():
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ):
identifier[output] [ identifier[key] ]= identifier[value] . identifier[to_parameter_value] ()
keyword[return] identifier[output] | def get_cfn_parameters(self):
"""Return a dictionary of variables with `type` :class:`CFNType`.
Returns:
dict: variables that need to be submitted as CloudFormation
Parameters.
"""
variables = self.get_variables()
output = {}
for (key, value) in variables.items():
if hasattr(value, 'to_parameter_value'):
output[key] = value.to_parameter_value() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return output |
def sim(adata, tmax_realization=None, as_heatmap=False, shuffle=False,
show=None, save=None):
"""Plot results of simulation.
Parameters
----------
as_heatmap : bool (default: False)
Plot the timeseries as heatmap.
tmax_realization : int or None (default: False)
Number of observations in one realization of the time series. The data matrix
adata.X consists in concatenated realizations.
shuffle : bool, optional (default: False)
Shuffle the data.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
"""
from ... import utils as sc_utils
if tmax_realization is not None: tmax = tmax_realization
elif 'tmax_write' in adata.uns: tmax = adata.uns['tmax_write']
else: tmax = adata.n_obs
n_realizations = adata.n_obs/tmax
if not shuffle:
if not as_heatmap:
timeseries(adata.X,
var_names=adata.var_names,
xlim=[0, 1.25*adata.n_obs],
highlightsX=np.arange(tmax, n_realizations*tmax, tmax),
xlabel='realizations')
else:
# plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d
timeseries_as_heatmap(adata.X,
var_names=adata.var_names,
highlightsX=np.arange(tmax, n_realizations*tmax, tmax))
pl.xticks(np.arange(0, n_realizations*tmax, tmax),
np.arange(n_realizations).astype(int) + 1)
utils.savefig_or_show('sim', save=save, show=show)
else:
# shuffled data
X = adata.X
X, rows = sc_utils.subsample(X, seed=1)
timeseries(X,
var_names=adata.var_names,
xlim=[0, 1.25*adata.n_obs],
highlightsX=np.arange(tmax, n_realizations*tmax, tmax),
xlabel='index (arbitrary order)')
utils.savefig_or_show('sim_shuffled', save=save, show=show) | def function[sim, parameter[adata, tmax_realization, as_heatmap, shuffle, show, save]]:
constant[Plot results of simulation.
Parameters
----------
as_heatmap : bool (default: False)
Plot the timeseries as heatmap.
tmax_realization : int or None (default: False)
Number of observations in one realization of the time series. The data matrix
adata.X consists in concatenated realizations.
shuffle : bool, optional (default: False)
Shuffle the data.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
]
from relative_module[None] import module[utils]
if compare[name[tmax_realization] is_not constant[None]] begin[:]
variable[tmax] assign[=] name[tmax_realization]
variable[n_realizations] assign[=] binary_operation[name[adata].n_obs / name[tmax]]
if <ast.UnaryOp object at 0x7da20e957a90> begin[:]
if <ast.UnaryOp object at 0x7da20e9575b0> begin[:]
call[name[timeseries], parameter[name[adata].X]]
call[name[pl].xticks, parameter[call[name[np].arange, parameter[constant[0], binary_operation[name[n_realizations] * name[tmax]], name[tmax]]], binary_operation[call[call[name[np].arange, parameter[name[n_realizations]]].astype, parameter[name[int]]] + constant[1]]]]
call[name[utils].savefig_or_show, parameter[constant[sim]]] | keyword[def] identifier[sim] ( identifier[adata] , identifier[tmax_realization] = keyword[None] , identifier[as_heatmap] = keyword[False] , identifier[shuffle] = keyword[False] ,
identifier[show] = keyword[None] , identifier[save] = keyword[None] ):
literal[string]
keyword[from] ... keyword[import] identifier[utils] keyword[as] identifier[sc_utils]
keyword[if] identifier[tmax_realization] keyword[is] keyword[not] keyword[None] : identifier[tmax] = identifier[tmax_realization]
keyword[elif] literal[string] keyword[in] identifier[adata] . identifier[uns] : identifier[tmax] = identifier[adata] . identifier[uns] [ literal[string] ]
keyword[else] : identifier[tmax] = identifier[adata] . identifier[n_obs]
identifier[n_realizations] = identifier[adata] . identifier[n_obs] / identifier[tmax]
keyword[if] keyword[not] identifier[shuffle] :
keyword[if] keyword[not] identifier[as_heatmap] :
identifier[timeseries] ( identifier[adata] . identifier[X] ,
identifier[var_names] = identifier[adata] . identifier[var_names] ,
identifier[xlim] =[ literal[int] , literal[int] * identifier[adata] . identifier[n_obs] ],
identifier[highlightsX] = identifier[np] . identifier[arange] ( identifier[tmax] , identifier[n_realizations] * identifier[tmax] , identifier[tmax] ),
identifier[xlabel] = literal[string] )
keyword[else] :
identifier[timeseries_as_heatmap] ( identifier[adata] . identifier[X] ,
identifier[var_names] = identifier[adata] . identifier[var_names] ,
identifier[highlightsX] = identifier[np] . identifier[arange] ( identifier[tmax] , identifier[n_realizations] * identifier[tmax] , identifier[tmax] ))
identifier[pl] . identifier[xticks] ( identifier[np] . identifier[arange] ( literal[int] , identifier[n_realizations] * identifier[tmax] , identifier[tmax] ),
identifier[np] . identifier[arange] ( identifier[n_realizations] ). identifier[astype] ( identifier[int] )+ literal[int] )
identifier[utils] . identifier[savefig_or_show] ( literal[string] , identifier[save] = identifier[save] , identifier[show] = identifier[show] )
keyword[else] :
identifier[X] = identifier[adata] . identifier[X]
identifier[X] , identifier[rows] = identifier[sc_utils] . identifier[subsample] ( identifier[X] , identifier[seed] = literal[int] )
identifier[timeseries] ( identifier[X] ,
identifier[var_names] = identifier[adata] . identifier[var_names] ,
identifier[xlim] =[ literal[int] , literal[int] * identifier[adata] . identifier[n_obs] ],
identifier[highlightsX] = identifier[np] . identifier[arange] ( identifier[tmax] , identifier[n_realizations] * identifier[tmax] , identifier[tmax] ),
identifier[xlabel] = literal[string] )
identifier[utils] . identifier[savefig_or_show] ( literal[string] , identifier[save] = identifier[save] , identifier[show] = identifier[show] ) | def sim(adata, tmax_realization=None, as_heatmap=False, shuffle=False, show=None, save=None):
"""Plot results of simulation.
Parameters
----------
as_heatmap : bool (default: False)
Plot the timeseries as heatmap.
tmax_realization : int or None (default: False)
Number of observations in one realization of the time series. The data matrix
adata.X consists in concatenated realizations.
shuffle : bool, optional (default: False)
Shuffle the data.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
"""
from ... import utils as sc_utils
if tmax_realization is not None:
tmax = tmax_realization # depends on [control=['if'], data=['tmax_realization']]
elif 'tmax_write' in adata.uns:
tmax = adata.uns['tmax_write'] # depends on [control=['if'], data=[]]
else:
tmax = adata.n_obs
n_realizations = adata.n_obs / tmax
if not shuffle:
if not as_heatmap:
timeseries(adata.X, var_names=adata.var_names, xlim=[0, 1.25 * adata.n_obs], highlightsX=np.arange(tmax, n_realizations * tmax, tmax), xlabel='realizations') # depends on [control=['if'], data=[]]
else:
# plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d
timeseries_as_heatmap(adata.X, var_names=adata.var_names, highlightsX=np.arange(tmax, n_realizations * tmax, tmax))
pl.xticks(np.arange(0, n_realizations * tmax, tmax), np.arange(n_realizations).astype(int) + 1)
utils.savefig_or_show('sim', save=save, show=show) # depends on [control=['if'], data=[]]
else:
# shuffled data
X = adata.X
(X, rows) = sc_utils.subsample(X, seed=1)
timeseries(X, var_names=adata.var_names, xlim=[0, 1.25 * adata.n_obs], highlightsX=np.arange(tmax, n_realizations * tmax, tmax), xlabel='index (arbitrary order)')
utils.savefig_or_show('sim_shuffled', save=save, show=show) |
def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row))
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr
for wr in self.__insert():
yield wr | def function[write, parameter[self, rows, keyed]]:
constant[Write rows/keyed_rows to table
]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[keyed_row] assign[=] name[row]
if <ast.UnaryOp object at 0x7da20c6a9a20> begin[:]
variable[keyed_row] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[self].__schema.field_names, name[row]]]]]
variable[keyed_row] assign[=] call[name[self].__convert_row, parameter[name[keyed_row]]]
if call[name[self].__check_existing, parameter[name[keyed_row]]] begin[:]
for taget[name[wr]] in starred[call[name[self].__insert, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1082710>
variable[ret] assign[=] call[name[self].__update, parameter[name[keyed_row]]]
if compare[name[ret] is_not constant[None]] begin[:]
<ast.Yield object at 0x7da1b1082b30>
continue
call[name[self].__buffer.append, parameter[name[keyed_row]]]
if compare[call[name[len], parameter[name[self].__buffer]] greater[>] name[BUFFER_SIZE]] begin[:]
for taget[name[wr]] in starred[call[name[self].__insert, parameter[]]] begin[:]
<ast.Yield object at 0x7da20c76c7c0>
for taget[name[wr]] in starred[call[name[self].__insert, parameter[]]] begin[:]
<ast.Yield object at 0x7da20c76fd30> | keyword[def] identifier[write] ( identifier[self] , identifier[rows] , identifier[keyed] = keyword[False] ):
literal[string]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[keyed_row] = identifier[row]
keyword[if] keyword[not] identifier[keyed] :
identifier[keyed_row] = identifier[dict] ( identifier[zip] ( identifier[self] . identifier[__schema] . identifier[field_names] , identifier[row] ))
identifier[keyed_row] = identifier[self] . identifier[__convert_row] ( identifier[keyed_row] )
keyword[if] identifier[self] . identifier[__check_existing] ( identifier[keyed_row] ):
keyword[for] identifier[wr] keyword[in] identifier[self] . identifier[__insert] ():
keyword[yield] identifier[wr]
identifier[ret] = identifier[self] . identifier[__update] ( identifier[keyed_row] )
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[WrittenRow] ( identifier[keyed_row] , keyword[True] , identifier[ret] keyword[if] identifier[self] . identifier[__autoincrement] keyword[else] keyword[None] )
keyword[continue]
identifier[self] . identifier[__buffer] . identifier[append] ( identifier[keyed_row] )
keyword[if] identifier[len] ( identifier[self] . identifier[__buffer] )> identifier[BUFFER_SIZE] :
keyword[for] identifier[wr] keyword[in] identifier[self] . identifier[__insert] ():
keyword[yield] identifier[wr]
keyword[for] identifier[wr] keyword[in] identifier[self] . identifier[__insert] ():
keyword[yield] identifier[wr] | def write(self, rows, keyed=False):
"""Write rows/keyed_rows to table
"""
for row in rows:
keyed_row = row
if not keyed:
keyed_row = dict(zip(self.__schema.field_names, row)) # depends on [control=['if'], data=[]]
keyed_row = self.__convert_row(keyed_row)
if self.__check_existing(keyed_row):
for wr in self.__insert():
yield wr # depends on [control=['for'], data=['wr']]
ret = self.__update(keyed_row)
if ret is not None:
yield WrittenRow(keyed_row, True, ret if self.__autoincrement else None)
continue # depends on [control=['if'], data=['ret']] # depends on [control=['if'], data=[]]
self.__buffer.append(keyed_row)
if len(self.__buffer) > BUFFER_SIZE:
for wr in self.__insert():
yield wr # depends on [control=['for'], data=['wr']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
for wr in self.__insert():
yield wr # depends on [control=['for'], data=['wr']] |
def set_user_password(self, uid, mode='set_password', password=None):
"""Set user password and (modes)
:param uid: id number of user. see: get_names_uid()['name']
:param mode:
disable = disable user connections
enable = enable user connections
set_password = set or ensure password
test_password = test password is correct
:param password: max 16 char string
(optional when mode is [disable or enable])
:return:
True on success
when mode = test_password, return False on bad password
"""
mode_mask = {
'disable': 0,
'enable': 1,
'set_password': 2,
'test_password': 3
}
data = [uid, mode_mask[mode]]
if password:
password = str(password)
if 21 > len(password) > 16:
password = password.ljust(20, b'\x00')
data[0] |= 0b10000000
elif len(password) > 20:
raise Exception('password has limit of 20 chars')
else:
password = password.ljust(16, "\x00")
data.extend([ord(x) for x in password])
try:
self.xraw_command(netfn=0x06, command=0x47, data=data)
except exc.IpmiException as ie:
if mode == 'test_password':
return False
elif mode in ('enable', 'disable') and ie.ipmicode == 0xcc:
# Some BMCs see redundant calls to password disable/enable
# as invalid
return True
raise
return True | def function[set_user_password, parameter[self, uid, mode, password]]:
constant[Set user password and (modes)
:param uid: id number of user. see: get_names_uid()['name']
:param mode:
disable = disable user connections
enable = enable user connections
set_password = set or ensure password
test_password = test password is correct
:param password: max 16 char string
(optional when mode is [disable or enable])
:return:
True on success
when mode = test_password, return False on bad password
]
variable[mode_mask] assign[=] dictionary[[<ast.Constant object at 0x7da20e955120>, <ast.Constant object at 0x7da20e957280>, <ast.Constant object at 0x7da20e955cf0>, <ast.Constant object at 0x7da20e9540d0>], [<ast.Constant object at 0x7da20e955300>, <ast.Constant object at 0x7da20e956410>, <ast.Constant object at 0x7da20e954f10>, <ast.Constant object at 0x7da20e956890>]]
variable[data] assign[=] list[[<ast.Name object at 0x7da20e9564d0>, <ast.Subscript object at 0x7da20e955450>]]
if name[password] begin[:]
variable[password] assign[=] call[name[str], parameter[name[password]]]
if compare[constant[21] greater[>] call[name[len], parameter[name[password]]]] begin[:]
variable[password] assign[=] call[name[password].ljust, parameter[constant[20], constant[b'\x00']]]
<ast.AugAssign object at 0x7da20e957580>
call[name[data].extend, parameter[<ast.ListComp object at 0x7da20e954a60>]]
<ast.Try object at 0x7da20e955480>
return[constant[True]] | keyword[def] identifier[set_user_password] ( identifier[self] , identifier[uid] , identifier[mode] = literal[string] , identifier[password] = keyword[None] ):
literal[string]
identifier[mode_mask] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}
identifier[data] =[ identifier[uid] , identifier[mode_mask] [ identifier[mode] ]]
keyword[if] identifier[password] :
identifier[password] = identifier[str] ( identifier[password] )
keyword[if] literal[int] > identifier[len] ( identifier[password] )> literal[int] :
identifier[password] = identifier[password] . identifier[ljust] ( literal[int] , literal[string] )
identifier[data] [ literal[int] ]|= literal[int]
keyword[elif] identifier[len] ( identifier[password] )> literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
identifier[password] = identifier[password] . identifier[ljust] ( literal[int] , literal[string] )
identifier[data] . identifier[extend] ([ identifier[ord] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[password] ])
keyword[try] :
identifier[self] . identifier[xraw_command] ( identifier[netfn] = literal[int] , identifier[command] = literal[int] , identifier[data] = identifier[data] )
keyword[except] identifier[exc] . identifier[IpmiException] keyword[as] identifier[ie] :
keyword[if] identifier[mode] == literal[string] :
keyword[return] keyword[False]
keyword[elif] identifier[mode] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[ie] . identifier[ipmicode] == literal[int] :
keyword[return] keyword[True]
keyword[raise]
keyword[return] keyword[True] | def set_user_password(self, uid, mode='set_password', password=None):
"""Set user password and (modes)
:param uid: id number of user. see: get_names_uid()['name']
:param mode:
disable = disable user connections
enable = enable user connections
set_password = set or ensure password
test_password = test password is correct
:param password: max 16 char string
(optional when mode is [disable or enable])
:return:
True on success
when mode = test_password, return False on bad password
"""
mode_mask = {'disable': 0, 'enable': 1, 'set_password': 2, 'test_password': 3}
data = [uid, mode_mask[mode]]
if password:
password = str(password)
if 21 > len(password) > 16:
password = password.ljust(20, b'\x00')
data[0] |= 128 # depends on [control=['if'], data=[]]
elif len(password) > 20:
raise Exception('password has limit of 20 chars') # depends on [control=['if'], data=[]]
else:
password = password.ljust(16, '\x00')
data.extend([ord(x) for x in password]) # depends on [control=['if'], data=[]]
try:
self.xraw_command(netfn=6, command=71, data=data) # depends on [control=['try'], data=[]]
except exc.IpmiException as ie:
if mode == 'test_password':
return False # depends on [control=['if'], data=[]]
elif mode in ('enable', 'disable') and ie.ipmicode == 204:
# Some BMCs see redundant calls to password disable/enable
# as invalid
return True # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['ie']]
return True |
def monochrome(clr):
"""
Returns colors in the same hue with varying brightness/saturation.
"""
def _wrap(x, min, threshold, plus):
if x - min < threshold:
return x + plus
else:
return x - min
colors = colorlist(clr)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.2, 0.2, 0.6)
colors.append(c)
c = clr.copy()
c.brightness = max(0.2, clr.brightness + (1 - clr.brightness) * 0.2)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
colors.append(c)
return colors | def function[monochrome, parameter[clr]]:
constant[
Returns colors in the same hue with varying brightness/saturation.
]
def function[_wrap, parameter[x, min, threshold, plus]]:
if compare[binary_operation[name[x] - name[min]] less[<] name[threshold]] begin[:]
return[binary_operation[name[x] + name[plus]]]
variable[colors] assign[=] call[name[colorlist], parameter[name[clr]]]
variable[c] assign[=] call[name[clr].copy, parameter[]]
name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.5], constant[0.2], constant[0.3]]]
name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.3], constant[0.1], constant[0.3]]]
call[name[colors].append, parameter[name[c]]]
variable[c] assign[=] call[name[clr].copy, parameter[]]
name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.2], constant[0.2], constant[0.6]]]
call[name[colors].append, parameter[name[c]]]
variable[c] assign[=] call[name[clr].copy, parameter[]]
name[c].brightness assign[=] call[name[max], parameter[constant[0.2], binary_operation[name[clr].brightness + binary_operation[binary_operation[constant[1] - name[clr].brightness] * constant[0.2]]]]]
name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.3], constant[0.1], constant[0.3]]]
call[name[colors].append, parameter[name[c]]]
variable[c] assign[=] call[name[clr].copy, parameter[]]
name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.5], constant[0.2], constant[0.3]]]
call[name[colors].append, parameter[name[c]]]
return[name[colors]] | keyword[def] identifier[monochrome] ( identifier[clr] ):
literal[string]
keyword[def] identifier[_wrap] ( identifier[x] , identifier[min] , identifier[threshold] , identifier[plus] ):
keyword[if] identifier[x] - identifier[min] < identifier[threshold] :
keyword[return] identifier[x] + identifier[plus]
keyword[else] :
keyword[return] identifier[x] - identifier[min]
identifier[colors] = identifier[colorlist] ( identifier[clr] )
identifier[c] = identifier[clr] . identifier[copy] ()
identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] )
identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . identifier[saturation] , literal[int] , literal[int] , literal[int] )
identifier[colors] . identifier[append] ( identifier[c] )
identifier[c] = identifier[clr] . identifier[copy] ()
identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] )
identifier[colors] . identifier[append] ( identifier[c] )
identifier[c] = identifier[clr] . identifier[copy] ()
identifier[c] . identifier[brightness] = identifier[max] ( literal[int] , identifier[clr] . identifier[brightness] +( literal[int] - identifier[clr] . identifier[brightness] )* literal[int] )
identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . identifier[saturation] , literal[int] , literal[int] , literal[int] )
identifier[colors] . identifier[append] ( identifier[c] )
identifier[c] = identifier[clr] . identifier[copy] ()
identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] )
identifier[colors] . identifier[append] ( identifier[c] )
keyword[return] identifier[colors] | def monochrome(clr):
"""
Returns colors in the same hue with varying brightness/saturation.
"""
def _wrap(x, min, threshold, plus):
if x - min < threshold:
return x + plus # depends on [control=['if'], data=[]]
else:
return x - min
colors = colorlist(clr)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.2, 0.2, 0.6)
colors.append(c)
c = clr.copy()
c.brightness = max(0.2, clr.brightness + (1 - clr.brightness) * 0.2)
c.saturation = _wrap(clr.saturation, 0.3, 0.1, 0.3)
colors.append(c)
c = clr.copy()
c.brightness = _wrap(clr.brightness, 0.5, 0.2, 0.3)
colors.append(c)
return colors |
def make_sqlite_url(filename: str) -> str:
    """Build an SQLAlchemy connection URL for an SQLite database file.

    The filename is resolved to an absolute path first, so the resulting
    URL does not depend on the current working directory.
    """
    return "sqlite:///{}".format(os.path.abspath(filename))
constant[
Makes an SQLAlchemy URL for a SQLite database.
]
variable[absfile] assign[=] call[name[os].path.abspath, parameter[name[filename]]]
return[call[constant[sqlite://{host}/{path}].format, parameter[]]] | keyword[def] identifier[make_sqlite_url] ( identifier[filename] : identifier[str] )-> identifier[str] :
literal[string]
identifier[absfile] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[filename] )
keyword[return] literal[string] . identifier[format] ( identifier[host] = literal[string] , identifier[path] = identifier[absfile] ) | def make_sqlite_url(filename: str) -> str:
"""
Makes an SQLAlchemy URL for a SQLite database.
"""
absfile = os.path.abspath(filename)
return 'sqlite://{host}/{path}'.format(host='', path=absfile) |
def gmeta_pop(gmeta, info=False):
    """Remove GMeta wrapping from a Globus Search result.

    This function can be called on the raw GlobusHTTPResponse that Search
    returns, or a string or dictionary representation of it.

    Arguments:
        gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result
            to unwrap.
        info (bool): If ``False``, will return a list of the results
            and discard the metadata. If ``True``, will return a tuple
            containing the results list, and other information about the
            query.
            **Default**: ``False``.

    Returns:
        list (if ``info=False``): The unwrapped results.
        tuple (if ``info=True``): The unwrapped results, and a dictionary
            of query information.

    Raises:
        TypeError: If ``gmeta`` is not one of the accepted types.
    """
    # isinstance (rather than ``type(x) is``) is the idiomatic check and
    # also accepts subclasses of the supported types.
    if isinstance(gmeta, GlobusHTTPResponse):
        gmeta = json.loads(gmeta.text)
    elif isinstance(gmeta, str):
        gmeta = json.loads(gmeta)
    elif not isinstance(gmeta, dict):
        raise TypeError("gmeta must be dict, GlobusHTTPResponse, or JSON string")
    # Flatten every entry's "content" list into one flat results list.
    results = [con for res in gmeta["gmeta"] for con in res["content"]]
    if info:
        fyi = {
            "total_query_matches": gmeta.get("total")
        }
        return results, fyi
    return results
constant[Remove GMeta wrapping from a Globus Search result.
This function can be called on the raw GlobusHTTPResponse that Search returns,
or a string or dictionary representation of it.
Arguments:
gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.
info (bool): If ``False``, will return a list of the results
and discard the metadata. If ``True``, will return a tuple containing
the results list, and other information about the query.
**Default**: ``False``.
Returns:
list (if ``info=False``): The unwrapped results.
tuple (if ``info=True``): The unwrapped results, and a dictionary of query information.
]
if compare[call[name[type], parameter[name[gmeta]]] is name[GlobusHTTPResponse]] begin[:]
variable[gmeta] assign[=] call[name[json].loads, parameter[name[gmeta].text]]
variable[results] assign[=] list[[]]
for taget[name[res]] in starred[call[name[gmeta]][constant[gmeta]]] begin[:]
for taget[name[con]] in starred[call[name[res]][constant[content]]] begin[:]
call[name[results].append, parameter[name[con]]]
if name[info] begin[:]
variable[fyi] assign[=] dictionary[[<ast.Constant object at 0x7da1b23c2080>], [<ast.Call object at 0x7da1b23c3460>]]
return[tuple[[<ast.Name object at 0x7da1b23c2890>, <ast.Name object at 0x7da1b23c0a60>]]] | keyword[def] identifier[gmeta_pop] ( identifier[gmeta] , identifier[info] = keyword[False] ):
literal[string]
keyword[if] identifier[type] ( identifier[gmeta] ) keyword[is] identifier[GlobusHTTPResponse] :
identifier[gmeta] = identifier[json] . identifier[loads] ( identifier[gmeta] . identifier[text] )
keyword[elif] identifier[type] ( identifier[gmeta] ) keyword[is] identifier[str] :
identifier[gmeta] = identifier[json] . identifier[loads] ( identifier[gmeta] )
keyword[elif] identifier[type] ( identifier[gmeta] ) keyword[is] keyword[not] identifier[dict] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[results] =[]
keyword[for] identifier[res] keyword[in] identifier[gmeta] [ literal[string] ]:
keyword[for] identifier[con] keyword[in] identifier[res] [ literal[string] ]:
identifier[results] . identifier[append] ( identifier[con] )
keyword[if] identifier[info] :
identifier[fyi] ={
literal[string] : identifier[gmeta] . identifier[get] ( literal[string] )
}
keyword[return] identifier[results] , identifier[fyi]
keyword[else] :
keyword[return] identifier[results] | def gmeta_pop(gmeta, info=False):
"""Remove GMeta wrapping from a Globus Search result.
This function can be called on the raw GlobusHTTPResponse that Search returns,
or a string or dictionary representation of it.
Arguments:
gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.
info (bool): If ``False``, will return a list of the results
and discard the metadata. If ``True``, will return a tuple containing
the results list, and other information about the query.
**Default**: ``False``.
Returns:
list (if ``info=False``): The unwrapped results.
tuple (if ``info=True``): The unwrapped results, and a dictionary of query information.
"""
if type(gmeta) is GlobusHTTPResponse:
gmeta = json.loads(gmeta.text) # depends on [control=['if'], data=[]]
elif type(gmeta) is str:
gmeta = json.loads(gmeta) # depends on [control=['if'], data=[]]
elif type(gmeta) is not dict:
raise TypeError('gmeta must be dict, GlobusHTTPResponse, or JSON string') # depends on [control=['if'], data=[]]
results = []
for res in gmeta['gmeta']:
for con in res['content']:
results.append(con) # depends on [control=['for'], data=['con']] # depends on [control=['for'], data=['res']]
if info:
fyi = {'total_query_matches': gmeta.get('total')}
return (results, fyi) # depends on [control=['if'], data=[]]
else:
return results |
def add_labels(self, objects, count=1):
    """Add multiple labels to the sheet.

    Parameters
    ----------
    objects: iterable
        An iterable of the objects to add. Each of these will be passed to
        the add_label method. Note that if this is a generator it will be
        consumed.
    count: positive integer or iterable of positive integers, default 1
        The number of copies of each label to add. If a single integer,
        that many copies of every label are added. If an iterable, then
        each value specifies how many copies of the corresponding label to
        add. The iterables are advanced in parallel until one is exhausted;
        extra values in the other one are ignored. This means that if there
        are fewer count entries than objects, the objects corresponding to
        the missing counts will not be added to the sheet.
        Note that if this is a generator it will be consumed. Also note
        that the drawing function will only be called once for each label
        and the results copied for the repeats. If the drawing function
        maintains any state internally then using this parameter may break
        it.
    """
    # Normalise ``count`` into an iterator of per-object counts.  A plain
    # integer becomes an infinite stream of that value; anything else is
    # passed through iter(), which is a no-op on iterators and replaces
    # the old Python-2-era hasattr(count, 'next') dance.
    try:
        count = int(count)
    except TypeError:
        counts = iter(count)
    else:
        counts = repeat(count)
    # zip() advances both streams in lockstep and stops as soon as either
    # is exhausted -- exactly the pairing behaviour documented above.
    for obj, thiscount in zip(objects, counts):
        self._draw_label(obj, thiscount)
constant[Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
]
<ast.Try object at 0x7da204962da0>
if <ast.BoolOp object at 0x7da1b031e4a0> begin[:]
variable[count] assign[=] call[name[iter], parameter[name[count]]]
for taget[name[obj]] in starred[name[objects]] begin[:]
<ast.Try object at 0x7da18eb56cb0>
call[name[self]._draw_label, parameter[name[obj], name[thiscount]]] | keyword[def] identifier[add_labels] ( identifier[self] , identifier[objects] , identifier[count] = literal[int] ):
literal[string]
keyword[try] :
identifier[count] = identifier[int] ( identifier[count] )
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[else] :
identifier[count] = identifier[repeat] ( identifier[count] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[count] , literal[string] ) keyword[and] keyword[not] identifier[hasattr] ( identifier[count] , literal[string] ):
identifier[count] = identifier[iter] ( identifier[count] )
keyword[for] identifier[obj] keyword[in] identifier[objects] :
keyword[try] :
identifier[thiscount] = identifier[next] ( identifier[count] )
keyword[except] identifier[StopIteration] :
keyword[break]
identifier[self] . identifier[_draw_label] ( identifier[obj] , identifier[thiscount] ) | def add_labels(self, objects, count=1):
"""Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
"""
# If we can convert it to an int, do so and use the itertools.repeat()
# method to create an infinite iterator from it. Otherwise, assume it
# is an iterable or sequence.
try:
count = int(count) # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
else:
count = repeat(count)
# If it is not an iterable (e.g., a list or range object),
# create an iterator over it.
if not hasattr(count, 'next') and (not hasattr(count, '__next__')):
count = iter(count) # depends on [control=['if'], data=[]]
# Go through the objects.
for obj in objects:
# Check we have a count for this one.
try:
thiscount = next(count) # depends on [control=['try'], data=[]]
except StopIteration:
break # depends on [control=['except'], data=[]]
# Draw it.
self._draw_label(obj, thiscount) # depends on [control=['for'], data=['obj']] |
def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10,
                    cluster_selection_method='eom',
                    allow_single_cluster=False,
                    match_reference_implementation=False):
    """Converts a pretrained tree and cluster size into a
    set of labels and probabilities.

    The single-linkage tree is condensed at ``min_cluster_size``,
    per-cluster stabilities are computed, and flat clusters are then
    extracted with the requested selection method.
    """
    condensed = condense_tree(single_linkage_tree, min_cluster_size)
    stability_by_cluster = compute_stability(condensed)
    labels, probabilities, stabilities = get_clusters(
        condensed,
        stability_by_cluster,
        cluster_selection_method,
        allow_single_cluster,
        match_reference_implementation,
    )
    return (labels, probabilities, stabilities, condensed,
            single_linkage_tree)
constant[Converts a pretrained tree and cluster size into a
set of labels and probabilities.
]
variable[condensed_tree] assign[=] call[name[condense_tree], parameter[name[single_linkage_tree], name[min_cluster_size]]]
variable[stability_dict] assign[=] call[name[compute_stability], parameter[name[condensed_tree]]]
<ast.Tuple object at 0x7da1b1d65ab0> assign[=] call[name[get_clusters], parameter[name[condensed_tree], name[stability_dict], name[cluster_selection_method], name[allow_single_cluster], name[match_reference_implementation]]]
return[tuple[[<ast.Name object at 0x7da18c4ce7a0>, <ast.Name object at 0x7da18c4cdf60>, <ast.Name object at 0x7da18c4cc2b0>, <ast.Name object at 0x7da18c4ce9b0>, <ast.Name object at 0x7da18c4ce710>]]] | keyword[def] identifier[_tree_to_labels] ( identifier[X] , identifier[single_linkage_tree] , identifier[min_cluster_size] = literal[int] ,
identifier[cluster_selection_method] = literal[string] ,
identifier[allow_single_cluster] = keyword[False] ,
identifier[match_reference_implementation] = keyword[False] ):
literal[string]
identifier[condensed_tree] = identifier[condense_tree] ( identifier[single_linkage_tree] ,
identifier[min_cluster_size] )
identifier[stability_dict] = identifier[compute_stability] ( identifier[condensed_tree] )
identifier[labels] , identifier[probabilities] , identifier[stabilities] = identifier[get_clusters] ( identifier[condensed_tree] ,
identifier[stability_dict] ,
identifier[cluster_selection_method] ,
identifier[allow_single_cluster] ,
identifier[match_reference_implementation] )
keyword[return] ( identifier[labels] , identifier[probabilities] , identifier[stabilities] , identifier[condensed_tree] ,
identifier[single_linkage_tree] ) | def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10, cluster_selection_method='eom', allow_single_cluster=False, match_reference_implementation=False):
"""Converts a pretrained tree and cluster size into a
set of labels and probabilities.
"""
condensed_tree = condense_tree(single_linkage_tree, min_cluster_size)
stability_dict = compute_stability(condensed_tree)
(labels, probabilities, stabilities) = get_clusters(condensed_tree, stability_dict, cluster_selection_method, allow_single_cluster, match_reference_implementation)
return (labels, probabilities, stabilities, condensed_tree, single_linkage_tree) |
def substitute_str_in_file(i):
    """Replace every occurrence of one string with another inside a file.

    Input:  {
              filename - file to edit in place
              string1  - string to be replaced
              string2  - replacement string
            }

    Output: {
              return  - 0 on success, 16 if the file was not found,
                        otherwise a non-zero error code
              (error) - error text when return > 0
            }
    """
    target_file = i['filename']
    old = i['string1']
    new = i['string2']

    # Read the whole file as unicode text.
    rr = load_text_file({'text_file': target_file})
    if rr['return'] > 0:
        return rr

    # Perform the substitution and write the result back.
    updated = rr['string'].replace(old, new)
    rr = save_text_file({'text_file': target_file, 'string': updated})
    if rr['return'] > 0:
        return rr

    return {'return': 0}
constant[
Input: {
filename - file
string1 - string to be replaced
string2 - replace string
}
Output: {
return - return code = 0, if successful
= 16, if file not found
> 0, if error
(error) - error text if return > 0
}
]
variable[fn] assign[=] call[name[i]][constant[filename]]
variable[s1] assign[=] call[name[i]][constant[string1]]
variable[s2] assign[=] call[name[i]][constant[string2]]
variable[r] assign[=] call[name[load_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b23ef880>], [<ast.Name object at 0x7da1b23ef010>]]]]
if compare[call[name[r]][constant[return]] greater[>] constant[0]] begin[:]
return[name[r]]
variable[x] assign[=] call[name[r]][constant[string]]
variable[x] assign[=] call[name[x].replace, parameter[name[s1], name[s2]]]
variable[r] assign[=] call[name[save_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b2270e80>, <ast.Constant object at 0x7da1b2273100>], [<ast.Name object at 0x7da1b22716f0>, <ast.Name object at 0x7da1b22710f0>]]]]
if compare[call[name[r]][constant[return]] greater[>] constant[0]] begin[:]
return[name[r]]
return[dictionary[[<ast.Constant object at 0x7da1b2270ac0>], [<ast.Constant object at 0x7da1b2272e60>]]] | keyword[def] identifier[substitute_str_in_file] ( identifier[i] ):
literal[string]
identifier[fn] = identifier[i] [ literal[string] ]
identifier[s1] = identifier[i] [ literal[string] ]
identifier[s2] = identifier[i] [ literal[string] ]
identifier[r] = identifier[load_text_file] ({ literal[string] : identifier[fn] })
keyword[if] identifier[r] [ literal[string] ]> literal[int] : keyword[return] identifier[r]
identifier[x] = identifier[r] [ literal[string] ]
identifier[x] = identifier[x] . identifier[replace] ( identifier[s1] , identifier[s2] )
identifier[r] = identifier[save_text_file] ({ literal[string] : identifier[fn] , literal[string] : identifier[x] })
keyword[if] identifier[r] [ literal[string] ]> literal[int] : keyword[return] identifier[r]
keyword[return] { literal[string] : literal[int] } | def substitute_str_in_file(i):
"""
Input: {
filename - file
string1 - string to be replaced
string2 - replace string
}
Output: {
return - return code = 0, if successful
= 16, if file not found
> 0, if error
(error) - error text if return > 0
}
"""
fn = i['filename']
s1 = i['string1']
s2 = i['string2']
# Load text file (unicode)
r = load_text_file({'text_file': fn})
if r['return'] > 0:
return r # depends on [control=['if'], data=[]]
# Replace
x = r['string']
x = x.replace(s1, s2)
# Save text file (unicode)
r = save_text_file({'text_file': fn, 'string': x})
if r['return'] > 0:
return r # depends on [control=['if'], data=[]]
return {'return': 0} |
def volume(self):
    """
    Mesh volume

    Computed by sizing every cell (volume only) and summing the
    resulting per-cell 'Volume' array.

    Returns
    -------
    volume : float
        Total volume of the mesh.
    """
    cell_data = self.compute_cell_sizes(length=False, area=False,
                                        volume=True)
    return np.sum(cell_data.cell_arrays['Volume'])
return np.sum(sizes.cell_arrays['Volume']) | def function[volume, parameter[self]]:
constant[
Mesh volume
Returns
-------
volume : float
Total volume of the mesh.
]
variable[sizes] assign[=] call[name[self].compute_cell_sizes, parameter[]]
return[call[name[np].sum, parameter[call[name[sizes].cell_arrays][constant[Volume]]]]] | keyword[def] identifier[volume] ( identifier[self] ):
literal[string]
identifier[sizes] = identifier[self] . identifier[compute_cell_sizes] ( identifier[length] = keyword[False] , identifier[area] = keyword[False] , identifier[volume] = keyword[True] )
keyword[return] identifier[np] . identifier[sum] ( identifier[sizes] . identifier[cell_arrays] [ literal[string] ]) | def volume(self):
"""
Mesh volume
Returns
-------
volume : float
Total volume of the mesh.
"""
sizes = self.compute_cell_sizes(length=False, area=False, volume=True)
return np.sum(sizes.cell_arrays['Volume']) |
def strip_leading_comments(text):
    """Strips the leading whitespaces and % from the given text.

    Adapted from textwrap.dedent: find the longest prefix (as matched by
    the module-level ``_leading_whitespace_re``) common to all non-blank
    lines and remove it from the start of every line.
    """
    # Blank out whitespace-only lines so they cannot shorten the margin.
    text = _whitespace_only_re.sub('', text)
    indents = _leading_whitespace_re.findall(text)

    # Reduce the per-line prefixes to their longest common prefix.
    margin = None
    for indent in indents:
        if margin is None:
            margin = indent
        elif indent.startswith(margin):
            # Current line more deeply indented than previous winner:
            # no change (previous winner is still on top).
            pass
        elif margin.startswith(indent):
            # Current line consistent with and no deeper than previous
            # winner: it's the new winner.
            margin = indent
        else:
            # Current line and previous winner have no common prefix:
            # there is no margin.
            margin = ""
            break
    # (Removed the dead ``if 0 and margin:`` debug assertion block that
    # could never execute.)
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
constant[Strips the leading whitespaces and % from the given text.
Adapted from textwrap.dedent
]
variable[margin] assign[=] constant[None]
variable[text] assign[=] call[name[_whitespace_only_re].sub, parameter[constant[], name[text]]]
variable[indents] assign[=] call[name[_leading_whitespace_re].findall, parameter[name[text]]]
for taget[name[indent]] in starred[name[indents]] begin[:]
if compare[name[margin] is constant[None]] begin[:]
variable[margin] assign[=] name[indent]
if <ast.BoolOp object at 0x7da2046231c0> begin[:]
for taget[name[line]] in starred[call[name[text].split, parameter[constant[
]]]] begin[:]
assert[<ast.BoolOp object at 0x7da2046200a0>]
if name[margin] begin[:]
variable[text] assign[=] call[name[re].sub, parameter[binary_operation[constant[(?m)^] + name[margin]], constant[], name[text]]]
return[name[text]] | keyword[def] identifier[strip_leading_comments] ( identifier[text] ):
literal[string]
identifier[margin] = keyword[None]
identifier[text] = identifier[_whitespace_only_re] . identifier[sub] ( literal[string] , identifier[text] )
identifier[indents] = identifier[_leading_whitespace_re] . identifier[findall] ( identifier[text] )
keyword[for] identifier[indent] keyword[in] identifier[indents] :
keyword[if] identifier[margin] keyword[is] keyword[None] :
identifier[margin] = identifier[indent]
keyword[elif] identifier[indent] . identifier[startswith] ( identifier[margin] ):
keyword[pass]
keyword[elif] identifier[margin] . identifier[startswith] ( identifier[indent] ):
identifier[margin] = identifier[indent]
keyword[else] :
identifier[margin] = literal[string]
keyword[break]
keyword[if] literal[int] keyword[and] identifier[margin] :
keyword[for] identifier[line] keyword[in] identifier[text] . identifier[split] ( literal[string] ):
keyword[assert] keyword[not] identifier[line] keyword[or] identifier[line] . identifier[startswith] ( identifier[margin] ), literal[string] %( identifier[line] , identifier[margin] )
keyword[if] identifier[margin] :
identifier[text] = identifier[re] . identifier[sub] ( literal[string] + identifier[margin] , literal[string] , identifier[text] )
keyword[return] identifier[text] | def strip_leading_comments(text):
"""Strips the leading whitespaces and % from the given text.
Adapted from textwrap.dedent
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent # depends on [control=['if'], data=['margin']]
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass # depends on [control=['if'], data=[]]
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent # depends on [control=['if'], data=[]]
else:
# Current line and previous winner have no common whitespace:
# there is no margin.
margin = ''
break # depends on [control=['for'], data=['indent']]
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split('\n'):
assert not line or line.startswith(margin), 'line = %r, margin = %r' % (line, margin) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
if margin:
text = re.sub('(?m)^' + margin, '', text) # depends on [control=['if'], data=[]]
return text |
def get_system_path():
    """Return the path that Windows will search for dlls.

    On Windows (with pywin32 available) this is the system directory,
    the legacy 16-bit SYSTEM directory, and the Windows directory,
    followed by the entries of the PATH environment variable; on other
    platforms it is just the PATH entries.
    """
    _bpath = []
    if is_win:
        try:
            import win32api
        except ImportError:
            # Without pywin32 the system directories cannot be resolved;
            # fall back to PATH alone and tell the user why.
            # (logger.warning replaces the deprecated logger.warn alias.)
            logger.warning("Cannot determine your Windows or System directories")
            logger.warning("Please add them to your PATH if .dlls are not found")
            logger.warning("or install http://sourceforge.net/projects/pywin32/")
        else:
            sysdir = win32api.GetSystemDirectory()
            # Legacy 16-bit SYSTEM directory sitting next to system32.
            sysdir2 = os.path.normpath(os.path.join(sysdir, '..', 'SYSTEM'))
            windir = win32api.GetWindowsDirectory()
            _bpath = [sysdir, sysdir2, windir]
    _bpath.extend(compat.getenv('PATH', '').split(os.pathsep))
    return _bpath
constant[Return the path that Windows will search for dlls.]
variable[_bpath] assign[=] list[[]]
if name[is_win] begin[:]
<ast.Try object at 0x7da18f7239d0>
call[name[_bpath].extend, parameter[call[call[name[compat].getenv, parameter[constant[PATH], constant[]]].split, parameter[name[os].pathsep]]]]
return[name[_bpath]] | keyword[def] identifier[get_system_path] ():
literal[string]
identifier[_bpath] =[]
keyword[if] identifier[is_win] :
keyword[try] :
keyword[import] identifier[win32api]
keyword[except] identifier[ImportError] :
identifier[logger] . identifier[warn] ( literal[string] )
identifier[logger] . identifier[warn] ( literal[string] )
identifier[logger] . identifier[warn] ( literal[string] )
keyword[else] :
identifier[sysdir] = identifier[win32api] . identifier[GetSystemDirectory] ()
identifier[sysdir2] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[sysdir] , literal[string] , literal[string] ))
identifier[windir] = identifier[win32api] . identifier[GetWindowsDirectory] ()
identifier[_bpath] =[ identifier[sysdir] , identifier[sysdir2] , identifier[windir] ]
identifier[_bpath] . identifier[extend] ( identifier[compat] . identifier[getenv] ( literal[string] , literal[string] ). identifier[split] ( identifier[os] . identifier[pathsep] ))
keyword[return] identifier[_bpath] | def get_system_path():
"""Return the path that Windows will search for dlls."""
_bpath = []
if is_win:
try:
import win32api # depends on [control=['try'], data=[]]
except ImportError:
logger.warn('Cannot determine your Windows or System directories')
logger.warn('Please add them to your PATH if .dlls are not found')
logger.warn('or install http://sourceforge.net/projects/pywin32/') # depends on [control=['except'], data=[]]
else:
sysdir = win32api.GetSystemDirectory()
sysdir2 = os.path.normpath(os.path.join(sysdir, '..', 'SYSTEM'))
windir = win32api.GetWindowsDirectory()
_bpath = [sysdir, sysdir2, windir] # depends on [control=['if'], data=[]]
_bpath.extend(compat.getenv('PATH', '').split(os.pathsep))
return _bpath |
def render_cvmfs_sc(cvmfs_volume):
    """Render REANA_CVMFS_SC_TEMPLATE for the given CVMFS volume.

    :param cvmfs_volume: Key into ``CVMFS_REPOSITORIES`` naming the
        repository to mount.
    :returns: A fresh storage-class manifest dict with the name and
        repository filled in.
    """
    import copy  # local import keeps the module's dependency surface unchanged

    name = CVMFS_REPOSITORIES[cvmfs_volume]
    # deepcopy, not dict(): dict() only copies the top level, so mutating
    # the nested 'metadata'/'parameters' dicts below would corrupt the
    # shared REANA_CVMFS_SC_TEMPLATE for every subsequent call.
    rendered_template = copy.deepcopy(REANA_CVMFS_SC_TEMPLATE)
    rendered_template['metadata']['name'] = "csi-cvmfs-{}".format(name)
    rendered_template['parameters']['repository'] = cvmfs_volume
    return rendered_template
constant[Render REANA_CVMFS_SC_TEMPLATE.]
variable[name] assign[=] call[name[CVMFS_REPOSITORIES]][name[cvmfs_volume]]
variable[rendered_template] assign[=] call[name[dict], parameter[name[REANA_CVMFS_SC_TEMPLATE]]]
call[call[name[rendered_template]][constant[metadata]]][constant[name]] assign[=] call[constant[csi-cvmfs-{}].format, parameter[name[name]]]
call[call[name[rendered_template]][constant[parameters]]][constant[repository]] assign[=] name[cvmfs_volume]
return[name[rendered_template]] | keyword[def] identifier[render_cvmfs_sc] ( identifier[cvmfs_volume] ):
literal[string]
identifier[name] = identifier[CVMFS_REPOSITORIES] [ identifier[cvmfs_volume] ]
identifier[rendered_template] = identifier[dict] ( identifier[REANA_CVMFS_SC_TEMPLATE] )
identifier[rendered_template] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[rendered_template] [ literal[string] ][ literal[string] ]= identifier[cvmfs_volume]
keyword[return] identifier[rendered_template] | def render_cvmfs_sc(cvmfs_volume):
"""Render REANA_CVMFS_SC_TEMPLATE."""
name = CVMFS_REPOSITORIES[cvmfs_volume]
rendered_template = dict(REANA_CVMFS_SC_TEMPLATE)
rendered_template['metadata']['name'] = 'csi-cvmfs-{}'.format(name)
rendered_template['parameters']['repository'] = cvmfs_volume
return rendered_template |
def _set_object_view(self, session):
"""Sets the underlying object views to match current view"""
for obj_name in self._object_views:
if self._object_views[obj_name] == PLENARY:
try:
getattr(session, 'use_plenary_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_comparative_' + obj_name + '_view')()
except AttributeError:
pass | def function[_set_object_view, parameter[self, session]]:
constant[Sets the underlying object views to match current view]
for taget[name[obj_name]] in starred[name[self]._object_views] begin[:]
if compare[call[name[self]._object_views][name[obj_name]] equal[==] name[PLENARY]] begin[:]
<ast.Try object at 0x7da20e9550f0> | keyword[def] identifier[_set_object_view] ( identifier[self] , identifier[session] ):
literal[string]
keyword[for] identifier[obj_name] keyword[in] identifier[self] . identifier[_object_views] :
keyword[if] identifier[self] . identifier[_object_views] [ identifier[obj_name] ]== identifier[PLENARY] :
keyword[try] :
identifier[getattr] ( identifier[session] , literal[string] + identifier[obj_name] + literal[string] )()
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[getattr] ( identifier[session] , literal[string] + identifier[obj_name] + literal[string] )()
keyword[except] identifier[AttributeError] :
keyword[pass] | def _set_object_view(self, session):
"""Sets the underlying object views to match current view"""
for obj_name in self._object_views:
if self._object_views[obj_name] == PLENARY:
try:
getattr(session, 'use_plenary_' + obj_name + '_view')() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
getattr(session, 'use_comparative_' + obj_name + '_view')() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['obj_name']] |
def _call(callback, args=(), kwargs=None):
    """
    Calls a callback with optional args and keyword args lists. This method exists so
    we can inspect the `_max_calls` attribute that's set by `_on`. If this value is None,
    the callback is considered to have no limit. Otherwise, an integer value is expected
    and decremented until there are no remaining calls

    :param callback: callable to invoke; may carry a ``_max_calls`` attribute
    :param args: positional arguments forwarded to the callback
    :param kwargs: keyword arguments forwarded to the callback
    """
    # Immutable/None defaults replace the previous ``args=[]``/``kwargs={}``
    # signature, avoiding the shared-mutable-default pitfall.  Behavior is
    # unchanged: the values are only unpacked, never mutated.
    if kwargs is None:
        kwargs = {}
    if not hasattr(callback, '_max_calls'):
        callback._max_calls = None
    # None implies no callback limit
    if callback._max_calls is None:
        return _call_partial(callback, *args, **kwargs)
    # Call budget exhausted: disconnect the callback instead of invoking it.
    if callback._max_calls <= 0:
        return disconnect(callback)
    callback._max_calls -= 1
    return _call_partial(callback, *args, **kwargs)
return _call_partial(callback, *args, **kwargs) | def function[_call, parameter[callback, args, kwargs]]:
constant[
Calls a callback with optional args and keyword args lists. This method exists so
we can inspect the `_max_calls` attribute that's set by `_on`. If this value is None,
the callback is considered to have no limit. Otherwise, an integer value is expected
and decremented until there are no remaining calls
]
if <ast.UnaryOp object at 0x7da204347e50> begin[:]
name[callback]._max_calls assign[=] constant[None]
if compare[name[callback]._max_calls is constant[None]] begin[:]
return[call[name[_call_partial], parameter[name[callback], <ast.Starred object at 0x7da204346170>]]]
if compare[name[callback]._max_calls less_or_equal[<=] constant[0]] begin[:]
return[call[name[disconnect], parameter[name[callback]]]]
<ast.AugAssign object at 0x7da204347940>
return[call[name[_call_partial], parameter[name[callback], <ast.Starred object at 0x7da2043467a0>]]] | keyword[def] identifier[_call] ( identifier[callback] , identifier[args] =[], identifier[kwargs] ={}):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[callback] , literal[string] ):
identifier[callback] . identifier[_max_calls] = keyword[None]
keyword[if] identifier[callback] . identifier[_max_calls] keyword[is] keyword[None] :
keyword[return] identifier[_call_partial] ( identifier[callback] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[callback] . identifier[_max_calls] <= literal[int] :
keyword[return] identifier[disconnect] ( identifier[callback] )
identifier[callback] . identifier[_max_calls] -= literal[int]
keyword[return] identifier[_call_partial] ( identifier[callback] ,* identifier[args] ,** identifier[kwargs] ) | def _call(callback, args=[], kwargs={}):
"""
Calls a callback with optional args and keyword args lists. This method exists so
we can inspect the `_max_calls` attribute that's set by `_on`. If this value is None,
the callback is considered to have no limit. Otherwise, an integer value is expected
and decremented until there are no remaining calls
"""
if not hasattr(callback, '_max_calls'):
callback._max_calls = None # depends on [control=['if'], data=[]]
# None implies no callback limit
if callback._max_calls is None:
return _call_partial(callback, *args, **kwargs) # depends on [control=['if'], data=[]]
# Should the signal be disconnected?
if callback._max_calls <= 0:
return disconnect(callback) # depends on [control=['if'], data=[]]
callback._max_calls -= 1
return _call_partial(callback, *args, **kwargs) |
def kruskal(dv=None, between=None, data=None, detailed=False,
            export_filename=None):
    """Kruskal-Wallis H-test for independent samples.

    Parameters
    ----------
    dv : string
        Name of column containing the dependant variable.
    between : string
        Name of column containing the between factor.
    data : pandas DataFrame
        DataFrame
    detailed : bool
        Currently unused; kept for signature compatibility with the other
        ANOVA-style functions.
    export_filename : string
        Filename (without extension) for the output file.
        If None, do not export the table.
        By default, the file will be created in the current python console
        directory. To change that, specify the filename with full path.

    Returns
    -------
    stats : DataFrame
        Test summary ::

        'H' : The Kruskal-Wallis H statistic, corrected for ties
        'p-unc' : Uncorrected p-value
        'ddof1' : degrees of freedom

    Notes
    -----
    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal. It is a non-parametric version of
    ANOVA. The test works on 2 or more independent samples, which may have
    different sizes.
    Due to the assumption that H has a chi square distribution, the number of
    samples in each group must not be too small. A typical rule is that each
    sample must have at least 5 measurements.
    NaN values are automatically removed.

    Examples
    --------
    Compute the Kruskal-Wallis H-test for independent samples.

    >>> from pingouin import kruskal, read_dataset
    >>> df = read_dataset('anova')
    >>> kruskal(dv='Pain threshold', between='Hair color', data=df)
                  Source  ddof1       H     p-unc
    Kruskal   Hair color      3  10.589  0.014172
    """
    from scipy.stats import chi2, rankdata, tiecorrect
    # Check data: requires a between-subjects design with the given columns.
    _check_dataframe(dv=dv, between=between, data=data,
                     effects='between')
    # Remove NaN values
    data = data.dropna()
    # Reset index (avoid duplicate axis error)
    data = data.reset_index(drop=True)
    # Extract number of groups and total sample size
    groups = list(data[between].unique())
    n_groups = len(groups)
    n = data[dv].size
    # Rank data across ALL observations (ties handled by scipy's rankdata).
    data['rank'] = rankdata(data[dv])
    # Find the total of rank per groups
    grp = data.groupby(between)['rank']
    sum_rk_grp = grp.sum().values
    n_per_grp = grp.count().values
    # Calculate chi-square statistic (H)
    H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1)
    # Correct for ties (tiecorrect returns 1.0 when there are no ties).
    H /= tiecorrect(data['rank'].values)
    # Calculate DOF and p-value from the chi-square survival function.
    ddof1 = n_groups - 1
    p_unc = chi2.sf(H, ddof1)
    # Create output dataframe
    stats = pd.DataFrame({'Source': between,
                          'ddof1': ddof1,
                          'H': np.round(H, 3),
                          'p-unc': p_unc,
                          }, index=['Kruskal'])
    col_order = ['Source', 'ddof1', 'H', 'p-unc']
    stats = stats.reindex(columns=col_order)
    stats.dropna(how='all', axis=1, inplace=True)
    # Export to .csv
    if export_filename is not None:
        _export_table(stats, export_filename)
    return stats
return stats | def function[kruskal, parameter[dv, between, data, detailed, export_filename]]:
constant[Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172
]
from relative_module[scipy.stats] import module[chi2], module[rankdata], module[tiecorrect]
call[name[_check_dataframe], parameter[]]
variable[data] assign[=] call[name[data].dropna, parameter[]]
variable[data] assign[=] call[name[data].reset_index, parameter[]]
variable[groups] assign[=] call[name[list], parameter[call[call[name[data]][name[between]].unique, parameter[]]]]
variable[n_groups] assign[=] call[name[len], parameter[name[groups]]]
variable[n] assign[=] call[name[data]][name[dv]].size
call[name[data]][constant[rank]] assign[=] call[name[rankdata], parameter[call[name[data]][name[dv]]]]
variable[grp] assign[=] call[call[name[data].groupby, parameter[name[between]]]][constant[rank]]
variable[sum_rk_grp] assign[=] call[name[grp].sum, parameter[]].values
variable[n_per_grp] assign[=] call[name[grp].count, parameter[]].values
variable[H] assign[=] binary_operation[binary_operation[binary_operation[constant[12] / binary_operation[name[n] * binary_operation[name[n] + constant[1]]]] * call[name[np].sum, parameter[binary_operation[binary_operation[name[sum_rk_grp] ** constant[2]] / name[n_per_grp]]]]] - binary_operation[constant[3] * binary_operation[name[n] + constant[1]]]]
<ast.AugAssign object at 0x7da20c6e5300>
variable[ddof1] assign[=] binary_operation[name[n_groups] - constant[1]]
variable[p_unc] assign[=] call[name[chi2].sf, parameter[name[H], name[ddof1]]]
variable[stats] assign[=] call[name[pd].DataFrame, parameter[dictionary[[<ast.Constant object at 0x7da20c6e7d30>, <ast.Constant object at 0x7da20c6e56c0>, <ast.Constant object at 0x7da20c6e5cc0>, <ast.Constant object at 0x7da20c6e43d0>], [<ast.Name object at 0x7da20c6e4c70>, <ast.Name object at 0x7da20c6e4640>, <ast.Call object at 0x7da20c6e7580>, <ast.Name object at 0x7da20c6e6a10>]]]]
variable[col_order] assign[=] list[[<ast.Constant object at 0x7da20c6e6d70>, <ast.Constant object at 0x7da20c6e51e0>, <ast.Constant object at 0x7da20c6e6e60>, <ast.Constant object at 0x7da20c6e70d0>]]
variable[stats] assign[=] call[name[stats].reindex, parameter[]]
call[name[stats].dropna, parameter[]]
if compare[name[export_filename] is_not constant[None]] begin[:]
call[name[_export_table], parameter[name[stats], name[export_filename]]]
return[name[stats]] | keyword[def] identifier[kruskal] ( identifier[dv] = keyword[None] , identifier[between] = keyword[None] , identifier[data] = keyword[None] , identifier[detailed] = keyword[False] ,
identifier[export_filename] = keyword[None] ):
literal[string]
keyword[from] identifier[scipy] . identifier[stats] keyword[import] identifier[chi2] , identifier[rankdata] , identifier[tiecorrect]
identifier[_check_dataframe] ( identifier[dv] = identifier[dv] , identifier[between] = identifier[between] , identifier[data] = identifier[data] ,
identifier[effects] = literal[string] )
identifier[data] = identifier[data] . identifier[dropna] ()
identifier[data] = identifier[data] . identifier[reset_index] ( identifier[drop] = keyword[True] )
identifier[groups] = identifier[list] ( identifier[data] [ identifier[between] ]. identifier[unique] ())
identifier[n_groups] = identifier[len] ( identifier[groups] )
identifier[n] = identifier[data] [ identifier[dv] ]. identifier[size]
identifier[data] [ literal[string] ]= identifier[rankdata] ( identifier[data] [ identifier[dv] ])
identifier[grp] = identifier[data] . identifier[groupby] ( identifier[between] )[ literal[string] ]
identifier[sum_rk_grp] = identifier[grp] . identifier[sum] (). identifier[values]
identifier[n_per_grp] = identifier[grp] . identifier[count] (). identifier[values]
identifier[H] =( literal[int] /( identifier[n] *( identifier[n] + literal[int] ))* identifier[np] . identifier[sum] ( identifier[sum_rk_grp] ** literal[int] / identifier[n_per_grp] ))- literal[int] *( identifier[n] + literal[int] )
identifier[H] /= identifier[tiecorrect] ( identifier[data] [ literal[string] ]. identifier[values] )
identifier[ddof1] = identifier[n_groups] - literal[int]
identifier[p_unc] = identifier[chi2] . identifier[sf] ( identifier[H] , identifier[ddof1] )
identifier[stats] = identifier[pd] . identifier[DataFrame] ({ literal[string] : identifier[between] ,
literal[string] : identifier[ddof1] ,
literal[string] : identifier[np] . identifier[round] ( identifier[H] , literal[int] ),
literal[string] : identifier[p_unc] ,
}, identifier[index] =[ literal[string] ])
identifier[col_order] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[stats] = identifier[stats] . identifier[reindex] ( identifier[columns] = identifier[col_order] )
identifier[stats] . identifier[dropna] ( identifier[how] = literal[string] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
keyword[if] identifier[export_filename] keyword[is] keyword[not] keyword[None] :
identifier[_export_table] ( identifier[stats] , identifier[export_filename] )
keyword[return] identifier[stats] | def kruskal(dv=None, between=None, data=None, detailed=False, export_filename=None):
"""Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172
"""
from scipy.stats import chi2, rankdata, tiecorrect
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Remove NaN values
data = data.dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract number of groups and total sample size
groups = list(data[between].unique())
n_groups = len(groups)
n = data[dv].size
# Rank data, dealing with ties appropriately
data['rank'] = rankdata(data[dv])
# Find the total of rank per groups
grp = data.groupby(between)['rank']
sum_rk_grp = grp.sum().values
n_per_grp = grp.count().values
# Calculate chi-square statistic (H)
H = 12 / (n * (n + 1)) * np.sum(sum_rk_grp ** 2 / n_per_grp) - 3 * (n + 1)
# Correct for ties
H /= tiecorrect(data['rank'].values)
# Calculate DOF and p-value
ddof1 = n_groups - 1
p_unc = chi2.sf(H, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': between, 'ddof1': ddof1, 'H': np.round(H, 3), 'p-unc': p_unc}, index=['Kruskal'])
col_order = ['Source', 'ddof1', 'H', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename) # depends on [control=['if'], data=['export_filename']]
return stats |
def cmd_dataflash_logger(self, args):
    '''control behaviour of the module'''
    # No sub-command supplied: show usage and bail out.
    if not args:
        print(self.usage())
        return
    command = args[0]
    if command == "status":
        print(self.status())
    elif command == "stop":
        self.new_log_started = False
        self.stopped = True
    elif command == "start":
        self.stopped = False
    elif command == "set":
        # Forward remaining tokens to the settings handler.
        self.log_settings.command(args[1:])
    else:
        # Unknown sub-command: show usage.
        print(self.usage())
print (self.usage()) | def function[cmd_dataflash_logger, parameter[self, args]]:
constant[control behaviour of the module]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[0]] begin[:]
call[name[print], parameter[call[name[self].usage, parameter[]]]] | keyword[def] identifier[cmd_dataflash_logger] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[print] ( identifier[self] . identifier[usage] ())
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[print] ( identifier[self] . identifier[status] ())
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[new_log_started] = keyword[False]
identifier[self] . identifier[stopped] = keyword[True]
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[stopped] = keyword[False]
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[log_settings] . identifier[command] ( identifier[args] [ literal[int] :])
keyword[else] :
identifier[print] ( identifier[self] . identifier[usage] ()) | def cmd_dataflash_logger(self, args):
"""control behaviour of the module"""
if len(args) == 0:
print(self.usage()) # depends on [control=['if'], data=[]]
elif args[0] == 'status':
print(self.status()) # depends on [control=['if'], data=[]]
elif args[0] == 'stop':
self.new_log_started = False
self.stopped = True # depends on [control=['if'], data=[]]
elif args[0] == 'start':
self.stopped = False # depends on [control=['if'], data=[]]
elif args[0] == 'set':
self.log_settings.command(args[1:]) # depends on [control=['if'], data=[]]
else:
print(self.usage()) |
def set(self, alpha):
    """
    Set cursor position on the color corresponding to the alpha value.

    :param alpha: new alpha value (between 0 and 255)
    :type alpha: int
    """
    # Clamp the requested value into the valid alpha range [0, 255].
    alpha = min(max(alpha, 0), 255)
    cursor_x = alpha / 255. * self.winfo_width()
    self.coords('cursor', cursor_x, 0, cursor_x, self.winfo_height())
    self._variable.set(alpha)
self._variable.set(alpha) | def function[set, parameter[self, alpha]]:
constant[
Set cursor position on the color corresponding to the alpha value.
:param alpha: new alpha value (between 0 and 255)
:type alpha: int
]
if compare[name[alpha] greater[>] constant[255]] begin[:]
variable[alpha] assign[=] constant[255]
variable[x] assign[=] binary_operation[binary_operation[name[alpha] / constant[255.0]] * call[name[self].winfo_width, parameter[]]]
call[name[self].coords, parameter[constant[cursor], name[x], constant[0], name[x], call[name[self].winfo_height, parameter[]]]]
call[name[self]._variable.set, parameter[name[alpha]]] | keyword[def] identifier[set] ( identifier[self] , identifier[alpha] ):
literal[string]
keyword[if] identifier[alpha] > literal[int] :
identifier[alpha] = literal[int]
keyword[elif] identifier[alpha] < literal[int] :
identifier[alpha] = literal[int]
identifier[x] = identifier[alpha] / literal[int] * identifier[self] . identifier[winfo_width] ()
identifier[self] . identifier[coords] ( literal[string] , identifier[x] , literal[int] , identifier[x] , identifier[self] . identifier[winfo_height] ())
identifier[self] . identifier[_variable] . identifier[set] ( identifier[alpha] ) | def set(self, alpha):
"""
Set cursor position on the color corresponding to the alpha value.
:param alpha: new alpha value (between 0 and 255)
:type alpha: int
"""
if alpha > 255:
alpha = 255 # depends on [control=['if'], data=['alpha']]
elif alpha < 0:
alpha = 0 # depends on [control=['if'], data=['alpha']]
x = alpha / 255.0 * self.winfo_width()
self.coords('cursor', x, 0, x, self.winfo_height())
self._variable.set(alpha) |
def expression_filters(self):
    """ Dict[str, ExpressionFilter]: Returns the expression filters for this selector. """
    # Keep only the filters that are ExpressionFilter instances.
    selected = {}
    for key, flt in iter(self.filters.items()):
        if isinstance(flt, ExpressionFilter):
            selected[key] = flt
    return selected
constant[ Dict[str, ExpressionFilter]: Returns the expression filters for this selector. ]
return[<ast.DictComp object at 0x7da1b0217f70>] | keyword[def] identifier[expression_filters] ( identifier[self] ):
literal[string]
keyword[return] {
identifier[name] : identifier[filter] keyword[for] identifier[name] , identifier[filter] keyword[in] identifier[iter] ( identifier[self] . identifier[filters] . identifier[items] ())
keyword[if] identifier[isinstance] ( identifier[filter] , identifier[ExpressionFilter] )} | def expression_filters(self):
""" Dict[str, ExpressionFilter]: Returns the expression filters for this selector. """
return {name: filter for (name, filter) in iter(self.filters.items()) if isinstance(filter, ExpressionFilter)} |
def decode_osgi_props(input_props):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    """
    Decodes the OSGi properties of the given endpoint properties
    """
    decoded = {}
    # Exported interfaces are always present.
    interfaces = decode_list(input_props, OBJECTCLASS)
    decoded[OBJECTCLASS] = interfaces
    # Keep the package version property of each interface, when provided.
    for interface in interfaces:
        version_key = ENDPOINT_PACKAGE_VERSION_ + package_name(interface)
        version = input_props.get(version_key, None)
        if version:
            decoded[version_key] = version
    # Mandatory single-valued properties are copied verbatim.
    for key in (ENDPOINT_ID, ENDPOINT_SERVICE_ID, ENDPOINT_FRAMEWORK_UUID):
        decoded[key] = input_props[key]
    # Optional list-valued properties are copied only when non-empty.
    for key in (SERVICE_IMPORTED_CONFIGS, SERVICE_INTENTS,
                REMOTE_CONFIGS_SUPPORTED, REMOTE_INTENTS_SUPPORTED):
        values = decode_list(input_props, key)
        if values:
            decoded[key] = values
    return decoded
constant[
Decodes the OSGi properties of the given endpoint properties
]
variable[result_props] assign[=] dictionary[[], []]
variable[intfs] assign[=] call[name[decode_list], parameter[name[input_props], name[OBJECTCLASS]]]
call[name[result_props]][name[OBJECTCLASS]] assign[=] name[intfs]
for taget[name[intf]] in starred[name[intfs]] begin[:]
variable[package_key] assign[=] binary_operation[name[ENDPOINT_PACKAGE_VERSION_] + call[name[package_name], parameter[name[intf]]]]
variable[intfversionstr] assign[=] call[name[input_props].get, parameter[name[package_key], constant[None]]]
if name[intfversionstr] begin[:]
call[name[result_props]][name[package_key]] assign[=] name[intfversionstr]
call[name[result_props]][name[ENDPOINT_ID]] assign[=] call[name[input_props]][name[ENDPOINT_ID]]
call[name[result_props]][name[ENDPOINT_SERVICE_ID]] assign[=] call[name[input_props]][name[ENDPOINT_SERVICE_ID]]
call[name[result_props]][name[ENDPOINT_FRAMEWORK_UUID]] assign[=] call[name[input_props]][name[ENDPOINT_FRAMEWORK_UUID]]
variable[imp_configs] assign[=] call[name[decode_list], parameter[name[input_props], name[SERVICE_IMPORTED_CONFIGS]]]
if name[imp_configs] begin[:]
call[name[result_props]][name[SERVICE_IMPORTED_CONFIGS]] assign[=] name[imp_configs]
variable[intents] assign[=] call[name[decode_list], parameter[name[input_props], name[SERVICE_INTENTS]]]
if name[intents] begin[:]
call[name[result_props]][name[SERVICE_INTENTS]] assign[=] name[intents]
variable[remote_configs] assign[=] call[name[decode_list], parameter[name[input_props], name[REMOTE_CONFIGS_SUPPORTED]]]
if name[remote_configs] begin[:]
call[name[result_props]][name[REMOTE_CONFIGS_SUPPORTED]] assign[=] name[remote_configs]
variable[remote_intents] assign[=] call[name[decode_list], parameter[name[input_props], name[REMOTE_INTENTS_SUPPORTED]]]
if name[remote_intents] begin[:]
call[name[result_props]][name[REMOTE_INTENTS_SUPPORTED]] assign[=] name[remote_intents]
return[name[result_props]] | keyword[def] identifier[decode_osgi_props] ( identifier[input_props] ):
literal[string]
identifier[result_props] ={}
identifier[intfs] = identifier[decode_list] ( identifier[input_props] , identifier[OBJECTCLASS] )
identifier[result_props] [ identifier[OBJECTCLASS] ]= identifier[intfs]
keyword[for] identifier[intf] keyword[in] identifier[intfs] :
identifier[package_key] = identifier[ENDPOINT_PACKAGE_VERSION_] + identifier[package_name] ( identifier[intf] )
identifier[intfversionstr] = identifier[input_props] . identifier[get] ( identifier[package_key] , keyword[None] )
keyword[if] identifier[intfversionstr] :
identifier[result_props] [ identifier[package_key] ]= identifier[intfversionstr]
identifier[result_props] [ identifier[ENDPOINT_ID] ]= identifier[input_props] [ identifier[ENDPOINT_ID] ]
identifier[result_props] [ identifier[ENDPOINT_SERVICE_ID] ]= identifier[input_props] [ identifier[ENDPOINT_SERVICE_ID] ]
identifier[result_props] [ identifier[ENDPOINT_FRAMEWORK_UUID] ]= identifier[input_props] [ identifier[ENDPOINT_FRAMEWORK_UUID] ]
identifier[imp_configs] = identifier[decode_list] ( identifier[input_props] , identifier[SERVICE_IMPORTED_CONFIGS] )
keyword[if] identifier[imp_configs] :
identifier[result_props] [ identifier[SERVICE_IMPORTED_CONFIGS] ]= identifier[imp_configs]
identifier[intents] = identifier[decode_list] ( identifier[input_props] , identifier[SERVICE_INTENTS] )
keyword[if] identifier[intents] :
identifier[result_props] [ identifier[SERVICE_INTENTS] ]= identifier[intents]
identifier[remote_configs] = identifier[decode_list] ( identifier[input_props] , identifier[REMOTE_CONFIGS_SUPPORTED] )
keyword[if] identifier[remote_configs] :
identifier[result_props] [ identifier[REMOTE_CONFIGS_SUPPORTED] ]= identifier[remote_configs]
identifier[remote_intents] = identifier[decode_list] ( identifier[input_props] , identifier[REMOTE_INTENTS_SUPPORTED] )
keyword[if] identifier[remote_intents] :
identifier[result_props] [ identifier[REMOTE_INTENTS_SUPPORTED] ]= identifier[remote_intents]
keyword[return] identifier[result_props] | def decode_osgi_props(input_props):
# type: (Dict[str, Any]) -> Dict[str, Any]
'\n Decodes the OSGi properties of the given endpoint properties\n '
result_props = {}
intfs = decode_list(input_props, OBJECTCLASS)
result_props[OBJECTCLASS] = intfs
for intf in intfs:
package_key = ENDPOINT_PACKAGE_VERSION_ + package_name(intf)
intfversionstr = input_props.get(package_key, None)
if intfversionstr:
result_props[package_key] = intfversionstr # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['intf']]
result_props[ENDPOINT_ID] = input_props[ENDPOINT_ID]
result_props[ENDPOINT_SERVICE_ID] = input_props[ENDPOINT_SERVICE_ID]
result_props[ENDPOINT_FRAMEWORK_UUID] = input_props[ENDPOINT_FRAMEWORK_UUID]
imp_configs = decode_list(input_props, SERVICE_IMPORTED_CONFIGS)
if imp_configs:
result_props[SERVICE_IMPORTED_CONFIGS] = imp_configs # depends on [control=['if'], data=[]]
intents = decode_list(input_props, SERVICE_INTENTS)
if intents:
result_props[SERVICE_INTENTS] = intents # depends on [control=['if'], data=[]]
remote_configs = decode_list(input_props, REMOTE_CONFIGS_SUPPORTED)
if remote_configs:
result_props[REMOTE_CONFIGS_SUPPORTED] = remote_configs # depends on [control=['if'], data=[]]
remote_intents = decode_list(input_props, REMOTE_INTENTS_SUPPORTED)
if remote_intents:
result_props[REMOTE_INTENTS_SUPPORTED] = remote_intents # depends on [control=['if'], data=[]]
return result_props |
def ionic_radii(self):
    """
    All ionic radii of the element as a dict of
    {oxidation state: ionic radii}. Radii are given in ang.
    """
    # Guard clause: no ionic radii recorded for this element.
    if "Ionic radii" not in self._data:
        return {}
    # Oxidation states are stored as string keys; convert them to int.
    return {int(state): radius
            for state, radius in self._data["Ionic radii"].items()}
constant[
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
]
if compare[constant[Ionic radii] in name[self]._data] begin[:]
return[<ast.DictComp object at 0x7da204565330>] | keyword[def] identifier[ionic_radii] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_data] :
keyword[return] { identifier[int] ( identifier[k] ): identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_data] [ literal[string] ]. identifier[items] ()}
keyword[else] :
keyword[return] {} | def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if 'Ionic radii' in self._data:
return {int(k): v for (k, v) in self._data['Ionic radii'].items()} # depends on [control=['if'], data=[]]
else:
return {} |
def load_user_config(args, log):
    """Load settings from the user's configuration file, and add them to `args`.

    Settings are loaded from the configuration file in the user's home
    directory. Those parameters are added (as attributes) to the `args`
    object.

    Arguments
    ---------
    args : `argparse.Namespace`
        Namespace object to which configuration attributes will be added.
    log : `logging.Logger`
        Logger used for debug output and for the raised error.

    Returns
    -------
    args : `argparse.Namespace`
        Namespace object with added attributes.
    """
    if not os.path.exists(_CONFIG_PATH):
        err_str = (
            "Configuration file does not exists ({}).\n".format(_CONFIG_PATH) +
            "Run `python -m astrocats setup` to configure.")
        log_raise(log, err_str)
    # Close the file handle deterministically: the previous
    # `json.load(open(...))` relied on garbage collection to release it.
    with open(_CONFIG_PATH, 'r') as conf_file:
        config = json.load(conf_file)
    setattr(args, _BASE_PATH_KEY, config[_BASE_PATH_KEY])
    log.debug("Loaded configuration: {}: {}".format(_BASE_PATH_KEY, config[
        _BASE_PATH_KEY]))
    return args
return args | def function[load_user_config, parameter[args, log]]:
constant[Load settings from the user's confiuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
]
if <ast.UnaryOp object at 0x7da1b0fc4cd0> begin[:]
variable[err_str] assign[=] binary_operation[call[constant[Configuration file does not exists ({}).
].format, parameter[name[_CONFIG_PATH]]] + constant[Run `python -m astrocats setup` to configure.]]
call[name[log_raise], parameter[name[log], name[err_str]]]
variable[config] assign[=] call[name[json].load, parameter[call[name[open], parameter[name[_CONFIG_PATH], constant[r]]]]]
call[name[setattr], parameter[name[args], name[_BASE_PATH_KEY], call[name[config]][name[_BASE_PATH_KEY]]]]
call[name[log].debug, parameter[call[constant[Loaded configuration: {}: {}].format, parameter[name[_BASE_PATH_KEY], call[name[config]][name[_BASE_PATH_KEY]]]]]]
return[name[args]] | keyword[def] identifier[load_user_config] ( identifier[args] , identifier[log] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[_CONFIG_PATH] ):
identifier[err_str] =(
literal[string] . identifier[format] ( identifier[_CONFIG_PATH] )+
literal[string] )
identifier[log_raise] ( identifier[log] , identifier[err_str] )
identifier[config] = identifier[json] . identifier[load] ( identifier[open] ( identifier[_CONFIG_PATH] , literal[string] ))
identifier[setattr] ( identifier[args] , identifier[_BASE_PATH_KEY] , identifier[config] [ identifier[_BASE_PATH_KEY] ])
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[_BASE_PATH_KEY] , identifier[config] [
identifier[_BASE_PATH_KEY] ]))
keyword[return] identifier[args] | def load_user_config(args, log):
"""Load settings from the user's confiuration file, and add them to `args`.
Settings are loaded from the configuration file in the user's home
directory. Those parameters are added (as attributes) to the `args`
object.
Arguments
---------
args : `argparse.Namespace`
Namespace object to which configuration attributes will be added.
Returns
-------
args : `argparse.Namespace`
Namespace object with added attributes.
"""
if not os.path.exists(_CONFIG_PATH):
err_str = 'Configuration file does not exists ({}).\n'.format(_CONFIG_PATH) + 'Run `python -m astrocats setup` to configure.'
log_raise(log, err_str) # depends on [control=['if'], data=[]]
config = json.load(open(_CONFIG_PATH, 'r'))
setattr(args, _BASE_PATH_KEY, config[_BASE_PATH_KEY])
log.debug('Loaded configuration: {}: {}'.format(_BASE_PATH_KEY, config[_BASE_PATH_KEY]))
return args |
def normalize_cjk_fullwidth_ascii(seq: str) -> str:
"""
Conver fullwith ASCII to halfwidth ASCII.
See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
"""
def convert(char: str) -> str:
code_point = ord(char)
if not 0xFF01 <= code_point <= 0xFF5E:
return char
return chr(code_point - 0xFEE0)
return ''.join(map(convert, seq)) | def function[normalize_cjk_fullwidth_ascii, parameter[seq]]:
constant[
Conver fullwith ASCII to halfwidth ASCII.
See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
]
def function[convert, parameter[char]]:
variable[code_point] assign[=] call[name[ord], parameter[name[char]]]
if <ast.UnaryOp object at 0x7da18c4ccee0> begin[:]
return[name[char]]
return[call[name[chr], parameter[binary_operation[name[code_point] - constant[65248]]]]]
return[call[constant[].join, parameter[call[name[map], parameter[name[convert], name[seq]]]]]] | keyword[def] identifier[normalize_cjk_fullwidth_ascii] ( identifier[seq] : identifier[str] )-> identifier[str] :
literal[string]
keyword[def] identifier[convert] ( identifier[char] : identifier[str] )-> identifier[str] :
identifier[code_point] = identifier[ord] ( identifier[char] )
keyword[if] keyword[not] literal[int] <= identifier[code_point] <= literal[int] :
keyword[return] identifier[char]
keyword[return] identifier[chr] ( identifier[code_point] - literal[int] )
keyword[return] literal[string] . identifier[join] ( identifier[map] ( identifier[convert] , identifier[seq] )) | def normalize_cjk_fullwidth_ascii(seq: str) -> str:
"""
Conver fullwith ASCII to halfwidth ASCII.
See https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms
"""
def convert(char: str) -> str:
code_point = ord(char)
if not 65281 <= code_point <= 65374:
return char # depends on [control=['if'], data=[]]
return chr(code_point - 65248)
return ''.join(map(convert, seq)) |
def add_child_bin(self, bin_id, child_id):
"""Adds a child to a bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``bin_id`` is already a parent of
``child_id``
raise: NotFound - ``bin_id`` or ``child_id`` not found
raise: NullArgument - ``bin_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_child_catalog(catalog_id=bin_id, child_id=child_id)
return self._hierarchy_session.add_child(id_=bin_id, child_id=child_id) | def function[add_child_bin, parameter[self, bin_id, child_id]]:
constant[Adds a child to a bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``bin_id`` is already a parent of
``child_id``
raise: NotFound - ``bin_id`` or ``child_id`` not found
raise: NullArgument - ``bin_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.add_child_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.add_child, parameter[]]] | keyword[def] identifier[add_child_bin] ( identifier[self] , identifier[bin_id] , identifier[child_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[add_child_catalog] ( identifier[catalog_id] = identifier[bin_id] , identifier[child_id] = identifier[child_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[add_child] ( identifier[id_] = identifier[bin_id] , identifier[child_id] = identifier[child_id] ) | def add_child_bin(self, bin_id, child_id):
"""Adds a child to a bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``bin_id`` is already a parent of
``child_id``
raise: NotFound - ``bin_id`` or ``child_id`` not found
raise: NullArgument - ``bin_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_child_catalog(catalog_id=bin_id, child_id=child_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.add_child(id_=bin_id, child_id=child_id) |
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist)
else:
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break | def function[provides_distribution, parameter[self, name, version]]:
constant[
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
]
variable[matcher] assign[=] constant[None]
if compare[name[version] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18dc06fb0>
for taget[name[dist]] in starred[call[name[self].get_distributions, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18dc04dc0> begin[:]
call[name[logger].debug, parameter[constant[No "provides": %s], name[dist]]] | keyword[def] identifier[provides_distribution] ( identifier[self] , identifier[name] , identifier[version] = keyword[None] ):
literal[string]
identifier[matcher] = keyword[None]
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[matcher] = identifier[self] . identifier[_scheme] . identifier[matcher] ( literal[string] %( identifier[name] , identifier[version] ))
keyword[except] identifier[ValueError] :
keyword[raise] identifier[DistlibException] ( literal[string] %
( identifier[name] , identifier[version] ))
keyword[for] identifier[dist] keyword[in] identifier[self] . identifier[get_distributions] ():
keyword[if] keyword[not] identifier[hasattr] ( identifier[dist] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] , identifier[dist] )
keyword[else] :
identifier[provided] = identifier[dist] . identifier[provides]
keyword[for] identifier[p] keyword[in] identifier[provided] :
identifier[p_name] , identifier[p_ver] = identifier[parse_name_and_version] ( identifier[p] )
keyword[if] identifier[matcher] keyword[is] keyword[None] :
keyword[if] identifier[p_name] == identifier[name] :
keyword[yield] identifier[dist]
keyword[break]
keyword[else] :
keyword[if] identifier[p_name] == identifier[name] keyword[and] identifier[matcher] . identifier[match] ( identifier[p_ver] ):
keyword[yield] identifier[dist]
keyword[break] | def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version)) # depends on [control=['try'], data=[]]
except ValueError:
raise DistlibException('invalid name or version: %r, %r' % (name, version)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['version']]
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist) # depends on [control=['if'], data=[]]
else:
provided = dist.provides
for p in provided:
(p_name, p_ver) = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif p_name == name and matcher.match(p_ver):
yield dist
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['for'], data=['dist']] |
def limit_spec(self, spec):
"""
Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution.
"""
if list(spec.keys()) == ['name']:
# if we're just resolving on name, include other names
return ((Q(name=spec['name']) | Q(other_names__name=spec['name'])) &
Q(memberships__organization__jurisdiction_id=self.jurisdiction_id))
spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
return spec | def function[limit_spec, parameter[self, spec]]:
constant[
Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution.
]
if compare[call[name[list], parameter[call[name[spec].keys, parameter[]]]] equal[==] list[[<ast.Constant object at 0x7da2047ea4d0>]]] begin[:]
return[binary_operation[binary_operation[call[name[Q], parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Q], parameter[]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[Q], parameter[]]]]
call[name[spec]][constant[memberships__organization__jurisdiction_id]] assign[=] name[self].jurisdiction_id
return[name[spec]] | keyword[def] identifier[limit_spec] ( identifier[self] , identifier[spec] ):
literal[string]
keyword[if] identifier[list] ( identifier[spec] . identifier[keys] ())==[ literal[string] ]:
keyword[return] (( identifier[Q] ( identifier[name] = identifier[spec] [ literal[string] ])| identifier[Q] ( identifier[other_names__name] = identifier[spec] [ literal[string] ]))&
identifier[Q] ( identifier[memberships__organization__jurisdiction_id] = identifier[self] . identifier[jurisdiction_id] ))
identifier[spec] [ literal[string] ]= identifier[self] . identifier[jurisdiction_id]
keyword[return] identifier[spec] | def limit_spec(self, spec):
"""
Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution.
"""
if list(spec.keys()) == ['name']:
# if we're just resolving on name, include other names
return (Q(name=spec['name']) | Q(other_names__name=spec['name'])) & Q(memberships__organization__jurisdiction_id=self.jurisdiction_id) # depends on [control=['if'], data=[]]
spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
return spec |
def pipe_filter(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, \
'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
"""
conf = DotDict(conf)
test = kwargs.pop('pass_if', None)
permit = conf.get('MODE', **kwargs) == 'permit'
combine = conf.get('COMBINE', **kwargs)
if not combine in {'and', 'or'}:
raise Exception(
"Invalid combine: %s. (Expected 'and' or 'or')" % combine)
rule_defs = map(DotDict, utils.listize(conf['RULE']))
get_pass = partial(utils.get_pass, test=test)
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
get_rules = lambda i: imap(parse_conf, rule_defs, repeat(i))
funcs = [COMBINE_BOOLEAN[combine], utils.passthrough, utils.passthrough]
inputs = imap(DotDict, _INPUT)
splits = utils.broadcast(inputs, get_rules, utils.passthrough, get_pass)
outputs = starmap(partial(parse_rules, **kwargs), splits)
parsed = utils.dispatch(outputs, *funcs)
gathered = starmap(partial(parse_result, permit=permit), parsed)
_OUTPUT = ifilter(None, gathered)
return _OUTPUT | def function[pipe_filter, parameter[context, _INPUT, conf]]:
constant[An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, 'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, 'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
]
variable[conf] assign[=] call[name[DotDict], parameter[name[conf]]]
variable[test] assign[=] call[name[kwargs].pop, parameter[constant[pass_if], constant[None]]]
variable[permit] assign[=] compare[call[name[conf].get, parameter[constant[MODE]]] equal[==] constant[permit]]
variable[combine] assign[=] call[name[conf].get, parameter[constant[COMBINE]]]
if <ast.UnaryOp object at 0x7da1b045fca0> begin[:]
<ast.Raise object at 0x7da1b045fbb0>
variable[rule_defs] assign[=] call[name[map], parameter[name[DotDict], call[name[utils].listize, parameter[call[name[conf]][constant[RULE]]]]]]
variable[get_pass] assign[=] call[name[partial], parameter[name[utils].get_pass]]
variable[get_value] assign[=] call[name[partial], parameter[name[utils].get_value]]
variable[parse_conf] assign[=] call[name[partial], parameter[name[utils].parse_conf]]
variable[get_rules] assign[=] <ast.Lambda object at 0x7da1b045c460>
variable[funcs] assign[=] list[[<ast.Subscript object at 0x7da1b045c8b0>, <ast.Attribute object at 0x7da1b045e2f0>, <ast.Attribute object at 0x7da1b045ffa0>]]
variable[inputs] assign[=] call[name[imap], parameter[name[DotDict], name[_INPUT]]]
variable[splits] assign[=] call[name[utils].broadcast, parameter[name[inputs], name[get_rules], name[utils].passthrough, name[get_pass]]]
variable[outputs] assign[=] call[name[starmap], parameter[call[name[partial], parameter[name[parse_rules]]], name[splits]]]
variable[parsed] assign[=] call[name[utils].dispatch, parameter[name[outputs], <ast.Starred object at 0x7da1b045c0d0>]]
variable[gathered] assign[=] call[name[starmap], parameter[call[name[partial], parameter[name[parse_result]]], name[parsed]]]
variable[_OUTPUT] assign[=] call[name[ifilter], parameter[constant[None], name[gathered]]]
return[name[_OUTPUT]] | keyword[def] identifier[pipe_filter] ( identifier[context] = keyword[None] , identifier[_INPUT] = keyword[None] , identifier[conf] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conf] = identifier[DotDict] ( identifier[conf] )
identifier[test] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[permit] = identifier[conf] . identifier[get] ( literal[string] ,** identifier[kwargs] )== literal[string]
identifier[combine] = identifier[conf] . identifier[get] ( literal[string] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[combine] keyword[in] { literal[string] , literal[string] }:
keyword[raise] identifier[Exception] (
literal[string] % identifier[combine] )
identifier[rule_defs] = identifier[map] ( identifier[DotDict] , identifier[utils] . identifier[listize] ( identifier[conf] [ literal[string] ]))
identifier[get_pass] = identifier[partial] ( identifier[utils] . identifier[get_pass] , identifier[test] = identifier[test] )
identifier[get_value] = identifier[partial] ( identifier[utils] . identifier[get_value] ,** identifier[kwargs] )
identifier[parse_conf] = identifier[partial] ( identifier[utils] . identifier[parse_conf] , identifier[parse_func] = identifier[get_value] ,** identifier[kwargs] )
identifier[get_rules] = keyword[lambda] identifier[i] : identifier[imap] ( identifier[parse_conf] , identifier[rule_defs] , identifier[repeat] ( identifier[i] ))
identifier[funcs] =[ identifier[COMBINE_BOOLEAN] [ identifier[combine] ], identifier[utils] . identifier[passthrough] , identifier[utils] . identifier[passthrough] ]
identifier[inputs] = identifier[imap] ( identifier[DotDict] , identifier[_INPUT] )
identifier[splits] = identifier[utils] . identifier[broadcast] ( identifier[inputs] , identifier[get_rules] , identifier[utils] . identifier[passthrough] , identifier[get_pass] )
identifier[outputs] = identifier[starmap] ( identifier[partial] ( identifier[parse_rules] ,** identifier[kwargs] ), identifier[splits] )
identifier[parsed] = identifier[utils] . identifier[dispatch] ( identifier[outputs] ,* identifier[funcs] )
identifier[gathered] = identifier[starmap] ( identifier[partial] ( identifier[parse_result] , identifier[permit] = identifier[permit] ), identifier[parsed] )
identifier[_OUTPUT] = identifier[ifilter] ( keyword[None] , identifier[gathered] )
keyword[return] identifier[_OUTPUT] | def pipe_filter(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that filters for source items matching the given rules.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'MODE': {'value': <'permit' or 'block'>},
'COMBINE': {'value': <'and' or 'or'>}
'RULE': [
{
'field': {'value': 'search field'},
'op': {'value': 'one of SWITCH above'},
'value': {'value': 'search term'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of filtered items
Examples
--------
>>> import os.path as p
>>> from pipe2py.modules.pipeforever import pipe_forever
>>> from pipe2py.modules.pipefetchdata import pipe_fetchdata
>>> parent = p.dirname(p.dirname(__file__))
>>> file_name = p.abspath(p.join(parent, 'data', 'gigs.json'))
>>> path = 'value.items'
>>> url = 'file://%s' % file_name
>>> conf = {'URL': {'value': url}, 'path': {'value': path}}
>>> input = pipe_fetchdata(_INPUT=pipe_forever(), conf=conf)
>>> mode = {'value': 'permit'}
>>> combine = {'value': 'and'}
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, 'value': {'value': 'web'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> pipe_filter(_INPUT=input, conf=conf).next()['title']
u'E-Commerce Website Developer | Elance Job'
>>> rule = [{'field': {'value': 'title'}, 'op': {'value': 'contains'}, 'value': {'value': 'kjhlked'}}]
>>> conf = {'MODE': mode, 'COMBINE': combine, 'RULE': rule}
>>> list(pipe_filter(_INPUT=input, conf=conf))
[]
"""
conf = DotDict(conf)
test = kwargs.pop('pass_if', None)
permit = conf.get('MODE', **kwargs) == 'permit'
combine = conf.get('COMBINE', **kwargs)
if not combine in {'and', 'or'}:
raise Exception("Invalid combine: %s. (Expected 'and' or 'or')" % combine) # depends on [control=['if'], data=[]]
rule_defs = map(DotDict, utils.listize(conf['RULE']))
get_pass = partial(utils.get_pass, test=test)
get_value = partial(utils.get_value, **kwargs)
parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
get_rules = lambda i: imap(parse_conf, rule_defs, repeat(i))
funcs = [COMBINE_BOOLEAN[combine], utils.passthrough, utils.passthrough]
inputs = imap(DotDict, _INPUT)
splits = utils.broadcast(inputs, get_rules, utils.passthrough, get_pass)
outputs = starmap(partial(parse_rules, **kwargs), splits)
parsed = utils.dispatch(outputs, *funcs)
gathered = starmap(partial(parse_result, permit=permit), parsed)
_OUTPUT = ifilter(None, gathered)
return _OUTPUT |
def mk_privkeys(num):
"make privkeys that support coloring, see utils.cstr"
privkeys = []
assert num <= num_colors
for i in range(num):
j = 0
while True:
k = sha3(str(j))
a = privtoaddr(k)
an = big_endian_to_int(a)
if an % num_colors == i:
break
j += 1
privkeys.append(k)
return privkeys | def function[mk_privkeys, parameter[num]]:
constant[make privkeys that support coloring, see utils.cstr]
variable[privkeys] assign[=] list[[]]
assert[compare[name[num] less_or_equal[<=] name[num_colors]]]
for taget[name[i]] in starred[call[name[range], parameter[name[num]]]] begin[:]
variable[j] assign[=] constant[0]
while constant[True] begin[:]
variable[k] assign[=] call[name[sha3], parameter[call[name[str], parameter[name[j]]]]]
variable[a] assign[=] call[name[privtoaddr], parameter[name[k]]]
variable[an] assign[=] call[name[big_endian_to_int], parameter[name[a]]]
if compare[binary_operation[name[an] <ast.Mod object at 0x7da2590d6920> name[num_colors]] equal[==] name[i]] begin[:]
break
<ast.AugAssign object at 0x7da2054a72b0>
call[name[privkeys].append, parameter[name[k]]]
return[name[privkeys]] | keyword[def] identifier[mk_privkeys] ( identifier[num] ):
literal[string]
identifier[privkeys] =[]
keyword[assert] identifier[num] <= identifier[num_colors]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num] ):
identifier[j] = literal[int]
keyword[while] keyword[True] :
identifier[k] = identifier[sha3] ( identifier[str] ( identifier[j] ))
identifier[a] = identifier[privtoaddr] ( identifier[k] )
identifier[an] = identifier[big_endian_to_int] ( identifier[a] )
keyword[if] identifier[an] % identifier[num_colors] == identifier[i] :
keyword[break]
identifier[j] += literal[int]
identifier[privkeys] . identifier[append] ( identifier[k] )
keyword[return] identifier[privkeys] | def mk_privkeys(num):
"""make privkeys that support coloring, see utils.cstr"""
privkeys = []
assert num <= num_colors
for i in range(num):
j = 0
while True:
k = sha3(str(j))
a = privtoaddr(k)
an = big_endian_to_int(a)
if an % num_colors == i:
break # depends on [control=['if'], data=[]]
j += 1 # depends on [control=['while'], data=[]]
privkeys.append(k) # depends on [control=['for'], data=['i']]
return privkeys |
def get_broadcast_transactions(coin_symbol='btc', limit=10, api_key=None):
"""
Get a list of broadcast but unconfirmed transactions
Similar to bitcoind's getrawmempool method
"""
url = make_url(coin_symbol, 'txs')
params = {}
if api_key:
params['token'] = api_key
if limit:
params['limit'] = limit
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
response_dict = get_valid_json(r)
unconfirmed_txs = []
for unconfirmed_tx in response_dict:
unconfirmed_tx['received'] = parser.parse(unconfirmed_tx['received'])
unconfirmed_txs.append(unconfirmed_tx)
return unconfirmed_txs | def function[get_broadcast_transactions, parameter[coin_symbol, limit, api_key]]:
constant[
Get a list of broadcast but unconfirmed transactions
Similar to bitcoind's getrawmempool method
]
variable[url] assign[=] call[name[make_url], parameter[name[coin_symbol], constant[txs]]]
variable[params] assign[=] dictionary[[], []]
if name[api_key] begin[:]
call[name[params]][constant[token]] assign[=] name[api_key]
if name[limit] begin[:]
call[name[params]][constant[limit]] assign[=] name[limit]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
variable[response_dict] assign[=] call[name[get_valid_json], parameter[name[r]]]
variable[unconfirmed_txs] assign[=] list[[]]
for taget[name[unconfirmed_tx]] in starred[name[response_dict]] begin[:]
call[name[unconfirmed_tx]][constant[received]] assign[=] call[name[parser].parse, parameter[call[name[unconfirmed_tx]][constant[received]]]]
call[name[unconfirmed_txs].append, parameter[name[unconfirmed_tx]]]
return[name[unconfirmed_txs]] | keyword[def] identifier[get_broadcast_transactions] ( identifier[coin_symbol] = literal[string] , identifier[limit] = literal[int] , identifier[api_key] = keyword[None] ):
literal[string]
identifier[url] = identifier[make_url] ( identifier[coin_symbol] , literal[string] )
identifier[params] ={}
keyword[if] identifier[api_key] :
identifier[params] [ literal[string] ]= identifier[api_key]
keyword[if] identifier[limit] :
identifier[params] [ literal[string] ]= identifier[limit]
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] , identifier[verify] = keyword[True] , identifier[timeout] = identifier[TIMEOUT_IN_SECONDS] )
identifier[response_dict] = identifier[get_valid_json] ( identifier[r] )
identifier[unconfirmed_txs] =[]
keyword[for] identifier[unconfirmed_tx] keyword[in] identifier[response_dict] :
identifier[unconfirmed_tx] [ literal[string] ]= identifier[parser] . identifier[parse] ( identifier[unconfirmed_tx] [ literal[string] ])
identifier[unconfirmed_txs] . identifier[append] ( identifier[unconfirmed_tx] )
keyword[return] identifier[unconfirmed_txs] | def get_broadcast_transactions(coin_symbol='btc', limit=10, api_key=None):
"""
Get a list of broadcast but unconfirmed transactions
Similar to bitcoind's getrawmempool method
"""
url = make_url(coin_symbol, 'txs')
params = {}
if api_key:
params['token'] = api_key # depends on [control=['if'], data=[]]
if limit:
params['limit'] = limit # depends on [control=['if'], data=[]]
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
response_dict = get_valid_json(r)
unconfirmed_txs = []
for unconfirmed_tx in response_dict:
unconfirmed_tx['received'] = parser.parse(unconfirmed_tx['received'])
unconfirmed_txs.append(unconfirmed_tx) # depends on [control=['for'], data=['unconfirmed_tx']]
return unconfirmed_txs |
def build(filenames, uri, cl_args, link_args, x64, native):
"""Build (Don't use for the time being).
"""
logging.info(_('This is source file building mode.'))
logging.debug(_('filenames: %s'), filenames)
logging.debug(_('uri: %s'), uri)
logging.debug(_('cl_args: %s'), cl_args)
logging.debug(_('link_args: %s'), link_args)
logging.debug(_('native: %s'), native)
logging.debug(_('x64: %s'), x64)
if is_windows():
pass
# ret = msbuild(uri, native, list(filenames), x64=x64,
# cl_args=cl_args, link_args=link_args)
else:
builder = LinuxBuilder()
ret = builder.build(filenames, x64, 'src', 'out')
sys.exit(ret) | def function[build, parameter[filenames, uri, cl_args, link_args, x64, native]]:
constant[Build (Don't use for the time being).
]
call[name[logging].info, parameter[call[name[_], parameter[constant[This is source file building mode.]]]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[filenames: %s]]], name[filenames]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[uri: %s]]], name[uri]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[cl_args: %s]]], name[cl_args]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[link_args: %s]]], name[link_args]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[native: %s]]], name[native]]]
call[name[logging].debug, parameter[call[name[_], parameter[constant[x64: %s]]], name[x64]]]
if call[name[is_windows], parameter[]] begin[:]
pass
call[name[sys].exit, parameter[name[ret]]] | keyword[def] identifier[build] ( identifier[filenames] , identifier[uri] , identifier[cl_args] , identifier[link_args] , identifier[x64] , identifier[native] ):
literal[string]
identifier[logging] . identifier[info] ( identifier[_] ( literal[string] ))
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[filenames] )
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[uri] )
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[cl_args] )
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[link_args] )
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[native] )
identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[x64] )
keyword[if] identifier[is_windows] ():
keyword[pass]
keyword[else] :
identifier[builder] = identifier[LinuxBuilder] ()
identifier[ret] = identifier[builder] . identifier[build] ( identifier[filenames] , identifier[x64] , literal[string] , literal[string] )
identifier[sys] . identifier[exit] ( identifier[ret] ) | def build(filenames, uri, cl_args, link_args, x64, native):
"""Build (Don't use for the time being).
"""
logging.info(_('This is source file building mode.'))
logging.debug(_('filenames: %s'), filenames)
logging.debug(_('uri: %s'), uri)
logging.debug(_('cl_args: %s'), cl_args)
logging.debug(_('link_args: %s'), link_args)
logging.debug(_('native: %s'), native)
logging.debug(_('x64: %s'), x64)
if is_windows():
pass # depends on [control=['if'], data=[]]
else:
# ret = msbuild(uri, native, list(filenames), x64=x64,
# cl_args=cl_args, link_args=link_args)
builder = LinuxBuilder()
ret = builder.build(filenames, x64, 'src', 'out')
sys.exit(ret) |
def from_file(cls, file_path, validate=True):
    """Deserialize an XML file into an instance of this class.

    :param file_path: path of the XML document to load
    :param validate: whether the XML should be validated against the
        embedded XSD definition
    :type validate: Boolean
    :returns: the Python object built from the file
    """
    return xmlmap.load_xmlobject_from_file(
        file_path, xmlclass=cls, validate=validate)
constant[ Creates a Python object from a XML file
:param file_path: Path to the XML file
:param validate: XML should be validated against the embedded XSD definition
:type validate: Boolean
:returns: the Python object
]
return[call[name[xmlmap].load_xmlobject_from_file, parameter[name[file_path]]]] | keyword[def] identifier[from_file] ( identifier[cls] , identifier[file_path] , identifier[validate] = keyword[True] ):
literal[string]
keyword[return] identifier[xmlmap] . identifier[load_xmlobject_from_file] ( identifier[file_path] , identifier[xmlclass] = identifier[cls] , identifier[validate] = identifier[validate] ) | def from_file(cls, file_path, validate=True):
""" Creates a Python object from a XML file
:param file_path: Path to the XML file
:param validate: XML should be validated against the embedded XSD definition
:type validate: Boolean
:returns: the Python object
"""
return xmlmap.load_xmlobject_from_file(file_path, xmlclass=cls, validate=validate) |
def exact(self, *args, **kwargs):
    """Compare attributes of pairs exactly.

    Shortcut of :class:`recordlinkage.compare.Exact`::

        from recordlinkage.compare import Exact

        indexer = recordlinkage.Compare()
        indexer.add(Exact())
    """
    # Register the comparator and return self so calls can be chained.
    self.add(Exact(*args, **kwargs))
    return self
constant[Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact())
]
variable[compare] assign[=] call[name[Exact], parameter[<ast.Starred object at 0x7da18f58dcc0>]]
call[name[self].add, parameter[name[compare]]]
return[name[self]] | keyword[def] identifier[exact] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[compare] = identifier[Exact] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[add] ( identifier[compare] )
keyword[return] identifier[self] | def exact(self, *args, **kwargs):
"""Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact())
"""
compare = Exact(*args, **kwargs)
self.add(compare)
return self |
def FindAttribute(self, textAttributeId: int, val, backward: bool) -> 'TextRange':
    """
    Call IUIAutomationTextRange::FindAttribute.
    textAttributeID: int, a value in class `TextAttributeId`.
    val: COM VARIANT according to textAttributeId? todo.
    backward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.
    return `TextRange` or None, a text range subset that has the specified text attribute value.
    Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findattribute
    """
    found = self.textRange.FindAttribute(textAttributeId, val, int(backward))
    if not found:
        # COM call returned no matching range.
        return None
    return TextRange(textRange=found)
constant[
Call IUIAutomationTextRange::FindAttribute.
textAttributeID: int, a value in class `TextAttributeId`.
val: COM VARIANT according to textAttributeId? todo.
backward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.
return `TextRange` or None, a text range subset that has the specified text attribute value.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findattribute
]
variable[textRange] assign[=] call[name[self].textRange.FindAttribute, parameter[name[textAttributeId], name[val], call[name[int], parameter[name[backward]]]]]
if name[textRange] begin[:]
return[call[name[TextRange], parameter[]]] | keyword[def] identifier[FindAttribute] ( identifier[self] , identifier[textAttributeId] : identifier[int] , identifier[val] , identifier[backward] : identifier[bool] )-> literal[string] :
literal[string]
identifier[textRange] = identifier[self] . identifier[textRange] . identifier[FindAttribute] ( identifier[textAttributeId] , identifier[val] , identifier[int] ( identifier[backward] ))
keyword[if] identifier[textRange] :
keyword[return] identifier[TextRange] ( identifier[textRange] = identifier[textRange] ) | def FindAttribute(self, textAttributeId: int, val, backward: bool) -> 'TextRange':
"""
Call IUIAutomationTextRange::FindAttribute.
textAttributeID: int, a value in class `TextAttributeId`.
val: COM VARIANT according to textAttributeId? todo.
backward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.
return `TextRange` or None, a text range subset that has the specified text attribute value.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findattribute
"""
textRange = self.textRange.FindAttribute(textAttributeId, val, int(backward))
if textRange:
return TextRange(textRange=textRange) # depends on [control=['if'], data=[]] |
def get_authorization_vault_assignment_session(self):
    """Gets the session for assigning authorizations to vault mappings.
    return: (osid.authorization.AuthorizationVaultAssignmentSession)
            - a ``AuthorizationVaultAssignmentSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented -
            ``supports_authorization_vault_assignment()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_authorization_vault_assignment()`` is ``true``.*
    """
    # Guard: this session is only available when the provider advertises it.
    if self.supports_authorization_vault_assignment():
        # pylint: disable=no-member
        return sessions.AuthorizationVaultAssignmentSession(runtime=self._runtime)
    raise errors.Unimplemented()
constant[Gets the session for assigning authorizations to vault mappings.
return: (osid.authorization.AuthorizationVaultAssignmentSession)
- a ``AuthorizationVaultAssignmentSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_authorization_vault_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_vault_assignment()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da2054a6ec0> begin[:]
<ast.Raise object at 0x7da18dc06710>
return[call[name[sessions].AuthorizationVaultAssignmentSession, parameter[]]] | keyword[def] identifier[get_authorization_vault_assignment_session] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_authorization_vault_assignment] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[AuthorizationVaultAssignmentSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_authorization_vault_assignment_session(self):
"""Gets the session for assigning authorizations to vault mappings.
return: (osid.authorization.AuthorizationVaultAssignmentSession)
- a ``AuthorizationVaultAssignmentSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_authorization_vault_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_vault_assignment()`` is ``true``.*
"""
if not self.supports_authorization_vault_assignment():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.AuthorizationVaultAssignmentSession(runtime=self._runtime) |
def arbiter_priority(req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None):
    """ Static priority arbiter: grants the request with highest priority, which is the lower index
            req_vec - (i) vector of request signals, req_vec[0] is with the highest priority
            gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at a time
            gnt_idx - (o) optional, grant index, index of the granted request
            gnt_vld - (o) optional, grant valid, indicates that there is a granted request
    """
    REQ_NUM = len(req_vec)

    gnt_vec_s = Signal(intbv(0)[REQ_NUM:])
    gnt_idx_s = Signal(intbv(0, min=0, max=REQ_NUM))
    gnt_vld_s = Signal(bool(0))

    @always_comb
    def priority_encoder():
        # Default: no request granted.
        gnt_vec_s.next = 0
        gnt_idx_s.next = 0
        gnt_vld_s.next = 0
        # Scan from the highest-priority request (index 0) and grant the
        # first active one; stop immediately so only one grant is set.
        for i in range(REQ_NUM):
            if ( req_vec[i]==1 ):
                gnt_vec_s.next[i] = 1
                gnt_idx_s.next = i
                gnt_vld_s.next = 1
                break

    # Drive only the outputs the caller actually connected.
    # NOTE: use an identity check against None — `!= None` on a MyHDL Signal
    # invokes the Signal's overloaded comparison operator, not a None test.
    # The returned generators must stay referenced so instances() finds them.
    if gnt_vec is not None:
        _vec = assign(gnt_vec, gnt_vec_s)
    if gnt_idx is not None:
        _idx = assign(gnt_idx, gnt_idx_s)
    if gnt_vld is not None:
        _vld = assign(gnt_vld, gnt_vld_s)

    return instances()
constant[ Static priority arbiter: grants the request with highest priority, which is the lower index
req_vec - (i) vector of request signals, req_vec[0] is with the highest priority
gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at at time
gnt_idx - (o) optional, grant index, index of the granted request
gnt_vld - (o) optional, grant valid, indicate that there is a granted request
]
variable[REQ_NUM] assign[=] call[name[len], parameter[name[req_vec]]]
variable[gnt_vec_s] assign[=] call[name[Signal], parameter[call[call[name[intbv], parameter[constant[0]]]][<ast.Slice object at 0x7da1b0ca6bc0>]]]
variable[gnt_idx_s] assign[=] call[name[Signal], parameter[call[name[intbv], parameter[constant[0]]]]]
variable[gnt_vld_s] assign[=] call[name[Signal], parameter[call[name[bool], parameter[constant[0]]]]]
def function[prioroty_encoder, parameter[]]:
name[gnt_vec_s].next assign[=] constant[0]
name[gnt_idx_s].next assign[=] constant[0]
name[gnt_vld_s].next assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[REQ_NUM]]]] begin[:]
if compare[call[name[req_vec]][name[i]] equal[==] constant[1]] begin[:]
call[name[gnt_vec_s].next][name[i]] assign[=] constant[1]
name[gnt_idx_s].next assign[=] name[i]
name[gnt_vld_s].next assign[=] constant[1]
break
if compare[name[gnt_vec] not_equal[!=] constant[None]] begin[:]
variable[_vec] assign[=] call[name[assign], parameter[name[gnt_vec], name[gnt_vec_s]]]
if compare[name[gnt_idx] not_equal[!=] constant[None]] begin[:]
variable[_idx] assign[=] call[name[assign], parameter[name[gnt_idx], name[gnt_idx_s]]]
if compare[name[gnt_vld] not_equal[!=] constant[None]] begin[:]
variable[_vld] assign[=] call[name[assign], parameter[name[gnt_vld], name[gnt_vld_s]]]
return[call[name[instances], parameter[]]] | keyword[def] identifier[arbiter_priority] ( identifier[req_vec] , identifier[gnt_vec] = keyword[None] , identifier[gnt_idx] = keyword[None] , identifier[gnt_vld] = keyword[None] ):
literal[string]
identifier[REQ_NUM] = identifier[len] ( identifier[req_vec] )
identifier[gnt_vec_s] = identifier[Signal] ( identifier[intbv] ( literal[int] )[ identifier[REQ_NUM] :])
identifier[gnt_idx_s] = identifier[Signal] ( identifier[intbv] ( literal[int] , identifier[min] = literal[int] , identifier[max] = identifier[REQ_NUM] ))
identifier[gnt_vld_s] = identifier[Signal] ( identifier[bool] ( literal[int] ))
@ identifier[always_comb]
keyword[def] identifier[prioroty_encoder] ():
identifier[gnt_vec_s] . identifier[next] = literal[int]
identifier[gnt_idx_s] . identifier[next] = literal[int]
identifier[gnt_vld_s] . identifier[next] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[REQ_NUM] ):
keyword[if] ( identifier[req_vec] [ identifier[i] ]== literal[int] ):
identifier[gnt_vec_s] . identifier[next] [ identifier[i] ]= literal[int]
identifier[gnt_idx_s] . identifier[next] = identifier[i]
identifier[gnt_vld_s] . identifier[next] = literal[int]
keyword[break]
keyword[if] identifier[gnt_vec] != keyword[None] : identifier[_vec] = identifier[assign] ( identifier[gnt_vec] , identifier[gnt_vec_s] )
keyword[if] identifier[gnt_idx] != keyword[None] : identifier[_idx] = identifier[assign] ( identifier[gnt_idx] , identifier[gnt_idx_s] )
keyword[if] identifier[gnt_vld] != keyword[None] : identifier[_vld] = identifier[assign] ( identifier[gnt_vld] , identifier[gnt_vld_s] )
keyword[return] identifier[instances] () | def arbiter_priority(req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None):
""" Static priority arbiter: grants the request with highest priority, which is the lower index
req_vec - (i) vector of request signals, req_vec[0] is with the highest priority
gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at at time
gnt_idx - (o) optional, grant index, index of the granted request
gnt_vld - (o) optional, grant valid, indicate that there is a granted request
"""
REQ_NUM = len(req_vec)
gnt_vec_s = Signal(intbv(0)[REQ_NUM:])
gnt_idx_s = Signal(intbv(0, min=0, max=REQ_NUM))
gnt_vld_s = Signal(bool(0))
@always_comb
def prioroty_encoder():
gnt_vec_s.next = 0
gnt_idx_s.next = 0
gnt_vld_s.next = 0
for i in range(REQ_NUM):
if req_vec[i] == 1:
gnt_vec_s.next[i] = 1
gnt_idx_s.next = i
gnt_vld_s.next = 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if gnt_vec != None:
_vec = assign(gnt_vec, gnt_vec_s) # depends on [control=['if'], data=['gnt_vec']]
if gnt_idx != None:
_idx = assign(gnt_idx, gnt_idx_s) # depends on [control=['if'], data=['gnt_idx']]
if gnt_vld != None:
_vld = assign(gnt_vld, gnt_vld_s) # depends on [control=['if'], data=['gnt_vld']]
return instances() |
def _grad(self, funct, params=None, dl=2e-5, rts=False, nout=1, out=None,
        **kwargs):
    """
    Gradient of `func` wrt a set of parameters params. (see _graddoc)
    """
    if params is None:
        params = self.param_all()
    plist = util.listify(params)
    base = funct(**kwargs)

    def _grad_shape(arr):
        # One row per parameter; trailing dims match the function output
        # (scalar outputs get a trailing dim of 1).
        tail = arr.shape if isinstance(arr, np.ndarray) else (1,)
        return (len(plist),) + tail

    if out is not None:
        # Write into caller-supplied storage.
        grad = out
    elif nout == 1:
        # Preallocate for memory reasons.
        grad = np.zeros(_grad_shape(base))
    else:
        grad = [np.zeros(_grad_shape(base[k])) for k in range(nout)]

    for idx, prm in enumerate(plist):
        piece = self._grad_one_param(funct, prm, dl=dl, rts=rts,
                                     nout=nout, **kwargs)
        if nout == 1:
            grad[idx] = piece
        else:
            for k in range(nout):
                grad[k][idx] = piece[k]
    return grad
constant[
Gradient of `func` wrt a set of parameters params. (see _graddoc)
]
if compare[name[params] is constant[None]] begin[:]
variable[params] assign[=] call[name[self].param_all, parameter[]]
variable[ps] assign[=] call[name[util].listify, parameter[name[params]]]
variable[f0] assign[=] call[name[funct], parameter[]]
variable[calc_shape] assign[=] <ast.Lambda object at 0x7da2047ebcd0>
if compare[name[out] is_not constant[None]] begin[:]
variable[grad] assign[=] name[out]
for taget[tuple[[<ast.Name object at 0x7da2047eb3d0>, <ast.Name object at 0x7da2047ea470>]]] in starred[call[name[enumerate], parameter[name[ps]]]] begin[:]
if compare[name[nout] equal[==] constant[1]] begin[:]
call[name[grad]][name[i]] assign[=] call[name[self]._grad_one_param, parameter[name[funct], name[p]]]
return[name[grad]] | keyword[def] identifier[_grad] ( identifier[self] , identifier[funct] , identifier[params] = keyword[None] , identifier[dl] = literal[int] , identifier[rts] = keyword[False] , identifier[nout] = literal[int] , identifier[out] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[params] keyword[is] keyword[None] :
identifier[params] = identifier[self] . identifier[param_all] ()
identifier[ps] = identifier[util] . identifier[listify] ( identifier[params] )
identifier[f0] = identifier[funct] (** identifier[kwargs] )
identifier[calc_shape] =(
keyword[lambda] identifier[ar] :( identifier[len] ( identifier[ps] ),)+( identifier[ar] . identifier[shape] keyword[if] identifier[isinstance] (
identifier[ar] , identifier[np] . identifier[ndarray] ) keyword[else] ( literal[int] ,)))
keyword[if] identifier[out] keyword[is] keyword[not] keyword[None] :
identifier[grad] = identifier[out]
keyword[elif] identifier[nout] == literal[int] :
identifier[shape] = identifier[calc_shape] ( identifier[f0] )
identifier[grad] = identifier[np] . identifier[zeros] ( identifier[shape] )
keyword[else] :
identifier[shape] =[ identifier[calc_shape] ( identifier[f0] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nout] )]
identifier[grad] =[ identifier[np] . identifier[zeros] ( identifier[shp] ) keyword[for] identifier[shp] keyword[in] identifier[shape] ]
keyword[for] identifier[i] , identifier[p] keyword[in] identifier[enumerate] ( identifier[ps] ):
keyword[if] identifier[nout] == literal[int] :
identifier[grad] [ identifier[i] ]= identifier[self] . identifier[_grad_one_param] ( identifier[funct] , identifier[p] , identifier[dl] = identifier[dl] , identifier[rts] = identifier[rts] ,
identifier[nout] = identifier[nout] ,** identifier[kwargs] )
keyword[else] :
identifier[stuff] = identifier[self] . identifier[_grad_one_param] ( identifier[funct] , identifier[p] , identifier[dl] = identifier[dl] , identifier[rts] = identifier[rts] ,
identifier[nout] = identifier[nout] ,** identifier[kwargs] )
keyword[for] identifier[a] keyword[in] identifier[range] ( identifier[nout] ): identifier[grad] [ identifier[a] ][ identifier[i] ]= identifier[stuff] [ identifier[a] ]
keyword[return] identifier[grad] | def _grad(self, funct, params=None, dl=2e-05, rts=False, nout=1, out=None, **kwargs):
"""
Gradient of `func` wrt a set of parameters params. (see _graddoc)
"""
if params is None:
params = self.param_all() # depends on [control=['if'], data=['params']]
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire gradient to return and make an array
calc_shape = lambda ar: (len(ps),) + (ar.shape if isinstance(ar, np.ndarray) else (1,))
if out is not None:
grad = out # reference # depends on [control=['if'], data=['out']]
elif nout == 1:
shape = calc_shape(f0)
grad = np.zeros(shape) # must be preallocated for mem reasons # depends on [control=['if'], data=[]]
else:
shape = [calc_shape(f0[i]) for i in range(nout)]
grad = [np.zeros(shp) for shp in shape]
for (i, p) in enumerate(ps):
if nout == 1:
grad[i] = self._grad_one_param(funct, p, dl=dl, rts=rts, nout=nout, **kwargs) # depends on [control=['if'], data=['nout']]
else:
stuff = self._grad_one_param(funct, p, dl=dl, rts=rts, nout=nout, **kwargs)
for a in range(nout):
grad[a][i] = stuff[a] # depends on [control=['for'], data=['a']] # depends on [control=['for'], data=[]]
return grad |
def get_register(self, motors, disable_sync_read=False):
    """ Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`.

    :param motors: motors whose register should be refreshed
    :param disable_sync_read: when True, issue one bus read per motor
        instead of a single sync read (for controllers lacking sync read)
    :returns: True when values were read and stored, False otherwise
    """
    if not motors:
        return False
    ids = [m.id for m in motors]
    getter = getattr(self.io, 'get_{}'.format(self.regname))
    if disable_sync_read:
        # One read per motor, flattened in order.  (Avoids the quadratic
        # sum(list, []) concatenation and shadowing the builtin `id`.)
        values = [val for motor_id in ids for val in getter([motor_id])]
    else:
        values = getter(ids)
    if not values:
        return False
    for m, val in zip(motors, values):
        # Store directly in __dict__ to bypass any descriptor machinery.
        m.__dict__[self.varname] = val
    for m in motors:
        m._read_synced[self.varname].done()
    return True
constant[ Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`. ]
if <ast.UnaryOp object at 0x7da1b1305990> begin[:]
return[constant[False]]
variable[ids] assign[=] <ast.ListComp object at 0x7da1b1307fd0>
variable[getter] assign[=] call[name[getattr], parameter[name[self].io, call[constant[get_{}].format, parameter[name[self].regname]]]]
variable[values] assign[=] <ast.IfExp object at 0x7da1b13051b0>
if <ast.UnaryOp object at 0x7da1b1305150> begin[:]
return[constant[False]]
for taget[tuple[[<ast.Name object at 0x7da1b1304a30>, <ast.Name object at 0x7da1b1304550>]]] in starred[call[name[zip], parameter[name[motors], name[values]]]] begin[:]
call[name[m].__dict__][name[self].varname] assign[=] name[val]
for taget[name[m]] in starred[name[motors]] begin[:]
call[call[name[m]._read_synced][name[self].varname].done, parameter[]]
return[constant[True]] | keyword[def] identifier[get_register] ( identifier[self] , identifier[motors] , identifier[disable_sync_read] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[motors] :
keyword[return] keyword[False]
identifier[ids] =[ identifier[m] . identifier[id] keyword[for] identifier[m] keyword[in] identifier[motors] ]
identifier[getter] = identifier[getattr] ( identifier[self] . identifier[io] , literal[string] . identifier[format] ( identifier[self] . identifier[regname] ))
identifier[values] =( identifier[sum] ([ identifier[list] ( identifier[getter] ([ identifier[id] ])) keyword[for] identifier[id] keyword[in] identifier[ids] ],[])
keyword[if] identifier[disable_sync_read] keyword[else]
identifier[getter] ( identifier[ids] ))
keyword[if] keyword[not] identifier[values] :
keyword[return] keyword[False]
keyword[for] identifier[m] , identifier[val] keyword[in] identifier[zip] ( identifier[motors] , identifier[values] ):
identifier[m] . identifier[__dict__] [ identifier[self] . identifier[varname] ]= identifier[val]
keyword[for] identifier[m] keyword[in] identifier[motors] :
identifier[m] . identifier[_read_synced] [ identifier[self] . identifier[varname] ]. identifier[done] ()
keyword[return] keyword[True] | def get_register(self, motors, disable_sync_read=False):
""" Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`. """
if not motors:
return False # depends on [control=['if'], data=[]]
ids = [m.id for m in motors]
getter = getattr(self.io, 'get_{}'.format(self.regname))
values = sum([list(getter([id])) for id in ids], []) if disable_sync_read else getter(ids)
if not values:
return False # depends on [control=['if'], data=[]]
for (m, val) in zip(motors, values):
m.__dict__[self.varname] = val # depends on [control=['for'], data=[]]
for m in motors:
m._read_synced[self.varname].done() # depends on [control=['for'], data=['m']]
return True |
def _converge(c):
    """
    Examine world state, returning data on what needs updating for release.
    :param c: Invoke ``Context`` object or subclass.
    :returns:
        Two dicts (technically, dict subclasses, which allow attribute access),
        ``actions`` and ``state`` (in that order.)
        ``actions`` maps release component names to variables (usually class
        constants) determining what action should be taken for that component:
        - ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
          ``OKAY``.
        - ``version``: members of `.VersionFile`.
        ``state`` contains the data used to calculate the actions, in case the
        caller wants to do further analysis:
        - ``branch``: the name of the checked-out Git branch.
        - ``changelog``: the parsed project changelog, a `dict` of releases.
        - ``release_type``: what type of release the branch appears to be (will
          be a member of `.Release` such as ``Release.BUGFIX``.)
        - ``latest_line_release``: the latest changelog release found for
          current release type/line.
        - ``latest_overall_release``: the absolute most recent release entry.
          Useful for determining next minor/feature release.
        - ``current_version``: the version string as found in the package's
          ``__version__``.
    """
    #
    # Data/state gathering
    #
    # Get data about current repo context: what branch are we on & what kind of
    # release does it appear to represent?
    branch, release_type = _release_line(c)
    # Short-circuit if type is undefined; we can't do useful work for that.
    if release_type is Release.UNDEFINED:
        raise UndefinedReleaseType(
            "You don't seem to be on a release-related branch; "
            "why are you trying to cut a release?"
        )
    # Parse our changelog so we can tell what's released and what's not.
    # TODO: below needs to go in something doc-y somewhere; having it in a
    # non-user-facing subroutine docstring isn't visible enough.
    """
    .. note::
        Requires that one sets the ``packaging.changelog_file`` configuration
        option; it should be a relative or absolute path to your
        ``changelog.rst`` (or whatever it's named in your project).
    """
    # TODO: allow skipping changelog if not using Releases since we have no
    # other good way of detecting whether a changelog needs/got an update.
    # TODO: chdir to sphinx.source, import conf.py, look at
    # releases_changelog_name - that way it will honor that setting and we can
    # ditch this explicit one instead. (and the docstring above)
    changelog = parse_changelog(
        c.packaging.changelog_file, load_extensions=True
    )
    # Get latest appropriate changelog release and any unreleased issues, for
    # current line
    line_release, issues = _release_and_issues(changelog, branch, release_type)
    # Also get latest overall release, sometimes that matters (usually only
    # when latest *appropriate* release doesn't exist yet)
    overall_release = _versions_from_changelog(changelog)[-1]
    # Obtain the project's main package & its version data
    current_version = load_version(c)
    # Grab all git tags
    tags = _get_tags(c)
    # Bundle everything observed so far; Lexicon allows attribute access
    # (state.branch etc.) on top of plain dict behavior.
    state = Lexicon(
        {
            "branch": branch,
            "release_type": release_type,
            "changelog": changelog,
            "latest_line_release": Version(line_release)
            if line_release
            else None,
            "latest_overall_release": overall_release,  # already a Version
            "unreleased_issues": issues,
            "current_version": Version(current_version),
            "tags": tags,
        }
    )
    # Version number determinations:
    # - latest actually-released version
    # - the next version after that for current branch
    # - which of the two is the actual version we're looking to converge on,
    # depends on current changelog state.
    latest_version, next_version = _latest_and_next_version(state)
    state.latest_version = latest_version
    state.next_version = next_version
    # Expected version: the latest release unless unreleased issues exist,
    # in which case a new (next) release is pending.
    state.expected_version = latest_version
    if state.unreleased_issues:
        state.expected_version = next_version

    #
    # Logic determination / convergence
    #

    actions = Lexicon()

    # Changelog: needs new release entry if there are any unreleased issues for
    # current branch's line.
    # TODO: annotate with number of released issues [of each type?] - so not
    # just "up to date!" but "all set (will release 3 features & 5 bugs)"
    actions.changelog = Changelog.OKAY
    if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
        actions.changelog = Changelog.NEEDS_RELEASE

    # Version file: simply whether version file equals the target version.
    # TODO: corner case of 'version file is >1 release in the future', but
    # that's still wrong, just would be a different 'bad' status output.
    actions.version = VersionFile.OKAY
    if state.current_version != state.expected_version:
        actions.version = VersionFile.NEEDS_BUMP

    # Git tag: similar to version file, except the check is existence of tag
    # instead of comparison to file contents. We even reuse the
    # 'expected_version' variable wholesale.
    actions.tag = Tag.OKAY
    if state.expected_version not in state.tags:
        actions.tag = Tag.NEEDS_CUTTING

    #
    # Return
    #

    return actions, state
constant[
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
]
<ast.Tuple object at 0x7da2046230a0> assign[=] call[name[_release_line], parameter[name[c]]]
if compare[name[release_type] is name[Release].UNDEFINED] begin[:]
<ast.Raise object at 0x7da204620580>
constant[
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
]
variable[changelog] assign[=] call[name[parse_changelog], parameter[name[c].packaging.changelog_file]]
<ast.Tuple object at 0x7da2046216f0> assign[=] call[name[_release_and_issues], parameter[name[changelog], name[branch], name[release_type]]]
variable[overall_release] assign[=] call[call[name[_versions_from_changelog], parameter[name[changelog]]]][<ast.UnaryOp object at 0x7da2046218d0>]
variable[current_version] assign[=] call[name[load_version], parameter[name[c]]]
variable[tags] assign[=] call[name[_get_tags], parameter[name[c]]]
variable[state] assign[=] call[name[Lexicon], parameter[dictionary[[<ast.Constant object at 0x7da204620af0>, <ast.Constant object at 0x7da204621060>, <ast.Constant object at 0x7da204620cd0>, <ast.Constant object at 0x7da204621d50>, <ast.Constant object at 0x7da204621ab0>, <ast.Constant object at 0x7da204621c60>, <ast.Constant object at 0x7da2046235e0>, <ast.Constant object at 0x7da2046212a0>], [<ast.Name object at 0x7da204622590>, <ast.Name object at 0x7da204622f50>, <ast.Name object at 0x7da204623430>, <ast.IfExp object at 0x7da204622f20>, <ast.Name object at 0x7da204623070>, <ast.Name object at 0x7da2046225f0>, <ast.Call object at 0x7da204622f80>, <ast.Name object at 0x7da2046203d0>]]]]
<ast.Tuple object at 0x7da204621960> assign[=] call[name[_latest_and_next_version], parameter[name[state]]]
name[state].latest_version assign[=] name[latest_version]
name[state].next_version assign[=] name[next_version]
name[state].expected_version assign[=] name[latest_version]
if name[state].unreleased_issues begin[:]
name[state].expected_version assign[=] name[next_version]
variable[actions] assign[=] call[name[Lexicon], parameter[]]
name[actions].changelog assign[=] name[Changelog].OKAY
if <ast.BoolOp object at 0x7da204621450> begin[:]
name[actions].changelog assign[=] name[Changelog].NEEDS_RELEASE
name[actions].version assign[=] name[VersionFile].OKAY
if compare[name[state].current_version not_equal[!=] name[state].expected_version] begin[:]
name[actions].version assign[=] name[VersionFile].NEEDS_BUMP
name[actions].tag assign[=] name[Tag].OKAY
if compare[name[state].expected_version <ast.NotIn object at 0x7da2590d7190> name[state].tags] begin[:]
name[actions].tag assign[=] name[Tag].NEEDS_CUTTING
return[tuple[[<ast.Name object at 0x7da204623850>, <ast.Name object at 0x7da204621630>]]] | keyword[def] identifier[_converge] ( identifier[c] ):
literal[string]
identifier[branch] , identifier[release_type] = identifier[_release_line] ( identifier[c] )
keyword[if] identifier[release_type] keyword[is] identifier[Release] . identifier[UNDEFINED] :
keyword[raise] identifier[UndefinedReleaseType] (
literal[string]
literal[string]
)
literal[string]
identifier[changelog] = identifier[parse_changelog] (
identifier[c] . identifier[packaging] . identifier[changelog_file] , identifier[load_extensions] = keyword[True]
)
identifier[line_release] , identifier[issues] = identifier[_release_and_issues] ( identifier[changelog] , identifier[branch] , identifier[release_type] )
identifier[overall_release] = identifier[_versions_from_changelog] ( identifier[changelog] )[- literal[int] ]
identifier[current_version] = identifier[load_version] ( identifier[c] )
identifier[tags] = identifier[_get_tags] ( identifier[c] )
identifier[state] = identifier[Lexicon] (
{
literal[string] : identifier[branch] ,
literal[string] : identifier[release_type] ,
literal[string] : identifier[changelog] ,
literal[string] : identifier[Version] ( identifier[line_release] )
keyword[if] identifier[line_release]
keyword[else] keyword[None] ,
literal[string] : identifier[overall_release] ,
literal[string] : identifier[issues] ,
literal[string] : identifier[Version] ( identifier[current_version] ),
literal[string] : identifier[tags] ,
}
)
identifier[latest_version] , identifier[next_version] = identifier[_latest_and_next_version] ( identifier[state] )
identifier[state] . identifier[latest_version] = identifier[latest_version]
identifier[state] . identifier[next_version] = identifier[next_version]
identifier[state] . identifier[expected_version] = identifier[latest_version]
keyword[if] identifier[state] . identifier[unreleased_issues] :
identifier[state] . identifier[expected_version] = identifier[next_version]
identifier[actions] = identifier[Lexicon] ()
identifier[actions] . identifier[changelog] = identifier[Changelog] . identifier[OKAY]
keyword[if] identifier[release_type] keyword[in] ( identifier[Release] . identifier[BUGFIX] , identifier[Release] . identifier[FEATURE] ) keyword[and] identifier[issues] :
identifier[actions] . identifier[changelog] = identifier[Changelog] . identifier[NEEDS_RELEASE]
identifier[actions] . identifier[version] = identifier[VersionFile] . identifier[OKAY]
keyword[if] identifier[state] . identifier[current_version] != identifier[state] . identifier[expected_version] :
identifier[actions] . identifier[version] = identifier[VersionFile] . identifier[NEEDS_BUMP]
identifier[actions] . identifier[tag] = identifier[Tag] . identifier[OKAY]
keyword[if] identifier[state] . identifier[expected_version] keyword[not] keyword[in] identifier[state] . identifier[tags] :
identifier[actions] . identifier[tag] = identifier[Tag] . identifier[NEEDS_CUTTING]
keyword[return] identifier[actions] , identifier[state] | def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
"""
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
(branch, release_type) = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType("You don't seem to be on a release-related branch; why are you trying to cut a release?") # depends on [control=['if'], data=[]]
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"\n .. note::\n Requires that one sets the ``packaging.changelog_file`` configuration\n option; it should be a relative or absolute path to your\n ``changelog.rst`` (or whatever it's named in your project).\n "
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(c.packaging.changelog_file, load_extensions=True)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
(line_release, issues) = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c) # already a Version
state = Lexicon({'branch': branch, 'release_type': release_type, 'changelog': changelog, 'latest_line_release': Version(line_release) if line_release else None, 'latest_overall_release': overall_release, 'unreleased_issues': issues, 'current_version': Version(current_version), 'tags': tags})
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
(latest_version, next_version) = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version # depends on [control=['if'], data=[]]
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE # depends on [control=['if'], data=[]]
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP # depends on [control=['if'], data=[]]
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING # depends on [control=['if'], data=[]]
#
# Return
#
return (actions, state) |
def run(command, **kw):
    """Run `command`, catch expected failures, and return lines of output.

    Parameters
    ----------
    command : list
        Argument vector of the program to execute.
    **kw
        Extra keyword arguments forwarded to ``subprocess.check_output``
        (notably ``cwd``, which may be supplied as ``bytes``).

    Returns
    -------
    list of bytes with one entry per output line, or the empty tuple
    ``()`` when the command exits non-zero or the binary does not exist.
    """
    # Windows low-level subprocess API wants str for current working
    # directory.
    if sys.platform == 'win32':
        _cwd = kw.get('cwd', None)
        if _cwd is not None:
            kw['cwd'] = _cwd.decode()
    try:
        # In Python 3, iterating over bytes yield integers, so we call
        # `splitlines()` to force Python 3 to give us lines instead.
        return check_output(command, **kw).splitlines()
    except CalledProcessError:
        return ()
    except FileNotFoundError:
        # Bug fix: the old code did `kw['cwd'].decode(...)`, which raised
        # KeyError when no cwd was supplied and AttributeError on win32
        # where the value had already been converted to `str` above.
        cwd = kw.get('cwd', '.')
        if isinstance(cwd, bytes):
            cwd = cwd.decode("UTF-8")
        print("The {} binary was not found. Skipping directory {}.\n"
              .format(command[0], cwd))
        return ()
return () | def function[run, parameter[command]]:
constant[Run `command`, catch any exception, and return lines of output.]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
variable[_cwd] assign[=] call[name[kw].get, parameter[constant[cwd], constant[None]]]
if compare[name[_cwd] is_not constant[None]] begin[:]
call[name[kw]][constant[cwd]] assign[=] call[name[_cwd].decode, parameter[]]
<ast.Try object at 0x7da1b26acf40> | keyword[def] identifier[run] ( identifier[command] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[_cwd] = identifier[kw] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[_cwd] keyword[is] keyword[not] keyword[None] :
identifier[kw] [ literal[string] ]= identifier[_cwd] . identifier[decode] ()
keyword[try] :
keyword[return] identifier[check_output] ( identifier[command] ,** identifier[kw] ). identifier[splitlines] ()
keyword[except] identifier[CalledProcessError] :
keyword[return] ()
keyword[except] identifier[FileNotFoundError] :
identifier[print] ( literal[string]
. identifier[format] ( identifier[command] [ literal[int] ], identifier[kw] [ literal[string] ]. identifier[decode] ( literal[string] )))
keyword[return] () | def run(command, **kw):
"""Run `command`, catch any exception, and return lines of output."""
# Windows low-level subprocess API wants str for current working
# directory.
if sys.platform == 'win32':
_cwd = kw.get('cwd', None)
if _cwd is not None:
kw['cwd'] = _cwd.decode() # depends on [control=['if'], data=['_cwd']] # depends on [control=['if'], data=[]]
try:
# In Python 3, iterating over bytes yield integers, so we call
# `splitlines()` to force Python 3 to give us lines instead.
return check_output(command, **kw).splitlines() # depends on [control=['try'], data=[]]
except CalledProcessError:
return () # depends on [control=['except'], data=[]]
except FileNotFoundError:
print('The {} binary was not found. Skipping directory {}.\n'.format(command[0], kw['cwd'].decode('UTF-8')))
return () # depends on [control=['except'], data=[]] |
def image_scale(xscale=1.0, yscale=1.0, axes="gca"):
    """
    Scale the extent of the first image on `axes` by the given factors.
    """
    if axes == "gca":
        axes = _pylab.gca()
    # Current extent is (left, right, bottom, top).
    left, right, bottom, top = axes.images[0].get_extent()
    image_set_extent([left * xscale, right * xscale],
                     [bottom * yscale, top * yscale], axes)
image_set_extent([x1,x2],[y1,y2], axes) | def function[image_scale, parameter[xscale, yscale, axes]]:
constant[
Scales the image extent.
]
if compare[name[axes] equal[==] constant[gca]] begin[:]
variable[axes] assign[=] call[name[_pylab].gca, parameter[]]
variable[e] assign[=] call[call[name[axes].images][constant[0]].get_extent, parameter[]]
variable[x1] assign[=] binary_operation[call[name[e]][constant[0]] * name[xscale]]
variable[x2] assign[=] binary_operation[call[name[e]][constant[1]] * name[xscale]]
variable[y1] assign[=] binary_operation[call[name[e]][constant[2]] * name[yscale]]
variable[y2] assign[=] binary_operation[call[name[e]][constant[3]] * name[yscale]]
call[name[image_set_extent], parameter[list[[<ast.Name object at 0x7da18dc9a1d0>, <ast.Name object at 0x7da18dc98a00>]], list[[<ast.Name object at 0x7da18dc9a6e0>, <ast.Name object at 0x7da18dc99510>]], name[axes]]] | keyword[def] identifier[image_scale] ( identifier[xscale] = literal[int] , identifier[yscale] = literal[int] , identifier[axes] = literal[string] ):
literal[string]
keyword[if] identifier[axes] == literal[string] : identifier[axes] = identifier[_pylab] . identifier[gca] ()
identifier[e] = identifier[axes] . identifier[images] [ literal[int] ]. identifier[get_extent] ()
identifier[x1] = identifier[e] [ literal[int] ]* identifier[xscale]
identifier[x2] = identifier[e] [ literal[int] ]* identifier[xscale]
identifier[y1] = identifier[e] [ literal[int] ]* identifier[yscale]
identifier[y2] = identifier[e] [ literal[int] ]* identifier[yscale]
identifier[image_set_extent] ([ identifier[x1] , identifier[x2] ],[ identifier[y1] , identifier[y2] ], identifier[axes] ) | def image_scale(xscale=1.0, yscale=1.0, axes='gca'):
"""
Scales the image extent.
"""
if axes == 'gca':
axes = _pylab.gca() # depends on [control=['if'], data=['axes']]
e = axes.images[0].get_extent()
x1 = e[0] * xscale
x2 = e[1] * xscale
y1 = e[2] * yscale
y2 = e[3] * yscale
image_set_extent([x1, x2], [y1, y2], axes) |
def do_cleanup(cleanup):
    '''
    Clean up clone domain leftovers as much as possible.

    Extra robust clean up in order to deal with some small changes in libvirt
    behavior over time. Passed in volumes and domains are deleted, any errors
    are ignored. Used when cloning/provisioning a domain fails.

    :param cleanup: list containing dictonaries with two keys: 'what' and 'item'.
                    If 'what' is domain the 'item' is a libvirt domain object.
                    If 'what' is volume then the item is a libvirt volume object.

    Returns:
        none

    .. versionadded: 2017.7.3
    '''
    log.info('Cleaning up after exception')
    for entry in cleanup:
        kind = entry['what']
        obj = entry['item']
        if kind == 'domain':
            log.info('Cleaning up %s %s', kind, obj.name())
            # Force the domain off first; ignore failures (it may already
            # be stopped or half-created).
            try:
                obj.destroy()
                log.debug('%s %s forced off', kind, obj.name())
            except libvirtError:
                pass
            # Then undefine it together with its managed save, snapshot
            # metadata and NVRAM, again best-effort.
            try:
                obj.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE +
                                  libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA +
                                  libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
                log.debug('%s %s undefined', kind, obj.name())
            except libvirtError:
                pass
        if kind == 'volume':
            try:
                obj.delete()
                log.debug('%s %s cleaned up', kind, obj.name())
            except libvirtError:
                pass
pass | def function[do_cleanup, parameter[cleanup]]:
constant[
Clean up clone domain leftovers as much as possible.
Extra robust clean up in order to deal with some small changes in libvirt
behavior over time. Passed in volumes and domains are deleted, any errors
are ignored. Used when cloning/provisioning a domain fails.
:param cleanup: list containing dictonaries with two keys: 'what' and 'item'.
If 'what' is domain the 'item' is a libvirt domain object.
If 'what' is volume then the item is a libvirt volume object.
Returns:
none
.. versionadded: 2017.7.3
]
call[name[log].info, parameter[constant[Cleaning up after exception]]]
for taget[name[leftover]] in starred[name[cleanup]] begin[:]
variable[what] assign[=] call[name[leftover]][constant[what]]
variable[item] assign[=] call[name[leftover]][constant[item]]
if compare[name[what] equal[==] constant[domain]] begin[:]
call[name[log].info, parameter[constant[Cleaning up %s %s], name[what], call[name[item].name, parameter[]]]]
<ast.Try object at 0x7da20c7c8610>
<ast.Try object at 0x7da20c7c9390>
if compare[name[what] equal[==] constant[volume]] begin[:]
<ast.Try object at 0x7da18bccaa70> | keyword[def] identifier[do_cleanup] ( identifier[cleanup] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] )
keyword[for] identifier[leftover] keyword[in] identifier[cleanup] :
identifier[what] = identifier[leftover] [ literal[string] ]
identifier[item] = identifier[leftover] [ literal[string] ]
keyword[if] identifier[what] == literal[string] :
identifier[log] . identifier[info] ( literal[string] , identifier[what] , identifier[item] . identifier[name] ())
keyword[try] :
identifier[item] . identifier[destroy] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[what] , identifier[item] . identifier[name] ())
keyword[except] identifier[libvirtError] :
keyword[pass]
keyword[try] :
identifier[item] . identifier[undefineFlags] ( identifier[libvirt] . identifier[VIR_DOMAIN_UNDEFINE_MANAGED_SAVE] +
identifier[libvirt] . identifier[VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA] +
identifier[libvirt] . identifier[VIR_DOMAIN_UNDEFINE_NVRAM] )
identifier[log] . identifier[debug] ( literal[string] , identifier[what] , identifier[item] . identifier[name] ())
keyword[except] identifier[libvirtError] :
keyword[pass]
keyword[if] identifier[what] == literal[string] :
keyword[try] :
identifier[item] . identifier[delete] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[what] , identifier[item] . identifier[name] ())
keyword[except] identifier[libvirtError] :
keyword[pass] | def do_cleanup(cleanup):
"""
Clean up clone domain leftovers as much as possible.
Extra robust clean up in order to deal with some small changes in libvirt
behavior over time. Passed in volumes and domains are deleted, any errors
are ignored. Used when cloning/provisioning a domain fails.
:param cleanup: list containing dictonaries with two keys: 'what' and 'item'.
If 'what' is domain the 'item' is a libvirt domain object.
If 'what' is volume then the item is a libvirt volume object.
Returns:
none
.. versionadded: 2017.7.3
"""
log.info('Cleaning up after exception')
for leftover in cleanup:
what = leftover['what']
item = leftover['item']
if what == 'domain':
log.info('Cleaning up %s %s', what, item.name())
try:
item.destroy()
log.debug('%s %s forced off', what, item.name()) # depends on [control=['try'], data=[]]
except libvirtError:
pass # depends on [control=['except'], data=[]]
try:
item.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE + libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA + libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
log.debug('%s %s undefined', what, item.name()) # depends on [control=['try'], data=[]]
except libvirtError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['what']]
if what == 'volume':
try:
item.delete()
log.debug('%s %s cleaned up', what, item.name()) # depends on [control=['try'], data=[]]
except libvirtError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['what']] # depends on [control=['for'], data=['leftover']] |
def _pfp__restore_snapshot(self, recurse=True):
    """Restore the snapshotted value without triggering any events
    """
    # Restore this struct's own snapshot first, then descend if asked.
    super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
    if not recurse:
        return
    for kid in self._pfp__children:
        kid._pfp__restore_snapshot(recurse=recurse)
child._pfp__restore_snapshot(recurse=recurse) | def function[_pfp__restore_snapshot, parameter[self, recurse]]:
constant[Restore the snapshotted value without triggering any events
]
call[call[name[super], parameter[name[Struct], name[self]]]._pfp__restore_snapshot, parameter[]]
if name[recurse] begin[:]
for taget[name[child]] in starred[name[self]._pfp__children] begin[:]
call[name[child]._pfp__restore_snapshot, parameter[]] | keyword[def] identifier[_pfp__restore_snapshot] ( identifier[self] , identifier[recurse] = keyword[True] ):
literal[string]
identifier[super] ( identifier[Struct] , identifier[self] ). identifier[_pfp__restore_snapshot] ( identifier[recurse] = identifier[recurse] )
keyword[if] identifier[recurse] :
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[_pfp__children] :
identifier[child] . identifier[_pfp__restore_snapshot] ( identifier[recurse] = identifier[recurse] ) | def _pfp__restore_snapshot(self, recurse=True):
"""Restore the snapshotted value without triggering any events
"""
super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
if recurse:
for child in self._pfp__children:
child._pfp__restore_snapshot(recurse=recurse) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]] |
def close(self):
    """
    Disconnect from the controller.
    """
    logger.info("Closing connection to %s:%s", self._host, self._port)
    future = self._ioloop_future
    future.cancel()
    try:
        yield from future
    except asyncio.CancelledError:
        # Cancellation is the expected outcome of closing; swallow it.
        pass
pass | def function[close, parameter[self]]:
constant[
Disconnect from the controller.
]
call[name[logger].info, parameter[constant[Closing connection to %s:%s], name[self]._host, name[self]._port]]
call[name[self]._ioloop_future.cancel, parameter[]]
<ast.Try object at 0x7da1b0a63cd0> | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[_host] , identifier[self] . identifier[_port] )
identifier[self] . identifier[_ioloop_future] . identifier[cancel] ()
keyword[try] :
keyword[yield] keyword[from] identifier[self] . identifier[_ioloop_future]
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[pass] | def close(self):
"""
Disconnect from the controller.
"""
logger.info('Closing connection to %s:%s', self._host, self._port)
self._ioloop_future.cancel()
try:
yield from self._ioloop_future # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
pass # depends on [control=['except'], data=[]] |
def readDivPressure(fileName):
    """Reads in diversifying pressures from some file.

    Scale diversifying pressure values so absolute value of the max value is 1,
    unless all values are zero.

    Args:
        `fileName` (string or readable file-like object)
            File holding diversifying pressure values. Can be
            comma-, space-, or tab-separated file. The first column
            is the site (consecutively numbered, sites starting
            with one) and the second column is the diversifying pressure values.

    Returns:
        `divPressure` (dict keyed by ints)
            `divPressure[r]` is the diversifying pressure value of site `r`.

    Raises:
        ValueError
            If `fileName` cannot be parsed by `pandas.read_csv`.
    """
    try:
        df = pandas.read_csv(fileName, sep=None, engine='python')
    except ValueError:
        # Bug fix: the old code set an unused `pandasformat` flag here and
        # then fell through to use the undefined `df`, raising a confusing
        # NameError. Re-raise with an informative message instead.
        raise ValueError("Cannot parse diversifying pressures from {0}".format(fileName))
    df.columns = ['site', 'divPressureValue']
    scaleFactor = max(df["divPressureValue"].abs())
    if scaleFactor > 0:
        df["divPressureValue"] = [x / scaleFactor for x in df["divPressureValue"]]
    assert len(df['site'].tolist()) == len(set(df['site'].tolist())), "There is at least one non-unique site in {0}".format(fileName)
    assert max(df["divPressureValue"].abs()) <= 1, "The scaling produced a diversifying pressure value with an absolute value greater than one."
    # Build the mapping in a single O(n) pass instead of filtering the
    # whole DataFrame once per site (which was O(n^2)). `tolist()` also
    # converts numpy scalars to native Python ints/floats.
    divPressure = dict(zip(df['site'].tolist(), df['divPressureValue'].tolist()))
    return divPressure
return divPressure | def function[readDivPressure, parameter[fileName]]:
constant[Reads in diversifying pressures from some file.
Scale diversifying pressure values so absolute value of the max value is 1,
unless all values are zero.
Args:
`fileName` (string or readable file-like object)
File holding diversifying pressure values. Can be
comma-, space-, or tab-separated file. The first column
is the site (consecutively numbered, sites starting
with one) and the second column is the diversifying pressure values.
Returns:
`divPressure` (dict keyed by ints)
`divPressure[r][v]` is the diversifying pressure value of site `r`.
]
<ast.Try object at 0x7da20e9b04c0>
name[df].columns assign[=] list[[<ast.Constant object at 0x7da20e9b1480>, <ast.Constant object at 0x7da20e9b1ea0>]]
variable[scaleFactor] assign[=] call[name[max], parameter[call[call[name[df]][constant[divPressureValue]].abs, parameter[]]]]
if compare[name[scaleFactor] greater[>] constant[0]] begin[:]
call[name[df]][constant[divPressureValue]] assign[=] <ast.ListComp object at 0x7da20e9b1e10>
assert[compare[call[name[len], parameter[call[call[name[df]][constant[site]].tolist, parameter[]]]] equal[==] call[name[len], parameter[call[name[set], parameter[call[call[name[df]][constant[site]].tolist, parameter[]]]]]]]]
assert[compare[call[name[max], parameter[call[call[name[df]][constant[divPressureValue]].abs, parameter[]]]] less_or_equal[<=] constant[1]]]
variable[sites] assign[=] call[call[name[df]][constant[site]].tolist, parameter[]]
variable[divPressure] assign[=] dictionary[[], []]
for taget[name[r]] in starred[name[sites]] begin[:]
call[name[divPressure]][name[r]] assign[=] call[call[call[call[name[df]][compare[call[name[df]][constant[site]] equal[==] name[r]]]][constant[divPressureValue]].tolist, parameter[]]][constant[0]]
return[name[divPressure]] | keyword[def] identifier[readDivPressure] ( identifier[fileName] ):
literal[string]
keyword[try] :
identifier[df] = identifier[pandas] . identifier[read_csv] ( identifier[fileName] , identifier[sep] = keyword[None] , identifier[engine] = literal[string] )
identifier[pandasformat] = keyword[True]
keyword[except] identifier[ValueError] :
identifier[pandasformat] = keyword[False]
identifier[df] . identifier[columns] =[ literal[string] , literal[string] ]
identifier[scaleFactor] = identifier[max] ( identifier[df] [ literal[string] ]. identifier[abs] ())
keyword[if] identifier[scaleFactor] > literal[int] :
identifier[df] [ literal[string] ]=[ identifier[x] / identifier[scaleFactor] keyword[for] identifier[x] keyword[in] identifier[df] [ literal[string] ]]
keyword[assert] identifier[len] ( identifier[df] [ literal[string] ]. identifier[tolist] ())== identifier[len] ( identifier[set] ( identifier[df] [ literal[string] ]. identifier[tolist] ())), literal[string] . identifier[format] ( identifier[fileName] )
keyword[assert] identifier[max] ( identifier[df] [ literal[string] ]. identifier[abs] ())<= literal[int] , literal[string]
identifier[sites] = identifier[df] [ literal[string] ]. identifier[tolist] ()
identifier[divPressure] ={}
keyword[for] identifier[r] keyword[in] identifier[sites] :
identifier[divPressure] [ identifier[r] ]= identifier[df] [ identifier[df] [ literal[string] ]== identifier[r] ][ literal[string] ]. identifier[tolist] ()[ literal[int] ]
keyword[return] identifier[divPressure] | def readDivPressure(fileName):
"""Reads in diversifying pressures from some file.
Scale diversifying pressure values so absolute value of the max value is 1,
unless all values are zero.
Args:
`fileName` (string or readable file-like object)
File holding diversifying pressure values. Can be
comma-, space-, or tab-separated file. The first column
is the site (consecutively numbered, sites starting
with one) and the second column is the diversifying pressure values.
Returns:
`divPressure` (dict keyed by ints)
`divPressure[r][v]` is the diversifying pressure value of site `r`.
"""
try:
df = pandas.read_csv(fileName, sep=None, engine='python')
pandasformat = True # depends on [control=['try'], data=[]]
except ValueError:
pandasformat = False # depends on [control=['except'], data=[]]
df.columns = ['site', 'divPressureValue']
scaleFactor = max(df['divPressureValue'].abs())
if scaleFactor > 0:
df['divPressureValue'] = [x / scaleFactor for x in df['divPressureValue']] # depends on [control=['if'], data=['scaleFactor']]
assert len(df['site'].tolist()) == len(set(df['site'].tolist())), 'There is at least one non-unique site in {0}'.format(fileName)
assert max(df['divPressureValue'].abs()) <= 1, 'The scaling produced a diversifying pressure value with an absolute value greater than one.'
sites = df['site'].tolist()
divPressure = {}
for r in sites:
divPressure[r] = df[df['site'] == r]['divPressureValue'].tolist()[0] # depends on [control=['for'], data=['r']]
return divPressure |
def get_related_targetApplication(vR, app_id, app_ver):
    """Return the first matching target application in this version range.

    Returns None if there are no target applications or no matching ones."""
    candidates = vR.get('targetApplication')
    if not candidates:
        return None
    for candidate in candidates:
        guid = candidate.get('guid')
        # Skip entries explicitly targeting a different application.
        if guid and guid != app_id:
            continue
        if not app_ver:
            return candidate
        # We purposefully use maxVersion only, so that the blocklist contains
        # items whose minimum version is ahead of the version we get passed.
        # This means the blocklist we serve is "future-proof" for app
        # upgrades.
        if between(version_int(app_ver), '0', candidate.get('maxVersion', '*')):
            return candidate
    return None
return None | def function[get_related_targetApplication, parameter[vR, app_id, app_ver]]:
constant[Return the first matching target application in this version range.
Returns None if there are no target applications or no matching ones.]
variable[targetApplication] assign[=] call[name[vR].get, parameter[constant[targetApplication]]]
if <ast.UnaryOp object at 0x7da204566680> begin[:]
return[constant[None]]
for taget[name[tA]] in starred[name[targetApplication]] begin[:]
variable[guid] assign[=] call[name[tA].get, parameter[constant[guid]]]
if <ast.BoolOp object at 0x7da204566cb0> begin[:]
if <ast.UnaryOp object at 0x7da204565870> begin[:]
return[name[tA]]
if call[name[between], parameter[call[name[version_int], parameter[name[app_ver]]], constant[0], call[name[tA].get, parameter[constant[maxVersion], constant[*]]]]] begin[:]
return[name[tA]]
return[constant[None]] | keyword[def] identifier[get_related_targetApplication] ( identifier[vR] , identifier[app_id] , identifier[app_ver] ):
literal[string]
identifier[targetApplication] = identifier[vR] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[targetApplication] :
keyword[return] keyword[None]
keyword[for] identifier[tA] keyword[in] identifier[targetApplication] :
identifier[guid] = identifier[tA] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[guid] keyword[or] identifier[guid] == identifier[app_id] :
keyword[if] keyword[not] identifier[app_ver] :
keyword[return] identifier[tA]
keyword[if] identifier[between] ( identifier[version_int] ( identifier[app_ver] ), literal[string] , identifier[tA] . identifier[get] ( literal[string] , literal[string] )):
keyword[return] identifier[tA]
keyword[return] keyword[None] | def get_related_targetApplication(vR, app_id, app_ver):
"""Return the first matching target application in this version range.
Returns None if there are no target applications or no matching ones."""
targetApplication = vR.get('targetApplication')
if not targetApplication:
return None # depends on [control=['if'], data=[]]
for tA in targetApplication:
guid = tA.get('guid')
if not guid or guid == app_id:
if not app_ver:
return tA # depends on [control=['if'], data=[]]
# We purposefully use maxVersion only, so that the blocklist contains items
# whose minimum version is ahead of the version we get passed. This means
# the blocklist we serve is "future-proof" for app upgrades.
if between(version_int(app_ver), '0', tA.get('maxVersion', '*')):
return tA # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tA']]
return None |
def templated(template=None):
    """Template decorator.

    Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/
    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            # Derive the template name from the endpoint when not given
            # explicitly, e.g. endpoint "admin.index" -> "admin/index.html".
            if template is not None:
                name = template
            else:
                name = request.endpoint.replace('.', '/') + '.html'
            result = view_func(*args, **kwargs)
            if result is None:
                result = {}
            if not isinstance(result, dict):
                # The view produced a full response; pass it through.
                return result
            return render_template(name, **result)
        return wrapper
    return decorator
return decorator | def function[templated, parameter[template]]:
constant[Template decorator.
Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/
]
def function[decorator, parameter[f]]:
def function[decorated_function, parameter[]]:
variable[template_name] assign[=] name[template]
if compare[name[template_name] is constant[None]] begin[:]
variable[template_name] assign[=] binary_operation[call[name[request].endpoint.replace, parameter[constant[.], constant[/]]] + constant[.html]]
variable[context] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da1b26af130>]]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] dictionary[[], []]
return[call[name[render_template], parameter[name[template_name]]]]
return[name[decorated_function]]
return[name[decorator]] | keyword[def] identifier[templated] ( identifier[template] = keyword[None] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[decorated_function] (* identifier[args] ,** identifier[kwargs] ):
identifier[template_name] = identifier[template]
keyword[if] identifier[template_name] keyword[is] keyword[None] :
identifier[template_name] = identifier[request] . identifier[endpoint] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[context] = identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] ={}
keyword[elif] keyword[not] identifier[isinstance] ( identifier[context] , identifier[dict] ):
keyword[return] identifier[context]
keyword[return] identifier[render_template] ( identifier[template_name] ,** identifier[context] )
keyword[return] identifier[decorated_function]
keyword[return] identifier[decorator] | def templated(template=None):
"""Template decorator.
Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint.replace('.', '/') + '.html' # depends on [control=['if'], data=['template_name']]
context = f(*args, **kwargs)
if context is None:
context = {} # depends on [control=['if'], data=['context']]
elif not isinstance(context, dict):
return context # depends on [control=['if'], data=[]]
return render_template(template_name, **context)
return decorated_function
return decorator |
def read(self, mode='color', alpha=True):
    """Return an array of pixel values from an attached buffer.

    Parameters
    ----------
    mode : str
        The buffer type to read. May be 'color', 'depth', or 'stencil'.
    alpha : bool
        If True, returns RGBA array. Otherwise, returns RGB.

    Returns
    -------
    buffer : array
        3D array of pixels in np.uint8 format.
        The array shape is (h, w, 3) or (h, w, 4), with the top-left
        corner of the framebuffer at index [0, 0] in the returned array.
    """
    _check_valid('mode', mode, ['color', 'depth', 'stencil'])
    # Resolve the attachment matching the requested mode
    # (color_buffer / depth_buffer / stencil_buffer).
    attachment = getattr(self, '%s_buffer' % mode)
    height, width = attachment.shape[:2]
    # todo: this is ostensibly required, but not available in gloo.gl
    # gl.glReadBuffer(attachment._target)
    return read_pixels((0, 0, width, height), alpha=alpha)
constant[ Return array of pixel values in an attached buffer
Parameters
----------
mode : str
The buffer type to read. May be 'color', 'depth', or 'stencil'.
alpha : bool
If True, returns RGBA array. Otherwise, returns RGB.
Returns
-------
buffer : array
3D array of pixels in np.uint8 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left
corner of the framebuffer at index [0, 0] in the returned array.
]
call[name[_check_valid], parameter[constant[mode], name[mode], list[[<ast.Constant object at 0x7da1b0f93fd0>, <ast.Constant object at 0x7da1b0f93010>, <ast.Constant object at 0x7da1b0f916c0>]]]]
variable[buffer] assign[=] call[name[getattr], parameter[name[self], binary_operation[name[mode] + constant[_buffer]]]]
<ast.Tuple object at 0x7da1b0f92920> assign[=] call[name[buffer].shape][<ast.Slice object at 0x7da1b0f918a0>]
return[call[name[read_pixels], parameter[tuple[[<ast.Constant object at 0x7da1b0f91180>, <ast.Constant object at 0x7da1b0f92080>, <ast.Name object at 0x7da1b0f91420>, <ast.Name object at 0x7da1b0f93d60>]]]]] | keyword[def] identifier[read] ( identifier[self] , identifier[mode] = literal[string] , identifier[alpha] = keyword[True] ):
literal[string]
identifier[_check_valid] ( literal[string] , identifier[mode] ,[ literal[string] , literal[string] , literal[string] ])
identifier[buffer] = identifier[getattr] ( identifier[self] , identifier[mode] + literal[string] )
identifier[h] , identifier[w] = identifier[buffer] . identifier[shape] [: literal[int] ]
keyword[return] identifier[read_pixels] (( literal[int] , literal[int] , identifier[w] , identifier[h] ), identifier[alpha] = identifier[alpha] ) | def read(self, mode='color', alpha=True):
""" Return array of pixel values in an attached buffer
Parameters
----------
mode : str
The buffer type to read. May be 'color', 'depth', or 'stencil'.
alpha : bool
If True, returns RGBA array. Otherwise, returns RGB.
Returns
-------
buffer : array
3D array of pixels in np.uint8 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left
corner of the framebuffer at index [0, 0] in the returned array.
"""
_check_valid('mode', mode, ['color', 'depth', 'stencil'])
buffer = getattr(self, mode + '_buffer')
(h, w) = buffer.shape[:2]
# todo: this is ostensibly required, but not available in gloo.gl
#gl.glReadBuffer(buffer._target)
return read_pixels((0, 0, w, h), alpha=alpha) |
def crop(data, crinfo):
    """
    Crop a 3D array to the bounding box described by ``crinfo``.

    :param data: 3D array to crop
    :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]
    :return: the cropped sub-array (a view of ``data``)
    """
    bounds = fix_crinfo(crinfo)
    # Build one slice per axis; __int_or_none keeps open-ended bounds
    # (None) intact while coercing numeric bounds to int.
    axis_slices = []
    for axis in range(3):
        start, stop = bounds[axis][0], bounds[axis][1]
        axis_slices.append(slice(__int_or_none(start), __int_or_none(stop)))
    return data[tuple(axis_slices)]
constant[
Crop the data.
crop(data, crinfo)
:param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]
]
variable[crinfo] assign[=] call[name[fix_crinfo], parameter[name[crinfo]]]
return[call[name[data]][tuple[[<ast.Slice object at 0x7da18eb575b0>, <ast.Slice object at 0x7da18eb54700>, <ast.Slice object at 0x7da18eb57ca0>]]]] | keyword[def] identifier[crop] ( identifier[data] , identifier[crinfo] ):
literal[string]
identifier[crinfo] = identifier[fix_crinfo] ( identifier[crinfo] )
keyword[return] identifier[data] [
identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]): identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]),
identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]): identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]),
identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]): identifier[__int_or_none] ( identifier[crinfo] [ literal[int] ][ literal[int] ]),
] | def crop(data, crinfo):
"""
Crop the data.
crop(data, crinfo)
:param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]
"""
crinfo = fix_crinfo(crinfo)
return data[__int_or_none(crinfo[0][0]):__int_or_none(crinfo[0][1]), __int_or_none(crinfo[1][0]):__int_or_none(crinfo[1][1]), __int_or_none(crinfo[2][0]):__int_or_none(crinfo[2][1])] |
def transaction(self, _filter=None, default=None, yield_resource=False):
    """
    transaction(_filter=None, default=None)

    Claims a resource from the pool for use in a thread-safe,
    reentrant manner (as part of a with statement). Resources are
    created as needed when all members of the pool are claimed or
    the pool is empty.

    :param _filter: a filter that can be used to select a member
      of the pool
    :type _filter: callable
    :param default: a value that will be used instead of calling
      :meth:`create_resource` if a new resource needs to be created
    :param yield_resource: set to True to yield the Resource object
      itself; otherwise the wrapped ``resource.object`` is yielded
    :type yield_resource: boolean
    """
    # Claim (or lazily create) a resource; it is released in the
    # ``finally`` below no matter how the caller's block exits.
    resource = self.acquire(_filter=_filter, default=default)
    try:
        # Hand the resource (or the wrapped object) to the caller's
        # ``with`` body.
        if yield_resource:
            yield resource
        else:
            yield resource.object
        # Body completed normally: if it flagged the resource as broken,
        # drop it from the pool instead of recycling it.
        if resource.errored:
            self.delete_resource(resource)
    except BadResource:
        # The caller signalled an unusable resource (thrown back into the
        # generator): remove it from the pool, then propagate.
        self.delete_resource(resource)
        raise
    finally:
        # Always release the claim, on success and error paths alike.
        self.release(resource)
constant[
transaction(_filter=None, default=None)
Claims a resource from the pool for use in a thread-safe,
reentrant manner (as part of a with statement). Resources are
created as needed when all members of the pool are claimed or
the pool is empty.
:param _filter: a filter that can be used to select a member
of the pool
:type _filter: callable
:param default: a value that will be used instead of calling
:meth:`create_resource` if a new resource needs to be created
:param yield_resource: set to True to yield the Resource object
itself
:type yield_resource: boolean
]
variable[resource] assign[=] call[name[self].acquire, parameter[]]
<ast.Try object at 0x7da20c7c8250> | keyword[def] identifier[transaction] ( identifier[self] , identifier[_filter] = keyword[None] , identifier[default] = keyword[None] , identifier[yield_resource] = keyword[False] ):
literal[string]
identifier[resource] = identifier[self] . identifier[acquire] ( identifier[_filter] = identifier[_filter] , identifier[default] = identifier[default] )
keyword[try] :
keyword[if] identifier[yield_resource] :
keyword[yield] identifier[resource]
keyword[else] :
keyword[yield] identifier[resource] . identifier[object]
keyword[if] identifier[resource] . identifier[errored] :
identifier[self] . identifier[delete_resource] ( identifier[resource] )
keyword[except] identifier[BadResource] :
identifier[self] . identifier[delete_resource] ( identifier[resource] )
keyword[raise]
keyword[finally] :
identifier[self] . identifier[release] ( identifier[resource] ) | def transaction(self, _filter=None, default=None, yield_resource=False):
"""
transaction(_filter=None, default=None)
Claims a resource from the pool for use in a thread-safe,
reentrant manner (as part of a with statement). Resources are
created as needed when all members of the pool are claimed or
the pool is empty.
:param _filter: a filter that can be used to select a member
of the pool
:type _filter: callable
:param default: a value that will be used instead of calling
:meth:`create_resource` if a new resource needs to be created
:param yield_resource: set to True to yield the Resource object
itself
:type yield_resource: boolean
"""
resource = self.acquire(_filter=_filter, default=default)
try:
if yield_resource:
yield resource # depends on [control=['if'], data=[]]
else:
yield resource.object
if resource.errored:
self.delete_resource(resource) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except BadResource:
self.delete_resource(resource)
raise # depends on [control=['except'], data=[]]
finally:
self.release(resource) |
def set_cache_max(self, cache_name, maxsize, **kwargs):
    """Set the ``maxsize`` attribute of the named cache.

    Extra keyword arguments are forwarded to the cache's
    ``set_maxsize`` method.
    """
    # Resolve the cache by name, then delegate the resize to it.
    self._get_cache(cache_name).set_maxsize(maxsize, **kwargs)
constant[
Sets the maxsize attribute of the named cache
]
variable[cache] assign[=] call[name[self]._get_cache, parameter[name[cache_name]]]
call[name[cache].set_maxsize, parameter[name[maxsize]]] | keyword[def] identifier[set_cache_max] ( identifier[self] , identifier[cache_name] , identifier[maxsize] ,** identifier[kwargs] ):
literal[string]
identifier[cache] = identifier[self] . identifier[_get_cache] ( identifier[cache_name] )
identifier[cache] . identifier[set_maxsize] ( identifier[maxsize] ,** identifier[kwargs] ) | def set_cache_max(self, cache_name, maxsize, **kwargs):
"""
Sets the maxsize attribute of the named cache
"""
cache = self._get_cache(cache_name)
cache.set_maxsize(maxsize, **kwargs) |
def get_orm_columns(cls: Type) -> List[Column]:
    """
    Gets :class:`Column` objects from an SQLAlchemy ORM class.
    Does not provide their attribute names.
    """
    # inspect() on an ORM *class* returns a Mapper (an ORM *instance*
    # would return an InstanceState instead):
    # http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper  # noqa
    # Mapper.columns is an OrderedProperties collection whose values are
    # the mapped Column objects.
    return inspect(cls).columns.values()
constant[
Gets :class:`Column` objects from an SQLAlchemy ORM class.
Does not provide their attribute names.
]
variable[mapper] assign[=] call[name[inspect], parameter[name[cls]]]
variable[colmap] assign[=] name[mapper].columns
return[call[name[colmap].values, parameter[]]] | keyword[def] identifier[get_orm_columns] ( identifier[cls] : identifier[Type] )-> identifier[List] [ identifier[Column] ]:
literal[string]
identifier[mapper] = identifier[inspect] ( identifier[cls] )
identifier[colmap] = identifier[mapper] . identifier[columns]
keyword[return] identifier[colmap] . identifier[values] () | def get_orm_columns(cls: Type) -> List[Column]:
"""
Gets :class:`Column` objects from an SQLAlchemy ORM class.
Does not provide their attribute names.
"""
mapper = inspect(cls) # type: Mapper
# ... returns InstanceState if called with an ORM object
# http://docs.sqlalchemy.org/en/latest/orm/session_state_management.html#session-object-states # noqa
# ... returns Mapper if called with an ORM class
# http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper # noqa
colmap = mapper.columns # type: OrderedProperties
return colmap.values() |
def show_progress(self, n, total_runs):
    """Displays a progressbar"""
    # Progress reporting is optional; bail out early if disabled.
    if not self.report_progress:
        return
    percentage, logger_name, log_level = self.report_progress
    logger = 'print' if logger_name == 'print' else logging.getLogger(logger_name)
    if n == -1:
        # First call: size the run-counter field to the total number of
        # runs; the +0.1 avoids log10(0).
        digits = int(math.log10(total_runs + 0.1)) + 1
        self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs '
    fmt_string = self._format_string % (n + 1, total_runs) + '%s'
    progressbar(n, total_runs, percentage_step=percentage,
                logger=logger, log_level=log_level,
                fmt_string=fmt_string, reprint=log_level == 0)
constant[Displays a progressbar]
if name[self].report_progress begin[:]
<ast.Tuple object at 0x7da18f00cd30> assign[=] name[self].report_progress
if compare[name[logger_name] equal[==] constant[print]] begin[:]
variable[logger] assign[=] constant[print]
if compare[name[n] equal[==] <ast.UnaryOp object at 0x7da18f00d060>] begin[:]
variable[digits] assign[=] binary_operation[call[name[int], parameter[call[name[math].log10, parameter[binary_operation[name[total_runs] + constant[0.1]]]]]] + constant[1]]
name[self]._format_string assign[=] binary_operation[binary_operation[constant[PROGRESS: Finished %] + binary_operation[constant[%d] <ast.Mod object at 0x7da2590d6920> name[digits]]] + constant[d/%d runs ]]
variable[fmt_string] assign[=] binary_operation[binary_operation[name[self]._format_string <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18f722cb0>, <ast.Name object at 0x7da18f721990>]]] + constant[%s]]
variable[reprint] assign[=] compare[name[log_level] equal[==] constant[0]]
call[name[progressbar], parameter[name[n], name[total_runs]]] | keyword[def] identifier[show_progress] ( identifier[self] , identifier[n] , identifier[total_runs] ):
literal[string]
keyword[if] identifier[self] . identifier[report_progress] :
identifier[percentage] , identifier[logger_name] , identifier[log_level] = identifier[self] . identifier[report_progress]
keyword[if] identifier[logger_name] == literal[string] :
identifier[logger] = literal[string]
keyword[else] :
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[logger_name] )
keyword[if] identifier[n] ==- literal[int] :
identifier[digits] = identifier[int] ( identifier[math] . identifier[log10] ( identifier[total_runs] + literal[int] ))+ literal[int]
identifier[self] . identifier[_format_string] = literal[string] + literal[string] % identifier[digits] + literal[string]
identifier[fmt_string] = identifier[self] . identifier[_format_string] %( identifier[n] + literal[int] , identifier[total_runs] )+ literal[string]
identifier[reprint] = identifier[log_level] == literal[int]
identifier[progressbar] ( identifier[n] , identifier[total_runs] , identifier[percentage_step] = identifier[percentage] ,
identifier[logger] = identifier[logger] , identifier[log_level] = identifier[log_level] ,
identifier[fmt_string] = identifier[fmt_string] , identifier[reprint] = identifier[reprint] ) | def show_progress(self, n, total_runs):
"""Displays a progressbar"""
if self.report_progress:
(percentage, logger_name, log_level) = self.report_progress
if logger_name == 'print':
logger = 'print' # depends on [control=['if'], data=[]]
else:
logger = logging.getLogger(logger_name)
if n == -1:
# Compute the number of digits and avoid log10(0)
digits = int(math.log10(total_runs + 0.1)) + 1
self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs ' # depends on [control=['if'], data=[]]
fmt_string = self._format_string % (n + 1, total_runs) + '%s'
reprint = log_level == 0
progressbar(n, total_runs, percentage_step=percentage, logger=logger, log_level=log_level, fmt_string=fmt_string, reprint=reprint) # depends on [control=['if'], data=[]] |
def postcmd(self, stop: bool, line: str) -> bool:
    """Hook method executed just after a command dispatch is finished.

    Overridden so the prompt is refreshed after every command (e.g. so it
    always displays the current working directory).

    :param stop: if True, the command has indicated the application should exit
    :param line: the command line text for this command
    :return: if this is True, the application will exit after this command and the postloop() will run
    """
    # NOTE: the original had a second triple-quoted string here
    # ("Override this so prompt always displays cwd.") which was a dead
    # no-op statement, not a docstring; its content is merged above.
    self._set_prompt()
    return stop
constant[Hook method executed just after a command dispatch is finished.
:param stop: if True, the command has indicated the application should exit
:param line: the command line text for this command
:return: if this is True, the application will exit after this command and the postloop() will run
]
constant[Override this so prompt always displays cwd.]
call[name[self]._set_prompt, parameter[]]
return[name[stop]] | keyword[def] identifier[postcmd] ( identifier[self] , identifier[stop] : identifier[bool] , identifier[line] : identifier[str] )-> identifier[bool] :
literal[string]
literal[string]
identifier[self] . identifier[_set_prompt] ()
keyword[return] identifier[stop] | def postcmd(self, stop: bool, line: str) -> bool:
"""Hook method executed just after a command dispatch is finished.
:param stop: if True, the command has indicated the application should exit
:param line: the command line text for this command
:return: if this is True, the application will exit after this command and the postloop() will run
"""
'Override this so prompt always displays cwd.'
self._set_prompt()
return stop |
def initialize(self, initialization_order=None):
"""
This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again.
"""
# Initialize time
if self.time is None:
if self.time_initialization is None:
self.time = Time()
else:
self.time = self.time_initialization()
# if self.time is None:
# self.time = time
# self.components.time = self.time
# self.components.functions.time = self.time # rewrite functions so we don't need this
self.components._init_outer_references({
'scope': self,
'time': self.time
})
remaining = set(self._stateful_elements)
while remaining:
progress = set()
for element in remaining:
try:
element.initialize()
progress.add(element)
except (KeyError, TypeError, AttributeError):
pass
if progress:
remaining.difference_update(progress)
else:
raise KeyError('Unresolvable Reference: Probable circular initialization' +
'\n'.join([repr(e) for e in remaining])) | def function[initialize, parameter[self, initialization_order]]:
constant[
This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again.
]
if compare[name[self].time is constant[None]] begin[:]
if compare[name[self].time_initialization is constant[None]] begin[:]
name[self].time assign[=] call[name[Time], parameter[]]
call[name[self].components._init_outer_references, parameter[dictionary[[<ast.Constant object at 0x7da18bccbdc0>, <ast.Constant object at 0x7da18bcc9a80>], [<ast.Name object at 0x7da18bccb6d0>, <ast.Attribute object at 0x7da18bcca860>]]]]
variable[remaining] assign[=] call[name[set], parameter[name[self]._stateful_elements]]
while name[remaining] begin[:]
variable[progress] assign[=] call[name[set], parameter[]]
for taget[name[element]] in starred[name[remaining]] begin[:]
<ast.Try object at 0x7da18bccad70>
if name[progress] begin[:]
call[name[remaining].difference_update, parameter[name[progress]]] | keyword[def] identifier[initialize] ( identifier[self] , identifier[initialization_order] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[time] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[time_initialization] keyword[is] keyword[None] :
identifier[self] . identifier[time] = identifier[Time] ()
keyword[else] :
identifier[self] . identifier[time] = identifier[self] . identifier[time_initialization] ()
identifier[self] . identifier[components] . identifier[_init_outer_references] ({
literal[string] : identifier[self] ,
literal[string] : identifier[self] . identifier[time]
})
identifier[remaining] = identifier[set] ( identifier[self] . identifier[_stateful_elements] )
keyword[while] identifier[remaining] :
identifier[progress] = identifier[set] ()
keyword[for] identifier[element] keyword[in] identifier[remaining] :
keyword[try] :
identifier[element] . identifier[initialize] ()
identifier[progress] . identifier[add] ( identifier[element] )
keyword[except] ( identifier[KeyError] , identifier[TypeError] , identifier[AttributeError] ):
keyword[pass]
keyword[if] identifier[progress] :
identifier[remaining] . identifier[difference_update] ( identifier[progress] )
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] +
literal[string] . identifier[join] ([ identifier[repr] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[remaining] ])) | def initialize(self, initialization_order=None):
"""
This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again.
"""
# Initialize time
if self.time is None:
if self.time_initialization is None:
self.time = Time() # depends on [control=['if'], data=[]]
else:
self.time = self.time_initialization() # depends on [control=['if'], data=[]]
# if self.time is None:
# self.time = time
# self.components.time = self.time
# self.components.functions.time = self.time # rewrite functions so we don't need this
self.components._init_outer_references({'scope': self, 'time': self.time})
remaining = set(self._stateful_elements)
while remaining:
progress = set()
for element in remaining:
try:
element.initialize()
progress.add(element) # depends on [control=['try'], data=[]]
except (KeyError, TypeError, AttributeError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['element']]
if progress:
remaining.difference_update(progress) # depends on [control=['if'], data=[]]
else:
raise KeyError('Unresolvable Reference: Probable circular initialization' + '\n'.join([repr(e) for e in remaining])) # depends on [control=['while'], data=[]] |
def do(cmdline=None, runas=None):
    '''
    Execute a python command with pyenv's shims from the user or the system.

    CLI Example:

    .. code-block:: bash

        salt '*' pyenv.do 'gem list bundler'
        salt '*' pyenv.do 'gem list bundler' deploy
    '''
    path = _pyenv_path(runas)
    # Shell-quote every whitespace-separated token. The leading space per
    # token is deliberate: it reproduces the exact command string the
    # template below has always produced.
    quoted_line = ''.join(' ' + _cmd_quote(token) for token in cmdline.split())
    result = __salt__['cmd.run_all'](
        'env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line),
        runas=runas,
        python_shell=True
    )
    if result['retcode'] != 0:
        return False
    # Success: refresh the shims, then hand back the command output.
    rehash(runas=runas)
    return result['stdout']
constant[
Execute a python command with pyenv's shims from the user or the system.
CLI Example:
.. code-block:: bash
salt '*' pyenv.do 'gem list bundler'
salt '*' pyenv.do 'gem list bundler' deploy
]
variable[path] assign[=] call[name[_pyenv_path], parameter[name[runas]]]
variable[cmd_split] assign[=] call[name[cmdline].split, parameter[]]
variable[quoted_line] assign[=] constant[]
for taget[name[cmd]] in starred[name[cmd_split]] begin[:]
variable[quoted_line] assign[=] binary_operation[binary_operation[name[quoted_line] + constant[ ]] + call[name[_cmd_quote], parameter[name[cmd]]]]
variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[constant[env PATH={0}/shims:$PATH {1}].format, parameter[call[name[_cmd_quote], parameter[name[path]]], name[quoted_line]]]]]
if compare[call[name[result]][constant[retcode]] equal[==] constant[0]] begin[:]
call[name[rehash], parameter[]]
return[call[name[result]][constant[stdout]]] | keyword[def] identifier[do] ( identifier[cmdline] = keyword[None] , identifier[runas] = keyword[None] ):
literal[string]
identifier[path] = identifier[_pyenv_path] ( identifier[runas] )
identifier[cmd_split] = identifier[cmdline] . identifier[split] ()
identifier[quoted_line] = literal[string]
keyword[for] identifier[cmd] keyword[in] identifier[cmd_split] :
identifier[quoted_line] = identifier[quoted_line] + literal[string] + identifier[_cmd_quote] ( identifier[cmd] )
identifier[result] = identifier[__salt__] [ literal[string] ](
literal[string] . identifier[format] ( identifier[_cmd_quote] ( identifier[path] ), identifier[quoted_line] ),
identifier[runas] = identifier[runas] ,
identifier[python_shell] = keyword[True]
)
keyword[if] identifier[result] [ literal[string] ]== literal[int] :
identifier[rehash] ( identifier[runas] = identifier[runas] )
keyword[return] identifier[result] [ literal[string] ]
keyword[else] :
keyword[return] keyword[False] | def do(cmdline=None, runas=None):
"""
Execute a python command with pyenv's shims from the user or the system.
CLI Example:
.. code-block:: bash
salt '*' pyenv.do 'gem list bundler'
salt '*' pyenv.do 'gem list bundler' deploy
"""
path = _pyenv_path(runas)
cmd_split = cmdline.split()
quoted_line = ''
for cmd in cmd_split:
quoted_line = quoted_line + ' ' + _cmd_quote(cmd) # depends on [control=['for'], data=['cmd']]
result = __salt__['cmd.run_all']('env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line), runas=runas, python_shell=True)
if result['retcode'] == 0:
rehash(runas=runas)
return result['stdout'] # depends on [control=['if'], data=[]]
else:
return False |
def readall(self):
    # type: () -> bytes
    '''
    Read and return the remaining bytes in the file.

    Parameters:
     None.
    Returns:
     The rest of the data left in the file. If the file is at or past EOF,
     returns an empty bytestring.
    '''
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')

    # Bytes left between the current logical offset and the end of the
    # extent this object exposes.
    remaining = self._length - self._offset
    if remaining <= 0:
        return b''
    data = self._fp.read(remaining)
    self._offset += remaining
    return data
constant[
A method to read and return the remaining bytes in the file.
Parameters:
None.
Returns:
The rest of the data left in the file. If the file is at or past EOF,
returns an empty bytestring.
]
if <ast.UnaryOp object at 0x7da1b0d0f010> begin[:]
<ast.Raise object at 0x7da1b0d0ead0>
variable[readsize] assign[=] binary_operation[name[self]._length - name[self]._offset]
if compare[name[readsize] greater[>] constant[0]] begin[:]
variable[data] assign[=] call[name[self]._fp.read, parameter[name[readsize]]]
<ast.AugAssign object at 0x7da1b0d0cd90>
return[name[data]] | keyword[def] identifier[readall] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_open] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidInput] ( literal[string] )
identifier[readsize] = identifier[self] . identifier[_length] - identifier[self] . identifier[_offset]
keyword[if] identifier[readsize] > literal[int] :
identifier[data] = identifier[self] . identifier[_fp] . identifier[read] ( identifier[readsize] )
identifier[self] . identifier[_offset] += identifier[readsize]
keyword[else] :
identifier[data] = literal[string]
keyword[return] identifier[data] | def readall(self):
# type: () -> bytes
'\n A method to read and return the remaining bytes in the file.\n\n Parameters:\n None.\n Returns:\n The rest of the data left in the file. If the file is at or past EOF,\n returns an empty bytestring.\n '
if not self._open:
raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.') # depends on [control=['if'], data=[]]
readsize = self._length - self._offset
if readsize > 0:
data = self._fp.read(readsize)
self._offset += readsize # depends on [control=['if'], data=['readsize']]
else:
data = b''
return data |
def show_qparams(qtype, stream=sys.stdout):
    """Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
    # Find the first adapter subclass registered for this queue type.
    adapter_cls = next(
        (c for c in all_subclasses(QueueAdapter) if c.QTYPE == qtype), None)
    if adapter_cls is None:
        raise ValueError("Cannot find class associated to qtype %s" % qtype)
    return stream.write(adapter_cls.QTEMPLATE)
raise ValueError("Cannot find class associated to qtype %s" % qtype) | def function[show_qparams, parameter[qtype, stream]]:
constant[Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`.]
for taget[name[cls]] in starred[call[name[all_subclasses], parameter[name[QueueAdapter]]]] begin[:]
if compare[name[cls].QTYPE equal[==] name[qtype]] begin[:]
return[call[name[stream].write, parameter[name[cls].QTEMPLATE]]]
<ast.Raise object at 0x7da18eb56e30> | keyword[def] identifier[show_qparams] ( identifier[qtype] , identifier[stream] = identifier[sys] . identifier[stdout] ):
literal[string]
keyword[for] identifier[cls] keyword[in] identifier[all_subclasses] ( identifier[QueueAdapter] ):
keyword[if] identifier[cls] . identifier[QTYPE] == identifier[qtype] : keyword[return] identifier[stream] . identifier[write] ( identifier[cls] . identifier[QTEMPLATE] )
keyword[raise] identifier[ValueError] ( literal[string] % identifier[qtype] ) | def show_qparams(qtype, stream=sys.stdout):
"""Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
for cls in all_subclasses(QueueAdapter):
if cls.QTYPE == qtype:
return stream.write(cls.QTEMPLATE) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cls']]
raise ValueError('Cannot find class associated to qtype %s' % qtype) |
def event_handler_event_handler_list_trigger_trigger_choice_vcs_vcs(self, **kwargs):
    """Auto Generated Code

    Builds the Brocade event-handler XML config tree:
    config/event-handler/event-handler-list/trigger/trigger-choice/vcs/vcs,
    populating the keyed elements from ``kwargs`` (``name``, ``trigger_id``,
    ``vcs``), then passes the tree to ``callback`` (default: ``self._callback``).
    """
    config = ET.Element("config")
    handler = ET.SubElement(config, "event-handler",
                            xmlns="urn:brocade.com:mgmt:brocade-event-handler")
    handler_list = ET.SubElement(handler, "event-handler-list")
    ET.SubElement(handler_list, "name").text = kwargs.pop('name')
    trigger = ET.SubElement(handler_list, "trigger")
    ET.SubElement(trigger, "trigger-id").text = kwargs.pop('trigger_id')
    trigger_choice = ET.SubElement(trigger, "trigger-choice")
    # The schema nests a <vcs> leaf inside a <vcs> container.
    vcs_container = ET.SubElement(trigger_choice, "vcs")
    vcs_leaf = ET.SubElement(vcs_container, "vcs")
    vcs_leaf.text = kwargs.pop('vcs')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[event_handler] assign[=] call[name[ET].SubElement, parameter[name[config], constant[event-handler]]]
variable[event_handler_list] assign[=] call[name[ET].SubElement, parameter[name[event_handler], constant[event-handler-list]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[event_handler_list], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[trigger] assign[=] call[name[ET].SubElement, parameter[name[event_handler_list], constant[trigger]]]
variable[trigger_id_key] assign[=] call[name[ET].SubElement, parameter[name[trigger], constant[trigger-id]]]
name[trigger_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[trigger_id]]]
variable[trigger_choice] assign[=] call[name[ET].SubElement, parameter[name[trigger], constant[trigger-choice]]]
variable[vcs] assign[=] call[name[ET].SubElement, parameter[name[trigger_choice], constant[vcs]]]
variable[vcs] assign[=] call[name[ET].SubElement, parameter[name[vcs], constant[vcs]]]
name[vcs].text assign[=] call[name[kwargs].pop, parameter[constant[vcs]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[event_handler_event_handler_list_trigger_trigger_choice_vcs_vcs] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[event_handler] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[event_handler_list] = identifier[ET] . identifier[SubElement] ( identifier[event_handler] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[event_handler_list] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[trigger] = identifier[ET] . identifier[SubElement] ( identifier[event_handler_list] , literal[string] )
identifier[trigger_id_key] = identifier[ET] . identifier[SubElement] ( identifier[trigger] , literal[string] )
identifier[trigger_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[trigger_choice] = identifier[ET] . identifier[SubElement] ( identifier[trigger] , literal[string] )
identifier[vcs] = identifier[ET] . identifier[SubElement] ( identifier[trigger_choice] , literal[string] )
identifier[vcs] = identifier[ET] . identifier[SubElement] ( identifier[vcs] , literal[string] )
identifier[vcs] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def event_handler_event_handler_list_trigger_trigger_choice_vcs_vcs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
event_handler = ET.SubElement(config, 'event-handler', xmlns='urn:brocade.com:mgmt:brocade-event-handler')
event_handler_list = ET.SubElement(event_handler, 'event-handler-list')
name_key = ET.SubElement(event_handler_list, 'name')
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, 'trigger')
trigger_id_key = ET.SubElement(trigger, 'trigger-id')
trigger_id_key.text = kwargs.pop('trigger_id')
trigger_choice = ET.SubElement(trigger, 'trigger-choice')
vcs = ET.SubElement(trigger_choice, 'vcs')
vcs = ET.SubElement(vcs, 'vcs')
vcs.text = kwargs.pop('vcs')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def enter_api_key(parser=None):
    """Prompt for API key and secret
    Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)
    :param parser: Config parser; a new SafeConfigParser is created when None
    :type parser: SafeConfigParser
    """
    if parser is None:
        parser = SafeConfigParser()
    # Guard with has_section: the original only added the section for a
    # freshly created parser, so a caller-supplied parser without a
    # 'flickr' section made parser.set raise NoSectionError.
    if not parser.has_section('flickr'):
        parser.add_section('flickr')
    global api_key, api_secret
    api_key = raw_input('Enter your API key: ')
    api_secret = raw_input('Enter your API secret: ')
    parser.set('flickr', 'API_KEY', api_key)
    parser.set('flickr', 'API_SECRET', api_secret)
    with open(CONFIG_PATH, 'wb') as f:
        parser.write(f)
constant[Prompt for API key and secret
Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)
:param parser: Config parser
:type parser: SafeConfigParser
]
if compare[name[parser] is constant[None]] begin[:]
variable[parser] assign[=] call[name[SafeConfigParser], parameter[]]
call[name[parser].add_section, parameter[constant[flickr]]]
<ast.Global object at 0x7da2044c06a0>
variable[api_key] assign[=] call[name[raw_input], parameter[constant[Enter your API key: ]]]
variable[api_secret] assign[=] call[name[raw_input], parameter[constant[Enter your API secret: ]]]
call[name[parser].set, parameter[constant[flickr], constant[API_KEY], name[api_key]]]
call[name[parser].set, parameter[constant[flickr], constant[API_SECRET], name[api_secret]]]
with call[name[open], parameter[name[CONFIG_PATH], constant[wb]]] begin[:]
call[name[parser].write, parameter[name[f]]] | keyword[def] identifier[enter_api_key] ( identifier[parser] = keyword[None] ):
literal[string]
keyword[if] identifier[parser] keyword[is] keyword[None] :
identifier[parser] = identifier[SafeConfigParser] ()
identifier[parser] . identifier[add_section] ( literal[string] )
keyword[global] identifier[api_key] , identifier[api_secret]
identifier[api_key] = identifier[raw_input] ( literal[string] )
identifier[api_secret] = identifier[raw_input] ( literal[string] )
identifier[parser] . identifier[set] ( literal[string] , literal[string] , identifier[api_key] )
identifier[parser] . identifier[set] ( literal[string] , literal[string] , identifier[api_secret] )
keyword[with] identifier[open] ( identifier[CONFIG_PATH] , literal[string] ) keyword[as] identifier[f] :
identifier[parser] . identifier[write] ( identifier[f] ) | def enter_api_key(parser=None):
"""Prompt for API key and secret
Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)
:param parser: Config parser
:type parser: SafeConfigParser
"""
if parser is None:
parser = SafeConfigParser() # depends on [control=['if'], data=['parser']]
parser.add_section('flickr')
global api_key, api_secret
api_key = raw_input('Enter your API key: ')
api_secret = raw_input('Enter your API secret: ')
parser.set('flickr', 'API_KEY', api_key)
parser.set('flickr', 'API_SECRET', api_secret)
with open(CONFIG_PATH, 'wb') as f:
parser.write(f) # depends on [control=['with'], data=['f']] |
def get_named_range(self, name):
    """Retrieve a Named range by its name.

    Returns None when the HTTP response is falsy (request failed);
    otherwise builds a named-range object from the response JSON.
    """
    endpoint = self._endpoints.get('get_named_range').format(name=name)
    response = self.session.get(self.build_url(endpoint))
    if not response:
        return None
    payload = {self._cloud_data_key: response.json()}
    return self.named_range_constructor(parent=self, **payload)
constant[ Retrieves a Named range by it's name ]
variable[url] assign[=] call[name[self].build_url, parameter[call[call[name[self]._endpoints.get, parameter[constant[get_named_range]]].format, parameter[]]]]
variable[response] assign[=] call[name[self].session.get, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b1b299f0> begin[:]
return[constant[None]]
return[call[name[self].named_range_constructor, parameter[]]] | keyword[def] identifier[get_named_range] ( identifier[self] , identifier[name] ):
literal[string]
identifier[url] = identifier[self] . identifier[build_url] ( identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ). identifier[format] ( identifier[name] = identifier[name] ))
identifier[response] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] )
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[named_range_constructor] ( identifier[parent] = identifier[self] ,**{ identifier[self] . identifier[_cloud_data_key] : identifier[response] . identifier[json] ()}) | def get_named_range(self, name):
""" Retrieves a Named range by it's name """
url = self.build_url(self._endpoints.get('get_named_range').format(name=name))
response = self.session.get(url)
if not response:
return None # depends on [control=['if'], data=[]]
return self.named_range_constructor(parent=self, **{self._cloud_data_key: response.json()}) |
def set_log_level(self, log_level):
    '''Configures class log level
    Arguments:
        log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING',
            'ERROR', 'CRITICAL')
    '''
    # NOTSET just resets the level; no message is emitted for it.
    if log_level == 'NOTSET':
        self.log.setLevel(logging.NOTSET)
        return
    # Table-driven dispatch: numeric level plus the emitter used to announce
    # the change at that same severity.
    dispatch = {
        'DEBUG': (logging.DEBUG, self.log.debug),
        'INFO': (logging.INFO, self.log.info),
        'WARNING': (logging.WARNING, self.log.warning),
        'ERROR': (logging.ERROR, self.log.error),
        'CRITICAL': (logging.CRITICAL, self.log.critical),
    }
    if log_level not in dispatch:
        raise NotImplementedError('Not implemented log level ' + str(log_level))
    level, emit = dispatch[log_level]
    self.log.setLevel(level)
    emit("Changing log level to " + log_level)
constant[Configures class log level
Arguments:
log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING',
'ERROR', 'CRITICAL')
]
if compare[name[log_level] equal[==] constant[DEBUG]] begin[:]
call[name[self].log.setLevel, parameter[name[logging].DEBUG]]
call[name[self].log.debug, parameter[binary_operation[constant[Changing log level to ] + name[log_level]]]] | keyword[def] identifier[set_log_level] ( identifier[self] , identifier[log_level] ):
literal[string]
keyword[if] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[DEBUG] )
identifier[self] . identifier[log] . identifier[debug] ( literal[string] + identifier[log_level] )
keyword[elif] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[INFO] )
identifier[self] . identifier[log] . identifier[info] ( literal[string] + identifier[log_level] )
keyword[elif] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[WARNING] )
identifier[self] . identifier[log] . identifier[warning] ( literal[string] + identifier[log_level] )
keyword[elif] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[ERROR] )
identifier[self] . identifier[log] . identifier[error] ( literal[string] + identifier[log_level] )
keyword[elif] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[CRITICAL] )
identifier[self] . identifier[log] . identifier[critical] ( literal[string] + identifier[log_level] )
keyword[elif] identifier[log_level] == literal[string] :
identifier[self] . identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[NOTSET] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] + identifier[str] ( identifier[log_level] )) | def set_log_level(self, log_level):
"""Configures class log level
Arguments:
log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING',
'ERROR', 'CRITICAL')
"""
if log_level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self.log.debug('Changing log level to ' + log_level) # depends on [control=['if'], data=['log_level']]
elif log_level == 'INFO':
self.log.setLevel(logging.INFO)
self.log.info('Changing log level to ' + log_level) # depends on [control=['if'], data=['log_level']]
elif log_level == 'WARNING':
self.log.setLevel(logging.WARNING)
self.log.warning('Changing log level to ' + log_level) # depends on [control=['if'], data=['log_level']]
elif log_level == 'ERROR':
self.log.setLevel(logging.ERROR)
self.log.error('Changing log level to ' + log_level) # depends on [control=['if'], data=['log_level']]
elif log_level == 'CRITICAL':
self.log.setLevel(logging.CRITICAL)
self.log.critical('Changing log level to ' + log_level) # depends on [control=['if'], data=['log_level']]
elif log_level == 'NOTSET':
self.log.setLevel(logging.NOTSET) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Not implemented log level ' + str(log_level)) |
def jsonex_request(url, data, headers=None):
    """ Make a request with JsonEx
    :param url: URL
    :type url: str
    :param data: Data to POST
    :type data: dict
    :return: Response
    :rtype: dict
    :raises exc.ConnectionError: Connection error
    :raises exc.ServerError: Remote server error (unknown)
    :raises exc.ProviderError: any errors reported by the remote
    """
    # NOTE(review): the `headers` argument is discarded here — the dict is
    # replaced by whatever _parse_authentication returns. Presumably
    # intentional; confirm against callers.
    url, headers = _parse_authentication(url)
    headers['Content-Type'] = 'application/json'
    try:
        request = Request(url, headers=headers)
        raw = urlopen(request, jsonex_dumps(data)).read()
        res = jsonex_loads(raw)
    except HTTPError as e:
        # A JSON body on an HTTP error still carries a JsonEx payload;
        # anything else is an opaque server failure.
        if e.headers.get('Content-Type') == 'application/json':
            res = jsonex_loads(e.read())
        else:
            raise exc.ServerError('Server at "{}" failed: {}'.format(url, e))
    except URLError as e:
        raise exc.ConnectionError('Connection to "{}" failed: {}'.format(url, e))
    # An 'error' key holds an exception object raised by the remote side.
    if 'error' in res:
        raise res['error']
    return res
constant[ Make a request with JsonEx
:param url: URL
:type url: str
:param data: Data to POST
:type data: dict
:return: Response
:rtype: dict
:raises exc.ConnectionError: Connection error
:raises exc.ServerError: Remote server error (unknown)
:raises exc.ProviderError: any errors reported by the remote
]
<ast.Tuple object at 0x7da18bc71ae0> assign[=] call[name[_parse_authentication], parameter[name[url]]]
call[name[headers]][constant[Content-Type]] assign[=] constant[application/json]
<ast.Try object at 0x7da18bc702b0>
if compare[constant[error] in name[res]] begin[:]
<ast.Raise object at 0x7da18bc71cc0>
return[name[res]] | keyword[def] identifier[jsonex_request] ( identifier[url] , identifier[data] , identifier[headers] = keyword[None] ):
literal[string]
identifier[url] , identifier[headers] = identifier[_parse_authentication] ( identifier[url] )
identifier[headers] [ literal[string] ]= literal[string]
keyword[try] :
identifier[req] = identifier[Request] ( identifier[url] , identifier[headers] = identifier[headers] )
identifier[response] = identifier[urlopen] ( identifier[req] , identifier[jsonex_dumps] ( identifier[data] ))
identifier[res_str] = identifier[response] . identifier[read] ()
identifier[res] = identifier[jsonex_loads] ( identifier[res_str] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[headers] keyword[and] identifier[e] . identifier[headers] [ literal[string] ]== literal[string] :
identifier[res] = identifier[jsonex_loads] ( identifier[e] . identifier[read] ())
keyword[else] :
keyword[raise] identifier[exc] . identifier[ServerError] ( literal[string] . identifier[format] ( identifier[url] , identifier[e] ))
keyword[except] identifier[URLError] keyword[as] identifier[e] :
keyword[raise] identifier[exc] . identifier[ConnectionError] ( literal[string] . identifier[format] ( identifier[url] , identifier[e] ))
keyword[if] literal[string] keyword[in] identifier[res] :
keyword[raise] identifier[res] [ literal[string] ]
keyword[return] identifier[res] | def jsonex_request(url, data, headers=None):
""" Make a request with JsonEx
:param url: URL
:type url: str
:param data: Data to POST
:type data: dict
:return: Response
:rtype: dict
:raises exc.ConnectionError: Connection error
:raises exc.ServerError: Remote server error (unknown)
:raises exc.ProviderError: any errors reported by the remote
"""
# Authentication?
(url, headers) = _parse_authentication(url)
headers['Content-Type'] = 'application/json'
# Request
try:
req = Request(url, headers=headers)
response = urlopen(req, jsonex_dumps(data))
res_str = response.read()
res = jsonex_loads(res_str) # depends on [control=['try'], data=[]]
except HTTPError as e:
if 'Content-Type' in e.headers and e.headers['Content-Type'] == 'application/json':
res = jsonex_loads(e.read()) # depends on [control=['if'], data=[]]
else:
raise exc.ServerError('Server at "{}" failed: {}'.format(url, e)) # depends on [control=['except'], data=['e']]
except URLError as e:
raise exc.ConnectionError('Connection to "{}" failed: {}'.format(url, e)) # depends on [control=['except'], data=['e']]
# Errors?
if 'error' in res: # Exception object
raise res['error'] # Error raised by the remote side # depends on [control=['if'], data=['res']]
return res |
def send_stdout(cls, sock, payload):
    """Send the Stdout chunk over the specified socket.

    Thin wrapper: frames *payload* as a ChunkType.STDOUT chunk and writes
    it to *sock* via the shared chunk-writing helper.
    """
    cls.write_chunk(sock, ChunkType.STDOUT, payload)
constant[Send the Stdout chunk over the specified socket.]
call[name[cls].write_chunk, parameter[name[sock], name[ChunkType].STDOUT, name[payload]]] | keyword[def] identifier[send_stdout] ( identifier[cls] , identifier[sock] , identifier[payload] ):
literal[string]
identifier[cls] . identifier[write_chunk] ( identifier[sock] , identifier[ChunkType] . identifier[STDOUT] , identifier[payload] ) | def send_stdout(cls, sock, payload):
"""Send the Stdout chunk over the specified socket."""
cls.write_chunk(sock, ChunkType.STDOUT, payload) |
def resume(profile_process='worker'):
    """
    Resume paused profiling.
    Parameters
    ----------
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`

    Raises
    ------
    ValueError
        if `profile_process` is neither 'worker' nor 'server'
        (previously surfaced as a bare KeyError).
    """
    profile_process2int = {'worker': 0, 'server': 1}
    if profile_process not in profile_process2int:
        raise ValueError(
            "profile_process must be 'worker' or 'server', got %r" % (profile_process,))
    # First argument 0 selects "resume" (vs. pause) in the C API.
    check_call(_LIB.MXProcessProfilePause(0,
                                          profile_process2int[profile_process],
                                          profiler_kvstore_handle))
constant[
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
]
variable[profile_process2int] assign[=] dictionary[[<ast.Constant object at 0x7da1b208ae00>, <ast.Constant object at 0x7da1b2088160>], [<ast.Constant object at 0x7da1b2089b70>, <ast.Constant object at 0x7da1b208b970>]]
call[name[check_call], parameter[call[name[_LIB].MXProcessProfilePause, parameter[call[name[int], parameter[constant[0]]], call[name[profile_process2int]][name[profile_process]], name[profiler_kvstore_handle]]]]] | keyword[def] identifier[resume] ( identifier[profile_process] = literal[string] ):
literal[string]
identifier[profile_process2int] ={ literal[string] : literal[int] , literal[string] : literal[int] }
identifier[check_call] ( identifier[_LIB] . identifier[MXProcessProfilePause] ( identifier[int] ( literal[int] ),
identifier[profile_process2int] [ identifier[profile_process] ],
identifier[profiler_kvstore_handle] )) | def resume(profile_process='worker'):
"""
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(0), profile_process2int[profile_process], profiler_kvstore_handle)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.